/*
 * Copyright (C) 2021 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "va_compiler.h"

/* Valhall specific instruction selection optimizations */

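/* Map a two-source add opcode to its fused-immediate form, or return 0 if no
 * such form exists. The signed and unsigned integer adds share an immediate
 * form, since two's complement addition is sign-agnostic.
 */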
static enum bi_opcode
va_op_add_imm(enum bi_opcode op)
{
   switch (op) {
   case BI_OPCODE_FADD_F32:   return BI_OPCODE_FADD_IMM_F32;
   case BI_OPCODE_FADD_V2F16: return BI_OPCODE_FADD_IMM_V2F16;
   case BI_OPCODE_IADD_S32:
   case BI_OPCODE_IADD_U32:   return BI_OPCODE_IADD_IMM_I32;
   case BI_OPCODE_IADD_V2S16:
   case BI_OPCODE_IADD_V2U16: return BI_OPCODE_IADD_IMM_V2I16;
   case BI_OPCODE_IADD_V4S8:
   case BI_OPCODE_IADD_V4U8:  return BI_OPCODE_IADD_IMM_V4I8;
   default: return 0;
   }
}

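/* Check that an add with source s as the non-constant operand can be
 * rewritten in immediate form, which requires an identity (H01) swizzle on
 * that source and no source modifiers, clamp, or rounding mode.
 */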
static bool
va_is_add_imm(bi_instr *I, unsigned s)
{
   return I->src[s].swizzle == BI_SWIZZLE_H01 &&
          !I->src[s].abs && !I->src[s].neg && !I->clamp && !I->round;
}

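/* Pick which of the two sources is an inline constant, returning its index
 * (0 or 1), or ~0 if neither source is constant.
 */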
static unsigned
va_choose_imm(bi_instr *I)
{
   for (unsigned i = 0; i < 2; ++i) {
      if (I->src[i].type == BI_INDEX_CONSTANT)
         return i;
   }

   return ~0;
}

/* Lower MOV.i32 #constant --> IADD_IMM.i32 0x0, #constant */
static void
va_lower_mov_imm(bi_instr *I)
{
   if (I->src[0].type == BI_INDEX_CONSTANT) {
      I->op = BI_OPCODE_IADD_IMM_I32;
      I->index = I->src[0].value;
      I->src[0] = bi_zero();
   }
}

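/* Fuse an inline constant source into the immediate field of an add, e.g.
 * FADD.f32 dst, src, #c becomes FADD_IMM.f32 dst, src with c packed into the
 * instruction's immediate. MOV.i32 of a constant is handled as a special
 * case, adding the constant to zero.
 */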
void
va_fuse_add_imm(bi_instr *I)
{
   if (I->op == BI_OPCODE_MOV_I32) {
      va_lower_mov_imm(I);
      return;
   }

   enum bi_opcode op = va_op_add_imm(I->op);
   if (!op)
      return;

   unsigned s = va_choose_imm(I);
   if (s > 1)
      return;

   if (!va_is_add_imm(I, 1 - s))
      return;

   I->op = op;
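   /* Fold the constant's swizzle into the immediate value itself, since the
    * fused immediate replaces the source slot and carries no swizzle.
    */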
   I->index = bi_apply_swizzle(I->src[s].value, I->src[s].swizzle);

   assert(!I->src[s].abs && "redundant .abs set");

   /* If the constant is negated, flip the sign bit */
   if (I->src[s].neg) {
      if (I->op == BI_OPCODE_FADD_IMM_F32)
         I->index ^= (1u << 31);
      else if (I->op == BI_OPCODE_FADD_IMM_V2F16)
         I->index ^= (1u << 31) | (1u << 15);
      else
         unreachable("unexpected .neg");
   }

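   /* The constant is now encoded in the immediate, so shift the surviving
    * source into slot 0 and drop the second source.
    */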
   I->src[0] = I->src[1 - s];
   I->src[1] = bi_null();
}

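/* Entry point: apply the Valhall-specific optimizations above to every
 * instruction in the program.
 */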
void
va_optimize(bi_context *ctx)
{
   bi_foreach_instr_global(ctx, I) {
      va_fuse_add_imm(I);
   }
}