/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "util/fast_idiv_by_const.h"
#include "util/u_math.h"

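/* Build an unsigned division of n by the constant d.  Division by zero
 * yields an undefined result in GLSL and SPIR-V, so returning zero is as
 * good as anything; a power-of-two divisor becomes a right-shift; the
 * general case uses the pre-shift/increment/multiply-high/post-shift
 * sequence computed by util_compute_fast_udiv_info().
 */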
static nir_ssa_def *
build_udiv(nir_builder *b, nir_ssa_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_ushr_imm(b, n, util_logbase2_64(d));
   } else {
      struct util_fast_udiv_info m =
         util_compute_fast_udiv_info(d, n->bit_size, n->bit_size);

      if (m.pre_shift)
         n = nir_ushr_imm(b, n, m.pre_shift);
      if (m.increment)
         n = nir_uadd_sat(b, n, nir_imm_intN_t(b, m.increment, n->bit_size));
      n = nir_umul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (m.post_shift)
         n = nir_ushr_imm(b, n, m.post_shift);

      return n;
   }
}

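/* Build an unsigned modulo of n by the constant d.  A power-of-two divisor
 * becomes a bitwise AND with d - 1; otherwise compute n - udiv(n, d) * d on
 * top of the fast division above.
 */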
static nir_ssa_def *
build_umod(nir_builder *b, nir_ssa_def *n, uint64_t d)
{
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (util_is_power_of_two_or_zero64(d)) {
      return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
   } else {
      return nir_isub(b, n, nir_imul(b, build_udiv(b, n, d),
                                        nir_imm_intN_t(b, d, n->bit_size)));
   }
}

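/* Build a signed division of n by the constant d.  INT_MIN is handled first
 * because its magnitude is not representable: n / INT_MIN is 1 when
 * n == INT_MIN and 0 otherwise.  The trivial divisors 0, 1, and -1 fold
 * directly; a power-of-two magnitude shifts |n| and then fixes up the sign;
 * the general case uses the multiplier and shift from
 * util_compute_fast_sdiv_info(), adding the result's sign bit at the end
 * to round the quotient toward zero.
 */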
static nir_ssa_def *
build_idiv(nir_builder *b, nir_ssa_def *n, int64_t d)
{
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == int_min)
      return nir_b2i(b, nir_ieq_imm(b, n, int_min), n->bit_size);

   uint64_t abs_d = d < 0 ? -d : d;

   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == 1) {
      return n;
   } else if (d == -1) {
      return nir_ineg(b, n);
   } else if (util_is_power_of_two_or_zero64(abs_d)) {
      nir_ssa_def *uq = nir_ushr_imm(b, nir_iabs(b, n), util_logbase2_64(abs_d));
      nir_ssa_def *n_neg = nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size));
      nir_ssa_def *neg = d < 0 ? nir_inot(b, n_neg) : n_neg;
      return nir_bcsel(b, neg, nir_ineg(b, uq), uq);
   } else {
      struct util_fast_sdiv_info m =
         util_compute_fast_sdiv_info(d, n->bit_size);

      nir_ssa_def *res =
         nir_imul_high(b, n, nir_imm_intN_t(b, m.multiplier, n->bit_size));
      if (d > 0 && m.multiplier < 0)
         res = nir_iadd(b, res, n);
      if (d < 0 && m.multiplier > 0)
         res = nir_isub(b, res, n);
      if (m.shift)
         res = nir_ishr_imm(b, res, m.shift);
      res = nir_iadd(b, res, nir_ushr_imm(b, res, n->bit_size - 1));

      return res;
   }
}

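/* Build nir_op_irem (remainder with the sign of the numerator) of n by the
 * constant d.  For d == INT_MIN the remainder is 0 when n == INT_MIN and n
 * itself otherwise.  Since the result's sign never depends on the divisor's,
 * we can work with |d|: a power of two rounds negative numerators toward
 * zero before masking, and the general case is n - idiv(n, d) * d.
 */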
static nir_ssa_def *
build_irem(nir_builder *b, nir_ssa_def *n, int64_t d)
{
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == int_min) {
      return nir_bcsel(b, nir_ieq_imm(b, n, int_min), nir_imm_intN_t(b, 0, n->bit_size), n);
   } else {
      d = d < 0 ? -d : d;
      if (util_is_power_of_two_or_zero64(d)) {
         nir_ssa_def *tmp = nir_bcsel(b, nir_ilt(b, n, nir_imm_intN_t(b, 0, n->bit_size)),
                                      nir_iadd_imm(b, n, d - 1), n);
         return nir_isub(b, n, nir_iand_imm(b, tmp, -d));
      } else {
         return nir_isub(b, n, nir_imul(b, build_idiv(b, n, d),
                                           nir_imm_intN_t(b, d, n->bit_size)));
      }
   }
}

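/* Build nir_op_imod (modulo with the sign of the divisor) of n by the
 * constant d.  For d == INT_MIN, negative numerators other than INT_MIN
 * and n == 0 are already the answer; anything else needs INT_MIN added
 * (with INT_MIN + INT_MIN wrapping to 0).  A positive power of two is a
 * mask; a negative power of two ORs in the divisor's sign bits and maps
 * exact multiples to zero; the general case starts from irem and adds d
 * back when the signs of n and d disagree.
 */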
static nir_ssa_def *
build_imod(nir_builder *b, nir_ssa_def *n, int64_t d)
{
   int64_t int_min = u_intN_min(n->bit_size);
   if (d == 0) {
      return nir_imm_intN_t(b, 0, n->bit_size);
   } else if (d == int_min) {
      nir_ssa_def *int_min_def = nir_imm_intN_t(b, int_min, n->bit_size);
      nir_ssa_def *is_neg_not_int_min = nir_ult(b, int_min_def, n);
      nir_ssa_def *is_zero = nir_ieq_imm(b, n, 0);
      return nir_bcsel(b, nir_ior(b, is_neg_not_int_min, is_zero), n, nir_iadd(b, int_min_def, n));
   } else if (d > 0 && util_is_power_of_two_or_zero64(d)) {
      return nir_iand(b, n, nir_imm_intN_t(b, d - 1, n->bit_size));
   } else if (d < 0 && util_is_power_of_two_or_zero64(-d)) {
      nir_ssa_def *d_def = nir_imm_intN_t(b, d, n->bit_size);
      nir_ssa_def *res = nir_ior(b, n, d_def);
      return nir_bcsel(b, nir_ieq(b, res, d_def), nir_imm_intN_t(b, 0, n->bit_size), res);
   } else {
      nir_ssa_def *rem = build_irem(b, n, d);
      nir_ssa_def *zero = nir_imm_intN_t(b, 0, n->bit_size);
      nir_ssa_def *sign_same = d < 0 ? nir_ilt(b, n, zero) : nir_ige(b, n, zero);
      nir_ssa_def *rem_zero = nir_ieq(b, rem, zero);
      return nir_bcsel(b, nir_ior(b, rem_zero, sign_same), rem, nir_iadd_imm(b, rem, d));
   }
}

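/* Lower a single division/modulo ALU instruction whose divisor is constant.
 * Each channel is lowered independently, since swizzled divisor components
 * may each hold a different constant.  Returns true on progress.
 */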
static bool
nir_opt_idiv_const_instr(nir_builder *b, nir_alu_instr *alu)
{
   assert(alu->dest.dest.is_ssa);
   assert(alu->src[0].src.is_ssa && alu->src[1].src.is_ssa);

   if (!nir_src_is_const(alu->src[1].src))
      return false;

   unsigned bit_size = alu->src[1].src.ssa->bit_size;

   b->cursor = nir_before_instr(&alu->instr);

   nir_ssa_def *q[NIR_MAX_VEC_COMPONENTS];
   for (unsigned comp = 0; comp < alu->dest.dest.ssa.num_components; comp++) {
      /* Get the numerator for the channel */
      nir_ssa_def *n = nir_channel(b, alu->src[0].src.ssa,
                                   alu->src[0].swizzle[comp]);

      /* Get the denominator for the channel */
      int64_t d = nir_src_comp_as_int(alu->src[1].src,
                                      alu->src[1].swizzle[comp]);

      nir_alu_type d_type = nir_op_infos[alu->op].input_types[1];
      if (nir_alu_type_get_base_type(d_type) == nir_type_uint) {
         /* The code above sign-extended.  If we're lowering an unsigned op,
          * we need to mask it off to the correct number of bits so that a
          * cast to uint64_t will do the right thing.
          */
         if (bit_size < 64)
            d &= (1ull << bit_size) - 1;
      }

      switch (alu->op) {
      case nir_op_udiv:
         q[comp] = build_udiv(b, n, d);
         break;
      case nir_op_idiv:
         q[comp] = build_idiv(b, n, d);
         break;
      case nir_op_umod:
         q[comp] = build_umod(b, n, d);
         break;
      case nir_op_imod:
         q[comp] = build_imod(b, n, d);
         break;
      case nir_op_irem:
         q[comp] = build_irem(b, n, d);
         break;
      default:
         unreachable("Unknown integer division op");
      }
   }

   nir_ssa_def *qvec = nir_vec(b, q, alu->dest.dest.ssa.num_components);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, qvec);
   nir_instr_remove(&alu->instr);

   return true;
}

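/* Run the lowering over one function implementation, skipping divisions
 * narrower than min_bit_size, and preserve metadata accordingly.
 */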
static bool
nir_opt_idiv_const_impl(nir_function_impl *impl, unsigned min_bit_size)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_alu)
            continue;

         nir_alu_instr *alu = nir_instr_as_alu(instr);
         if (alu->op != nir_op_udiv &&
             alu->op != nir_op_idiv &&
             alu->op != nir_op_umod &&
             alu->op != nir_op_imod &&
             alu->op != nir_op_irem)
            continue;

         assert(alu->dest.dest.is_ssa);
         if (alu->dest.dest.ssa.bit_size < min_bit_size)
            continue;

         progress |= nir_opt_idiv_const_instr(&b, alu);
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

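/* Replace integer division and modulo operations that have a constant
 * divisor with equivalent multiply/shift/add sequences.  min_bit_size lets
 * callers restrict the lowering to wider divisions (for example, only
 * 64-bit ones).
 */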
bool
nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= nir_opt_idiv_const_impl(function->impl, min_bit_size);
   }

   return progress;
}