/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

#include "util/format_rgb9e5.h"

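/* Shifts @value left by @left_shift bits.  A negative @left_shift instead
 * performs an unsigned (logical) right shift, and a shift of zero returns
 * @value unmodified.
 */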
static inline nir_ssa_def *
nir_shift_imm(nir_builder *b, nir_ssa_def *value, int left_shift)
{
   if (left_shift > 0)
      return nir_ishl(b, value, nir_imm_int(b, left_shift));
   else if (left_shift < 0)
      return nir_ushr(b, value, nir_imm_int(b, -left_shift));
   else
      return value;
}

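/* Same as nir_shift_imm() but with an SSA shift amount, so the direction is
 * selected with a bcsel at shader run time.
 */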
static inline nir_ssa_def *
nir_shift(nir_builder *b, nir_ssa_def *value, nir_ssa_def *left_shift)
{
   return nir_bcsel(b,
                    nir_ige(b, left_shift, nir_imm_int(b, 0)),
                    nir_ishl(b, value, left_shift),
                    nir_ushr(b, value, nir_ineg(b, left_shift)));
}

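/* ANDs @src with @mask and shifts the result by @left_shift bits.
 * nir_mask_shift_or() additionally ORs the result into @dst; together these
 * are the basic building blocks of the bitfield packing helpers below.
 */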
static inline nir_ssa_def *
nir_mask_shift(struct nir_builder *b, nir_ssa_def *src,
               uint32_t mask, int left_shift)
{
   return nir_shift_imm(b, nir_iand(b, src, nir_imm_int(b, mask)), left_shift);
}

static inline nir_ssa_def *
nir_mask_shift_or(struct nir_builder *b, nir_ssa_def *dst, nir_ssa_def *src,
                  uint32_t src_mask, int src_left_shift)
{
   return nir_ior(b, nir_mask_shift(b, src, src_mask, src_left_shift), dst);
}

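/* Masks each component of @src down to its bottom bits[i] bits, i.e.
 * component i is ANDed with (1 << bits[i]) - 1.
 */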
static inline nir_ssa_def *
nir_format_mask_uvec(nir_builder *b, nir_ssa_def *src, const unsigned *bits)
{
   nir_const_value mask[NIR_MAX_VEC_COMPONENTS];
   memset(mask, 0, sizeof(mask));
   for (unsigned i = 0; i < src->num_components; i++) {
      assert(bits[i] < 32);
      mask[i].u32 = (1u << bits[i]) - 1;
   }
   return nir_iand(b, src, nir_build_imm(b, src->num_components, 32, mask));
}

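/* Sign-extends each component of @src from bits[i] bits using the usual
 * shift-left-then-arithmetic-shift-right trick.  For example, with
 * bits[i] == 5 and a 32-bit source, 0x10 (-16 in 5-bit two's complement)
 * becomes (0x10 << 27) >> 27 == 0xfffffff0 == -16.
 */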
static inline nir_ssa_def *
nir_format_sign_extend_ivec(nir_builder *b, nir_ssa_def *src,
                            const unsigned *bits)
{
   assert(src->num_components <= 4);
   nir_ssa_def *comps[4];
   for (unsigned i = 0; i < src->num_components; i++) {
      nir_ssa_def *shift = nir_imm_int(b, src->bit_size - bits[i]);
      comps[i] = nir_ishr(b, nir_ishl(b, nir_channel(b, src, i), shift), shift);
   }
   return nir_vec(b, comps, src->num_components);
}

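/* Unpacks up to four integer channels from @packed, where bits[i] gives the
 * width of channel i and channels are laid out LSB-first.  Each channel is
 * isolated with a shift-up/shift-down pair so that it comes out either
 * zero- or sign-extended as requested.  For example (hypothetical caller,
 * not part of this header), an RGBA4444 texel could be unpacked as:
 *
 *    static const unsigned bits4444[4] = { 4, 4, 4, 4 };
 *    nir_ssa_def *rgba = nir_format_unpack_uint(b, texel, bits4444, 4);
 */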
static inline nir_ssa_def *
nir_format_unpack_int(nir_builder *b, nir_ssa_def *packed,
                      const unsigned *bits, unsigned num_components,
                      bool sign_extend)
{
   assert(num_components >= 1 && num_components <= 4);
   const unsigned bit_size = packed->bit_size;
   nir_ssa_def *comps[4];

   if (bits[0] >= bit_size) {
      assert(bits[0] == bit_size);
      assert(num_components == 1);
      return packed;
   }

   unsigned next_chan = 0;
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] < bit_size);
      assert(offset + bits[i] <= bit_size);
      nir_ssa_def *chan = nir_channel(b, packed, next_chan);
      nir_ssa_def *lshift = nir_imm_int(b, bit_size - (offset + bits[i]));
      nir_ssa_def *rshift = nir_imm_int(b, bit_size - bits[i]);
      if (sign_extend)
         comps[i] = nir_ishr(b, nir_ishl(b, chan, lshift), rshift);
      else
         comps[i] = nir_ushr(b, nir_ishl(b, chan, lshift), rshift);
      offset += bits[i];
      if (offset >= bit_size) {
         next_chan++;
         offset -= bit_size;
      }
   }

   return nir_vec(b, comps, num_components);
}

static inline nir_ssa_def *
nir_format_unpack_uint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, false);
}

static inline nir_ssa_def *
nir_format_unpack_sint(nir_builder *b, nir_ssa_def *packed,
                       const unsigned *bits, unsigned num_components)
{
   return nir_format_unpack_int(b, packed, bits, num_components, true);
}

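/* Packs the components of @color LSB-first into a single value, advancing
 * by bits[i] for each channel.  The channels are assumed to already fit in
 * their fields; use nir_format_pack_uint() if they may have stray high bits.
 */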
static inline nir_ssa_def *
nir_format_pack_uint_unmasked(nir_builder *b, nir_ssa_def *color,
                              const unsigned *bits, unsigned num_components)
{
   assert(num_components >= 1 && num_components <= 4);
   nir_ssa_def *packed = nir_imm_int(b, 0);
   unsigned offset = 0;
   for (unsigned i = 0; i < num_components; i++) {
      packed = nir_ior(b, packed, nir_shift_imm(b, nir_channel(b, color, i),
                                                offset));
      offset += bits[i];
   }
   assert(offset <= packed->bit_size);

   return packed;
}

static inline nir_ssa_def *
nir_format_pack_uint_unmasked_ssa(nir_builder *b, nir_ssa_def *color,
                                  nir_ssa_def *bits)
{
   nir_ssa_def *packed = nir_imm_int(b, 0);
   nir_ssa_def *offset = nir_imm_int(b, 0);
   for (unsigned i = 0; i < bits->num_components; i++) {
      packed = nir_ior(b, packed, nir_ishl(b, nir_channel(b, color, i), offset));
      offset = nir_iadd(b, offset, nir_channel(b, bits, i));
   }
   return packed;
}

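/* Masks each channel to its field width, then packs LSB-first.  For example
 * (hypothetical caller, not part of this header), packing a uvec4 into an
 * RGBA8888 texel:
 *
 *    static const unsigned bits8888[4] = { 8, 8, 8, 8 };
 *    nir_ssa_def *texel = nir_format_pack_uint(b, rgba, bits8888, 4);
 */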
static inline nir_ssa_def *
nir_format_pack_uint(nir_builder *b, nir_ssa_def *color,
                     const unsigned *bits, unsigned num_components)
{
   return nir_format_pack_uint_unmasked(b, nir_format_mask_uvec(b, color, bits),
                                        bits, num_components);
}

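/* Reinterprets a vector of src_bits-sized unsigned values as a vector of
 * dst_bits-sized values, preserving overall bit order: component 0 always
 * occupies the lowest bits.  The values are assumed to already be masked to
 * src_bits.  For example, a uvec4 of 8-bit values becomes a single 32-bit
 * value with component 0 in the low byte, and vice versa when narrowing.
 */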
static inline nir_ssa_def *
nir_format_bitcast_uvec_unmasked(nir_builder *b, nir_ssa_def *src,
                                 unsigned src_bits, unsigned dst_bits)
{
   assert(src->bit_size >= src_bits && src->bit_size >= dst_bits);
   assert(src_bits == 8 || src_bits == 16 || src_bits == 32);
   assert(dst_bits == 8 || dst_bits == 16 || dst_bits == 32);

   if (src_bits == dst_bits)
      return src;

   const unsigned dst_components =
      DIV_ROUND_UP(src->num_components * src_bits, dst_bits);
   assert(dst_components <= 4);

   nir_ssa_def *dst_chan[4] = {0};
   if (dst_bits > src_bits) {
      unsigned shift = 0;
      unsigned dst_idx = 0;
      for (unsigned i = 0; i < src->num_components; i++) {
         nir_ssa_def *shifted = nir_ishl(b, nir_channel(b, src, i),
                                         nir_imm_int(b, shift));
         if (shift == 0) {
            dst_chan[dst_idx] = shifted;
         } else {
            dst_chan[dst_idx] = nir_ior(b, dst_chan[dst_idx], shifted);
         }

         shift += src_bits;
         if (shift >= dst_bits) {
            dst_idx++;
            shift = 0;
         }
      }
   } else {
      nir_ssa_def *mask = nir_imm_int(b, ~0u >> (32 - dst_bits));

      unsigned src_idx = 0;
      unsigned shift = 0;
      for (unsigned i = 0; i < dst_components; i++) {
         dst_chan[i] = nir_iand(b, nir_ushr_imm(b, nir_channel(b, src, src_idx),
                                                shift),
                                   mask);
         shift += dst_bits;
         if (shift >= src_bits) {
            src_idx++;
            shift = 0;
         }
      }
   }

   return nir_vec(b, dst_chan, dst_components);
}

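/* Builds the per-channel normalization factor 2^(bits[i] - is_signed) - 1
 * as a float: 255.0 for unorm8, 127.0 for snorm8, and so on.
 */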
static inline nir_ssa_def *
_nir_format_norm_factor(nir_builder *b, const unsigned *bits,
                        unsigned num_components,
                        bool is_signed)
{
   nir_const_value factor[NIR_MAX_VEC_COMPONENTS];
   memset(factor, 0, sizeof(factor));
   for (unsigned i = 0; i < num_components; i++) {
      assert(bits[i] <= 32);
      factor[i].f32 = (1ull << (bits[i] - is_signed)) - 1;
   }
   return nir_build_imm(b, num_components, 32, factor);
}

static inline nir_ssa_def *
nir_format_unorm_to_float(nir_builder *b, nir_ssa_def *u, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, u->num_components, false);

   return nir_fdiv(b, nir_u2f32(b, u), factor);
}

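/* The fmax with -1.0 is needed because two's complement has one extra value
 * on the negative side: e.g. for snorm8, -128 / 127 is slightly below -1.0,
 * but both -128 and -127 must map to exactly -1.0.
 */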
static inline nir_ssa_def *
nir_format_snorm_to_float(nir_builder *b, nir_ssa_def *s, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, s->num_components, true);

   return nir_fmax(b, nir_fdiv(b, nir_i2f32(b, s), factor),
                      nir_imm_float(b, -1.0f));
}

static inline nir_ssa_def *
nir_format_float_to_unorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, false);

   /* Clamp to the range [0, 1] */
   f = nir_fsat(b, f);

   return nir_f2u32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

static inline nir_ssa_def *
nir_format_float_to_snorm(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   nir_ssa_def *factor =
      _nir_format_norm_factor(b, bits, f->num_components, true);

   /* Clamp to the range [-1, 1] */
   f = nir_fmin(b, nir_fmax(b, f, nir_imm_float(b, -1)), nir_imm_float(b, 1));

   return nir_f2i32(b, nir_fround_even(b, nir_fmul(b, f, factor)));
}

/* Converts a vector of floats to a vector of half-floats packed in the low 16
 * bits.
 */
static inline nir_ssa_def *
nir_format_float_to_half(nir_builder *b, nir_ssa_def *f)
{
   nir_ssa_def *zero = nir_imm_float(b, 0);
   nir_ssa_def *f16comps[4];
   for (unsigned i = 0; i < f->num_components; i++)
      f16comps[i] = nir_pack_half_2x16_split(b, nir_channel(b, f, i), zero);
   return nir_vec(b, f16comps, f->num_components);
}

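/* The standard piecewise sRGB encode function:
 *
 *    srgb(c) = 12.92 * c                     if c < 0.0031308
 *              1.055 * c^(1 / 2.4) - 0.055   otherwise
 *
 * with the result saturated to [0, 1].
 */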
static inline nir_ssa_def *
nir_format_linear_to_srgb(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fmul(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fsub(b, nir_fmul(b, nir_imm_float(b, 1.055f),
                              nir_fpow(b, c, nir_imm_float(b, 1.0 / 2.4))),
                  nir_imm_float(b, 0.055f));

   return nir_fsat(b, nir_bcsel(b, nir_flt(b, c, nir_imm_float(b, 0.0031308f)),
                                   linear, curved));
}

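/* The inverse of nir_format_linear_to_srgb(): divides by 12.92 below the
 * 0.04045 threshold and applies ((c + 0.055) / 1.055)^2.4 above it.
 */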
static inline nir_ssa_def *
nir_format_srgb_to_linear(nir_builder *b, nir_ssa_def *c)
{
   nir_ssa_def *linear = nir_fdiv(b, c, nir_imm_float(b, 12.92f));
   nir_ssa_def *curved =
      nir_fpow(b, nir_fdiv(b, nir_fadd(b, c, nir_imm_float(b, 0.055f)),
                              nir_imm_float(b, 1.055f)),
                  nir_imm_float(b, 2.4f));

   return nir_fsat(b, nir_bcsel(b, nir_fge(b, nir_imm_float(b, 0.04045f), c),
                                   linear, curved));
}

/* Clamps a vector of uints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_uint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value max[NIR_MAX_VEC_COMPONENTS];
   memset(max, 0, sizeof(max));
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max[i].u32 = (1u << bits[i]) - 1;
   }
   return nir_umin(b, f, nir_build_imm(b, f->num_components, 32, max));
}

/* Clamps a vector of sints so they don't extend beyond the given number of
 * bits per channel.
 */
static inline nir_ssa_def *
nir_format_clamp_sint(nir_builder *b, nir_ssa_def *f, const unsigned *bits)
{
   if (bits[0] == 32)
      return f;

   nir_const_value min[NIR_MAX_VEC_COMPONENTS], max[NIR_MAX_VEC_COMPONENTS];
   memset(min, 0, sizeof(min));
   memset(max, 0, sizeof(max));
   for (unsigned i = 0; i < f->num_components; i++) {
      assert(bits[i] < 32);
      max[i].i32 = (1 << (bits[i] - 1)) - 1;
      min[i].i32 = -(1 << (bits[i] - 1));
   }
   f = nir_imin(b, f, nir_build_imm(b, f->num_components, 32, max));
   f = nir_imax(b, f, nir_build_imm(b, f->num_components, 32, min));

   return f;
}

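/* Unpacks R11G11B10_FLOAT.  Each field is masked and shifted so that its
 * exponent and mantissa land where a 16-bit float's would (the sign bit is
 * left as zero), then expanded to float32 with unpack_half_2x16.
 */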
static inline nir_ssa_def *
nir_format_unpack_11f11f10f(nir_builder *b, nir_ssa_def *packed)
{
   nir_ssa_def *chans[3];
   chans[0] = nir_mask_shift(b, packed, 0x000007ff, 4);
   chans[1] = nir_mask_shift(b, packed, 0x003ff800, -7);
   chans[2] = nir_mask_shift(b, packed, 0xffc00000, -17);

   for (unsigned i = 0; i < 3; i++)
      chans[i] = nir_unpack_half_2x16_split_x(b, chans[i]);

   return nir_vec(b, chans, 3);
}

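/* Packs a vec3 of floats into R11G11B10_FLOAT by first converting each
 * channel to a 16-bit float, then dropping the sign bit and the low
 * mantissa bits that the smaller formats lack.
 */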
static inline nir_ssa_def *
nir_format_pack_11f11f10f(nir_builder *b, nir_ssa_def *color)
{
   /* 10 and 11-bit floats are unsigned.  Clamp to non-negative */
   nir_ssa_def *clamped = nir_fmax(b, color, nir_imm_float(b, 0));

   nir_ssa_def *undef = nir_ssa_undef(b, 1, color->bit_size);
   nir_ssa_def *p1 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 0),
                                                 nir_channel(b, clamped, 1));
   nir_ssa_def *p2 = nir_pack_half_2x16_split(b, nir_channel(b, clamped, 2),
                                                 undef);

   /* A 10 or 11-bit float has the same exponent as a 16-bit float but with
    * fewer mantissa bits and no sign bit.  All we have to do is throw away
    * the sign bit and the bottom mantissa bits and shift it into place.
    */
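   /* Concretely (s = sign, e = exponent, m = mantissa):
    *
    *    half:       seeeeemmmmmmmmmm   (bits 15..0)
    *    11f field:   eeeeemmmmmm       (drop s and the low 4 mantissa bits)
    *    10f field:   eeeeemmmmm        (drop s and the low 5 mantissa bits)
    */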
   nir_ssa_def *packed = nir_imm_int(b, 0);
   packed = nir_mask_shift_or(b, packed, p1, 0x00007ff0, -4);
   packed = nir_mask_shift_or(b, packed, p1, 0x7ff00000, -9);
   packed = nir_mask_shift_or(b, packed, p2, 0x00007fe0, 17);

   return packed;
}

static inline nir_ssa_def *
nir_format_pack_r9g9b9e5(nir_builder *b, nir_ssa_def *color)
{
   /* See also float3_to_rgb9e5 */

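   /* RGB9E5 is a shared-exponent format: three 9-bit mantissas sharing a
    * single 5-bit exponent.  The steps below follow the CPU reference
    * implementation referenced above, with its intermediate expressions
    * quoted in the comments.
    */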
   /* First, we need to clamp it to range. */
   nir_ssa_def *clamped = nir_fmin(b, color, nir_imm_float(b, MAX_RGB9E5));

   /* Get rid of negatives and NaN */
   clamped = nir_bcsel(b, nir_ult(b, nir_imm_int(b, 0x7f800000), color),
                          nir_imm_float(b, 0), clamped);

   /* maxrgb.u = MAX3(rc.u, gc.u, bc.u); */
   nir_ssa_def *maxu = nir_umax(b, nir_channel(b, clamped, 0),
                       nir_umax(b, nir_channel(b, clamped, 1),
                                   nir_channel(b, clamped, 2)));

   /* maxrgb.u += maxrgb.u & (1 << (23-9)); */
   maxu = nir_iadd(b, maxu, nir_iand(b, maxu, nir_imm_int(b, 1 << 14)));

   /* exp_shared = MAX2((maxrgb.u >> 23), -RGB9E5_EXP_BIAS - 1 + 127) +
    *              1 + RGB9E5_EXP_BIAS - 127;
    */
   nir_ssa_def *exp_shared =
      nir_iadd(b, nir_umax(b, nir_ushr_imm(b, maxu, 23),
                              nir_imm_int(b, -RGB9E5_EXP_BIAS - 1 + 127)),
                  nir_imm_int(b, 1 + RGB9E5_EXP_BIAS - 127));

   /* revdenom_biasedexp = 127 - (exp_shared - RGB9E5_EXP_BIAS -
    *                             RGB9E5_MANTISSA_BITS) + 1;
    */
   nir_ssa_def *revdenom_biasedexp =
      nir_isub(b, nir_imm_int(b, 127 + RGB9E5_EXP_BIAS +
                                 RGB9E5_MANTISSA_BITS + 1),
                  exp_shared);

   /* revdenom.u = revdenom_biasedexp << 23; */
   nir_ssa_def *revdenom =
      nir_ishl(b, revdenom_biasedexp, nir_imm_int(b, 23));

   /* rm = (int) (rc.f * revdenom.f);
    * gm = (int) (gc.f * revdenom.f);
    * bm = (int) (bc.f * revdenom.f);
    */
   nir_ssa_def *mantissa =
      nir_f2i32(b, nir_fmul(b, clamped, revdenom));

   /* rm = (rm & 1) + (rm >> 1);
    * gm = (gm & 1) + (gm >> 1);
    * bm = (bm & 1) + (bm >> 1);
    */
   mantissa = nir_iadd(b, nir_iand_imm(b, mantissa, 1),
                          nir_ushr_imm(b, mantissa, 1));

   nir_ssa_def *packed = nir_channel(b, mantissa, 0);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 1), ~0, 9);
   packed = nir_mask_shift_or(b, packed, nir_channel(b, mantissa, 2), ~0, 18);
   packed = nir_mask_shift_or(b, packed, exp_shared, ~0, 27);

   return packed;
}