/*
 * Copyright © 2020 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

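/* Tries to strip a useless deref cast off of one side of a memcpy_deref.
 * On success, deref_src is rewritten to point at the cast's parent deref
 * and true is returned.
 */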
static bool
opt_memcpy_deref_cast(nir_intrinsic_instr *cpy, nir_src *deref_src)
{
   assert(cpy->intrinsic == nir_intrinsic_memcpy_deref);

   nir_deref_instr *cast = nir_src_as_deref(*deref_src);
   if (cast == NULL || cast->deref_type != nir_deref_type_cast)
      return false;

   /* We always have to replace the source with a deref, not a bare uint
    * pointer.  If it's the first deref in the chain, bail.
    */
   nir_deref_instr *parent = nir_src_as_deref(cast->parent);
   if (parent == NULL)
      return false;

   /* If it has useful alignment information, we want to keep that */
   if (cast->cast.align_mul > 0)
      return false;

   /* Casts to uint8 or int8 never do us any good; get rid of them */
   if (cast->type == glsl_int8_t_type() ||
       cast->type == glsl_uint8_t_type()) {
      nir_instr_rewrite_src(&cpy->instr, deref_src,
                            nir_src_for_ssa(&parent->dest.ssa));
      return true;
   }

   int64_t parent_type_size = glsl_get_explicit_size(parent->type, false);
   if (parent_type_size < 0)
      return false;

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   /* We don't want to get rid of the cast if the resulting type would be
    * smaller than the amount of data we're copying.
    */
   if (nir_src_as_uint(cpy->src[2]) < (uint64_t)parent_type_size)
      return false;

   nir_instr_rewrite_src(&cpy->instr, deref_src,
                         nir_src_for_ssa(&parent->dest.ssa));
   return true;
}

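/* Returns true if every byte of the type is accounted for: struct fields
 * start exactly where the previous field ended, array strides match the
 * element size, and vectors and scalars have no explicit stride.  On
 * success, the total size in bytes is written to *size_out.
 */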
static bool
type_is_tightly_packed(const struct glsl_type *type, unsigned *size_out)
{
   unsigned size = 0;
   if (glsl_type_is_struct_or_ifc(type)) {
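      /* Walk the fields in order; each one must have a known offset that
       * lands exactly at the running size, or there is padding between
       * fields.
       */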
      unsigned num_fields = glsl_get_length(type);
      for (unsigned i = 0; i < num_fields; i++) {
         const struct glsl_struct_field *field =
            glsl_get_struct_field_data(type, i);

         if (field->offset < 0 || field->offset != size)
            return false;

         unsigned field_size;
         if (!type_is_tightly_packed(field->type, &field_size))
            return false;

         size = field->offset + field_size;
      }
   } else if (glsl_type_is_array_or_matrix(type)) {
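      /* An array (or matrix) is tightly packed only if its stride equals
       * the tightly packed size of its element type.
       */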
      if (glsl_type_is_unsized_array(type))
         return false;

      unsigned stride = glsl_get_explicit_stride(type);
      if (stride == 0)
         return false;

      const struct glsl_type *elem_type = glsl_get_array_element(type);

      unsigned elem_size;
      if (!type_is_tightly_packed(elem_type, &elem_size))
         return false;

      if (elem_size != stride)
         return false;

      size = stride * glsl_get_length(type);
   } else {
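      /* Vectors and scalars are packed as long as they have no explicit
       * stride.  Booleans are rejected because their in-memory size is
       * not well-defined here.
       */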
      assert(glsl_type_is_vector_or_scalar(type));
      if (glsl_get_explicit_stride(type) > 0)
         return false;

      if (glsl_type_is_boolean(type))
         return false;

      size = glsl_get_explicit_size(type, false);
   }

   if (size_out)
      *size_out = size;
   return true;
}

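/* Tries to lower a single memcpy_deref: trivial copies are removed
 * outright and the rest are turned into a load/store pair or a copy_deref
 * where possible.  Returns true if the memcpy was removed.
 */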
static bool
try_lower_memcpy(nir_builder *b, nir_intrinsic_instr *cpy,
                 struct set *complex_vars)
{
   nir_deref_instr *dst = nir_src_as_deref(cpy->src[0]);
   nir_deref_instr *src = nir_src_as_deref(cpy->src[1]);

   /* A self-copy can always be eliminated */
   if (dst == src) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

   if (!nir_src_is_const(cpy->src[2]))
      return false;

   uint64_t size = nir_src_as_uint(cpy->src[2]);
   if (size == 0) {
      nir_instr_remove(&cpy->instr);
      return true;
   }

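   /* If both sides are vectors or scalars whose size is exactly the copied
    * size, the memcpy reduces to a load, a bitcast to the destination's
    * bit size, and a store.
    */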
   if (glsl_type_is_vector_or_scalar(src->type) &&
       glsl_type_is_vector_or_scalar(dst->type) &&
       glsl_get_explicit_size(dst->type, false) == size &&
       glsl_get_explicit_size(src->type, false) == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_ssa_def *data =
         nir_load_deref_with_access(b, src, nir_intrinsic_src_access(cpy));
      data = nir_bitcast_vector(b, data, glsl_get_bit_size(dst->type));
      assert(data->num_components == glsl_get_vector_elements(dst->type));
      nir_store_deref_with_access(b, dst, data, ~0 /* write mask */,
                                  nir_intrinsic_dst_access(cpy));
      return true;
   }

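   /* If both sides share the same tightly packed type and the copy covers
    * the whole type, the memcpy is exactly a copy_deref.
    */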
   unsigned type_size;
   if (dst->type == src->type &&
       type_is_tightly_packed(dst->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   /* If one of the two types is tightly packed and happens to equal the
    * memcpy size, then we can get rid of the memcpy by casting to that
    * type and doing a deref copy.
    *
    * However, if we blindly apply this logic, we may end up with extra casts
    * where we don't want them. The whole point of converting memcpy to
    * copy_deref is in the hopes that nir_opt_copy_prop_vars or
    * nir_lower_vars_to_ssa will get rid of the copy and those passes don't
    * handle casts well. Heuristically, only do this optimization if the
    * tightly packed type is on a deref with nir_var_function_temp so we stick
    * the cast on the other mode.
    */
   if (dst->modes == nir_var_function_temp &&
       type_is_tightly_packed(dst->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      src = nir_build_deref_cast(b, &src->dest.ssa,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   /* If we can get at the variable AND the only complex use of that variable
    * is as a memcpy destination, then we don't have to care about any empty
    * space in the variable.  In particular, we know that the variable is never
    * cast to any other type and it's never used as a memcpy source so nothing
    * can see any padding bytes.  This holds even if some other memcpy only
    * writes to part of the variable.
    */
   if (dst->deref_type == nir_deref_type_var &&
       dst->modes == nir_var_function_temp &&
       _mesa_set_search(complex_vars, dst->var) == NULL &&
       glsl_get_explicit_size(dst->type, false) <= size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      src = nir_build_deref_cast(b, &src->dest.ssa,
                                 src->modes, dst->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

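   /* The symmetric case: the tightly packed type is on the source, so put
    * the cast on the destination instead.
    */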
   if (src->modes == nir_var_function_temp &&
       type_is_tightly_packed(src->type, &type_size) &&
       type_size == size) {
      b->cursor = nir_instr_remove(&cpy->instr);
      dst = nir_build_deref_cast(b, &dst->dest.ssa,
                                 dst->modes, src->type, 0);
      nir_copy_deref_with_access(b, dst, src,
                                 nir_intrinsic_dst_access(cpy),
                                 nir_intrinsic_src_access(cpy));
      return true;
   }

   return false;
}

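/* Runs the memcpy optimization on a single function implementation. */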
static bool
opt_memcpy_impl(nir_function_impl *impl)
{
   bool progress = false;

   nir_builder b;
   nir_builder_init(&b, impl);

   struct set *complex_vars = _mesa_pointer_set_create(NULL);

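   /* First pass: collect every local variable with a complex use other
    * than being a memcpy destination.  try_lower_memcpy uses this set to
    * decide when padding in a destination variable is unobservable.
    */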
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_deref)
            continue;

         nir_deref_instr *deref = nir_instr_as_deref(instr);
         if (deref->deref_type != nir_deref_type_var)
            continue;

         nir_deref_instr_has_complex_use_options opts =
            nir_deref_instr_has_complex_use_allow_memcpy_dst;
         if (nir_deref_instr_has_complex_use(deref, opts))
            _mesa_set_add(complex_vars, deref->var);
      }
   }

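   /* Second pass: strip useless casts off of each memcpy_deref, then try
    * to lower it.
    */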
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *cpy = nir_instr_as_intrinsic(instr);
         if (cpy->intrinsic != nir_intrinsic_memcpy_deref)
            continue;

         while (opt_memcpy_deref_cast(cpy, &cpy->src[0]))
            progress = true;
         while (opt_memcpy_deref_cast(cpy, &cpy->src[1]))
            progress = true;

         if (try_lower_memcpy(&b, cpy, complex_vars)) {
            progress = true;
            continue;
         }
      }
   }

   _mesa_set_destroy(complex_vars, NULL);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

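/* Lowers memcpy_deref intrinsics to copy_deref or load/store pairs where
 * possible so that later passes such as nir_opt_copy_prop_vars and
 * nir_lower_vars_to_ssa can eliminate the copies entirely.
 */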
bool
nir_opt_memcpy(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl && opt_memcpy_impl(function->impl))
         progress = true;
   }

   return progress;
}