/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_control_flow.h"

/*
 * This file implements an optimization that deletes statically
 * unreachable/dead code. In NIR, one way this can happen is when an if
 * statement has a constant condition:
 *
 * if (true) {
 *    ...
 * }
 *
 * We delete the if statement and paste the contents of the always-executed
 * branch into the surrounding control flow, possibly removing more code if
 * the branch had a jump at the end.
 *
 * Another way is that control flow can end in a jump so that code after it
 * never gets executed. In particular, this can happen after optimizing
 * something like:
 *
 * if (true) {
 *    ...
 *    break;
 * }
 * ...
 *
 * We also consider the case where both branches of an if end in a jump, e.g.:
 *
 * if (...) {
 *    break;
 * } else {
 *    continue;
 * }
 * ...
 *
 * Finally, we also handle removing useless loops and ifs, i.e. loops and ifs
 * with no side effects and without any definitions that are used
 * elsewhere. This case is a little different from the first two in that the
 * code is actually run (it just never does anything), but there are similar
 * issues with needing to be careful with restarting after deleting the
 * cf_node (see dead_cf_list()) so this is a convenient place to remove them.
 */

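/* Delete all control flow that follows the given node within its enclosing CF
 * list. Used when a jump makes everything after the node unreachable.
 */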
static void
remove_after_cf_node(nir_cf_node *node)
{
   nir_cf_node *end = node;
   while (!nir_cf_node_is_last(end))
      end = nir_cf_node_next(end);

   nir_cf_list list;
   nir_cf_extract(&list, nir_after_cf_node(node), nir_after_cf_node(end));
   nir_cf_delete(&list);
}

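/* Replace an if whose condition is a known constant with the contents of the
 * branch that is actually executed, rewriting any phis after the if to use
 * the value coming from that branch.
 */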
static void
opt_constant_if(nir_if *if_stmt, bool condition)
{
   /* First, we need to remove any phi nodes after the if by rewriting uses to
    * point to the correct source.
    */
   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(&if_stmt->cf_node));
   nir_block *last_block = condition ? nir_if_last_then_block(if_stmt)
                                     : nir_if_last_else_block(if_stmt);

   nir_foreach_instr_safe(instr, after) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);
      nir_ssa_def *def = NULL;
      nir_foreach_phi_src(phi_src, phi) {
         if (phi_src->pred != last_block)
            continue;

         assert(phi_src->src.is_ssa);
         def = phi_src->src.ssa;
      }

      assert(def);
      assert(phi->dest.is_ssa);
      nir_ssa_def_rewrite_uses(&phi->dest.ssa, def);
      nir_instr_remove(instr);
   }

   /* The control flow list we're about to paste in may include a jump at the
    * end, and in that case we have to delete the rest of the control flow
    * list after the if since it's unreachable and the validator will balk if
    * we don't.
    */

   if (!exec_list_is_empty(&last_block->instr_list)) {
      nir_instr *last_instr = nir_block_last_instr(last_block);
      if (last_instr->type == nir_instr_type_jump)
         remove_after_cf_node(&if_stmt->cf_node);
   }

   /* Finally, actually paste in the then or else branch and delete the if. */
   struct exec_list *cf_list = condition ? &if_stmt->then_list
                                         : &if_stmt->else_list;

   nir_cf_list list;
   nir_cf_list_extract(&list, cf_list);
   nir_cf_reinsert(&list, nir_after_cf_node(&if_stmt->cf_node));
   nir_cf_node_remove(&if_stmt->cf_node);
}

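/* Callback for nir_foreach_ssa_def(): returns true if every use of the def
 * lies inside the given CF node (an if or a loop), i.e. the value never
 * escapes the node.
 */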
static bool
def_only_used_in_cf_node(nir_ssa_def *def, void *_node)
{
   nir_cf_node *node = _node;
   assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);

   nir_block *before = nir_cf_node_as_block(nir_cf_node_prev(node));
   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));

   nir_foreach_use(use, def) {
      /* Because NIR is structured, we can easily determine whether or not a
       * value escapes a CF node by looking at the block indices of its uses
       * to see if they lie outside the bounds of the CF node.
       *
       * Note: Normally, the uses of a phi instruction are considered to be
       * used in the block that is the predecessor of the phi corresponding to
       * that use.  If we were computing liveness or something similar, that
       * would mean a special case here for phis.  However, we're trying here
       * to determine if the SSA def ever escapes the CF node.  If it's used
       * by a phi that lives outside the node then it doesn't matter whether
       * the corresponding predecessor is inside the node or not, because the
       * value can flow through the phi into the outside world and escape.
       */
      if (use->parent_instr->block->index <= before->index ||
          use->parent_instr->block->index >= after->index)
         return false;
   }

   /* Same check for if-condition uses */
   nir_foreach_if_use(use, def) {
      nir_block *use_block =
         nir_cf_node_as_block(nir_cf_node_prev(&use->parent_if->cf_node));

      if (use_block->index <= before->index ||
          use_block->index >= after->index)
         return false;
   }

   return true;
}


/*
 * Test if a loop or if node is dead. Such a node is dead if:
 *
 * 1) It has no side effects: it contains no intrinsics that could affect
 * program state beyond producing an SSA value (such intrinsics lack the
 * NIR_INTRINSIC_CAN_ELIMINATE flag).
 *
 * 2) It has no phi instructions after it, since those indicate values defined
 * inside the node being used after the node.
 *
 * 3) None of the values defined inside the node is used outside the node,
 * i.e. none of the definitions that dominate the node exit are used outside.
 *
 * If those conditions hold, then the node is dead and can be deleted.
 */

static bool
node_is_dead(nir_cf_node *node)
{
   assert(node->type == nir_cf_node_loop || node->type == nir_cf_node_if);

   nir_block *after = nir_cf_node_as_block(nir_cf_node_next(node));

   /* Quick check if there are any phis that follow this CF node.  If there
    * are, then we automatically know it isn't dead.
    */
   if (!exec_list_is_empty(&after->instr_list) &&
       nir_block_first_instr(after)->type == nir_instr_type_phi)
      return false;

   nir_function_impl *impl = nir_cf_node_get_function(node);
   nir_metadata_require(impl, nir_metadata_block_index);

   nir_foreach_block_in_cf_node(block, node) {
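      /* Determine whether this block executes inside a loop: either the CF
       * node itself is a loop, or the block is nested in a loop somewhere
       * within the node.
       */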
      bool inside_loop = node->type == nir_cf_node_loop;
      for (nir_cf_node *n = &block->cf_node;
           !inside_loop && n != node; n = n->parent) {
         if (n->type == nir_cf_node_loop)
            inside_loop = true;
      }

      nir_foreach_instr(instr, block) {
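         /* Function calls may have arbitrary side effects, so conservatively
          * treat any call as keeping the node alive.
          */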
         if (instr->type == nir_instr_type_call)
            return false;

         /* Return and halt instructions can cause us to skip over other
          * side-effecting instructions after the loop, so consider them to
          * have side effects here.
          *
          * When the block is not inside a loop, break and continue might also
          * cause a skip.
          */
         if (instr->type == nir_instr_type_jump &&
             (!inside_loop ||
              nir_instr_as_jump(instr)->type == nir_jump_return ||
              nir_instr_as_jump(instr)->type == nir_jump_halt))
            return false;

         if (instr->type == nir_instr_type_intrinsic) {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            if (!(nir_intrinsic_infos[intrin->intrinsic].flags &
                NIR_INTRINSIC_CAN_ELIMINATE))
               return false;

            switch (intrin->intrinsic) {
            case nir_intrinsic_load_deref:
            case nir_intrinsic_load_ssbo:
            case nir_intrinsic_load_global:
               /* If there's a memory barrier after the loop, a load might be
                * required to happen before some other instruction after the
                * barrier, so it is not valid to eliminate it -- unless we
                * know we can reorder it.
                *
                * Only consider loads whose result can be affected by other
                * invocations.
                */
               if (intrin->intrinsic == nir_intrinsic_load_deref) {
                  nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
                  if (!nir_deref_mode_may_be(deref, nir_var_mem_ssbo |
                                                    nir_var_mem_shared |
                                                    nir_var_mem_global |
                                                    nir_var_shader_out))
                     break;
               }
               if (nir_intrinsic_access(intrin) & ACCESS_CAN_REORDER)
                  break;
               return false;

            case nir_intrinsic_load_shared:
            case nir_intrinsic_load_shared2_amd:
            case nir_intrinsic_load_output:
            case nir_intrinsic_load_per_vertex_output:
               /* Same reasoning as the loads above. */
               return false;

            default:
               /* Do nothing. */
               break;
            }
         }

         if (!nir_foreach_ssa_def(instr, def_only_used_in_cf_node, node))
            return false;
      }
   }

   return true;
}

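/* Optimize the if or loop (if any) that immediately follows the given block:
 * fold ifs with constant conditions and remove ifs/loops that node_is_dead()
 * says have no observable effect. Returns true if anything was changed.
 */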
static bool
dead_cf_block(nir_block *block)
{
   nir_if *following_if = nir_block_get_following_if(block);
   if (following_if) {
      if (nir_src_is_const(following_if->condition)) {
         opt_constant_if(following_if, nir_src_as_bool(following_if->condition));
         return true;
      }

      if (node_is_dead(&following_if->cf_node)) {
         nir_cf_node_remove(&following_if->cf_node);
         return true;
      }
   }

   nir_loop *following_loop = nir_block_get_following_loop(block);
   if (!following_loop)
      return false;

   if (!node_is_dead(&following_loop->cf_node))
      return false;

   nir_cf_node_remove(&following_loop->cf_node);
   return true;
}

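/* Recursively optimize a CF list. Sets *list_ends_in_jump if the list is
 * found to end in a jump, and returns true if any progress was made.
 */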
static bool
dead_cf_list(struct exec_list *list, bool *list_ends_in_jump)
{
   bool progress = false;
   *list_ends_in_jump = false;

   nir_cf_node *prev = NULL;

   foreach_list_typed(nir_cf_node, cur, node, list) {
      switch (cur->type) {
      case nir_cf_node_block: {
         nir_block *block = nir_cf_node_as_block(cur);
         if (dead_cf_block(block)) {
            /* We just deleted the if or loop after this block, so we may have
             * deleted the block before or after it -- which one is an
             * implementation detail. Therefore, to recover the place we were
             * at, we have to use the previous cf_node.
             */

            if (prev) {
               cur = nir_cf_node_next(prev);
            } else {
               cur = exec_node_data(nir_cf_node, exec_list_get_head(list),
                                    node);
            }

            block = nir_cf_node_as_block(cur);

            progress = true;
         }

         if (nir_block_ends_in_jump(block)) {
            *list_ends_in_jump = true;

            if (!exec_node_is_tail_sentinel(cur->node.next)) {
               remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_if: {
         nir_if *if_stmt = nir_cf_node_as_if(cur);
         bool then_ends_in_jump, else_ends_in_jump;
         progress |= dead_cf_list(&if_stmt->then_list, &then_ends_in_jump);
         progress |= dead_cf_list(&if_stmt->else_list, &else_ends_in_jump);

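         /* If both branches end in a jump, control never reaches the code
          * after the if, so anything that follows it can be removed.
          */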
         if (then_ends_in_jump && else_ends_in_jump) {
            *list_ends_in_jump = true;
            nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
            if (!exec_list_is_empty(&next->instr_list) ||
                !exec_node_is_tail_sentinel(next->cf_node.node.next)) {
               remove_after_cf_node(cur);
               return true;
            }
         }

         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cur);
         bool dummy;
         progress |= dead_cf_list(&loop->body, &dummy);

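         /* If the block after the loop has no predecessors, the loop never
          * exits, so everything that follows it is unreachable.
          */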
         nir_block *next = nir_cf_node_as_block(nir_cf_node_next(cur));
         if (next->predecessors->entries == 0 &&
             (!exec_list_is_empty(&next->instr_list) ||
             !exec_node_is_tail_sentinel(next->cf_node.node.next))) {
            remove_after_cf_node(cur);
            return true;
         }
         break;
      }

      default:
         unreachable("unknown cf node type");
      }

      prev = cur;
   }

   return progress;
}

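/* Run the pass over a single function implementation, repairing SSA form if
 * anything changed (see the comment below).
 */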
static bool
opt_dead_cf_impl(nir_function_impl *impl)
{
   bool dummy;
   bool progress = dead_cf_list(&impl->body, &dummy);

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_none);

      /* The CF manipulation code called by this pass is smart enough to keep
       * from breaking any SSA use/def chains by replacing any uses of removed
       * instructions with SSA undefs.  However, it's not quite smart enough
       * to always preserve the dominance properties.  In particular, if you
       * remove the one break from a loop, stuff in the loop may still be used
       * outside the loop even though there's no path between the two.  We can
       * easily fix these issues by calling nir_repair_ssa which will ensure
       * that the dominance properties hold.
       */
      nir_repair_ssa_impl(impl);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

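/* Pass entry point: remove dead control flow from every function in the
 * shader.
 */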
bool
nir_opt_dead_cf(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader)
      if (function->impl)
         progress |= opt_dead_cf_impl(function->impl);

   return progress;
}