/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/set.h"
#include "util/register_allocate.h"

using namespace brw;

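/* Map a VGRF reference to its allocated hardware GRF and fold any
 * whole-register part of the byte offset into the register number.
 * Illustrative numbers, with REG_SIZE == 32: a byte offset of 40 adds one
 * GRF to the register number and leaves a residual byte offset of 8.
 */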
static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

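/* A worked example of the trivial mapping below (illustrative numbers): with
 * first_non_payload_grf == 3, reg_width == 2 and alloc.sizes == {2, 1, 4},
 * hw_reg_mapping becomes {4, 6, 7, 11}, so VGRF n starts at hardware GRF
 * hw_reg_mapping[n] and grf_used ends up as 11.
 */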
void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}

/**
 * Size of a register from the aligned_bary_class register class.
 */
static unsigned
aligned_bary_size(unsigned dispatch_width)
{
   return (dispatch_width == 8 ? 2 : 4);
}

static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct intel_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   const int index = util_logbase2(dispatch_width / 8);
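   /* I.e. dispatch widths 8, 16 and 32 select fs_reg_sets indices 0, 1 and 2
    * respectively.
    */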

   if (dispatch_width > 8 && devinfo->ver >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use fewer some day.
    *
    * Additionally, on gfx5 we need aligned pairs of registers for the PLN
    * instruction, and on gfx4 we need 8 contiguous regs for the SIMD16
    * texturing workaround.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];
   for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
      class_sizes[i] = i + 1;

   struct ra_regs *regs = ra_alloc_reg_set(compiler, BRW_MAX_GRF, false);
   if (devinfo->ver >= 6)
      ra_set_allocate_round_robin(regs);
   struct ra_class **classes = ralloc_array(compiler, struct ra_class *, class_count);
   struct ra_class *aligned_bary_class = NULL;

   /* Now, make the register classes for each size of contiguous register
    * allocation we might need to make.
    */
   for (int i = 0; i < class_count; i++) {
      classes[i] = ra_alloc_contig_reg_class(regs, class_sizes[i]);

      if (devinfo->ver <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          * ...
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical register
          */
         for (int reg = 0; reg <= base_reg_count - class_sizes[i]; reg += 2)
            ra_class_add_reg(classes[i], reg);
      } else {
         for (int reg = 0; reg <= base_reg_count - class_sizes[i]; reg++)
            ra_class_add_reg(classes[i], reg);
      }
   }

   /* Add a special class for aligned barycentrics, which we'll put the
    * first source of LINTERP on so that we can do PLN on Gen <= 6.
    */
   if (devinfo->has_pln && (devinfo->ver == 6 ||
                            (dispatch_width == 8 && devinfo->ver <= 5))) {
      int contig_len = aligned_bary_size(dispatch_width);
      aligned_bary_class = ra_alloc_contig_reg_class(regs, contig_len);

      for (int i = 0; i <= base_reg_count - contig_len; i += 2)
         ra_class_add_reg(aligned_bary_class, i);
   }

   ra_set_finalize(regs, NULL);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = NULL;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].aligned_bary_class = aligned_bary_class;
}

void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
   brw_alloc_reg_set(compiler, 32);
}

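/* Illustrative control-flow shape for count_to_loop_end() below, assuming
 * the caller sits on the outer DO:
 *
 *    do          <- block passed in (its DO is not counted; depth starts at 1)
 *       do
 *       while    <- depth drops back to 1, keep scanning
 *    while       <- depth hits 0, this block's end_ip is returned
 */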
static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling function already found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip) const
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are defined only at the start of the shader
          * execution, any uses of the payload within a loop mean the live
          * interval extends to the end of the outermost loop.  Find the ip of
          * the end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            int node_nr = inst->src[i].nr;
            if (node_nr >= payload_node_count)
               continue;

            for (unsigned j = 0; j < regs_read(inst, i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      if (inst->dst.file == FIXED_GRF) {
         int node_nr = inst->dst.nr;
         if (node_nr < payload_node_count) {
            for (unsigned j = 0; j < regs_written(inst); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}

class fs_reg_alloc {
public:
   fs_reg_alloc(fs_visitor *fs):
      fs(fs), devinfo(fs->devinfo), compiler(fs->compiler),
      live(fs->live_analysis.require()), g(NULL),
      have_spill_costs(false)
   {
      mem_ctx = ralloc_context(NULL);

      /* Stash the number of instructions so we can sanity check that our
       * counts still match liveness.
       */
      live_instr_count = fs->cfg->last_block()->end_ip + 1;

      spill_insts = _mesa_pointer_set_create(mem_ctx);

      /* Most of this allocation was written for a reg_width of 1
       * (dispatch_width == 8).  In extending to SIMD16, the code was
       * left in place and it was converted to have the hardware
       * registers it's allocating be contiguous physical pairs of regs
       * for reg_width == 2.
       */
      int reg_width = fs->dispatch_width / 8;
      rsi = util_logbase2(reg_width);
      payload_node_count = ALIGN(fs->first_non_payload_grf, reg_width);

      /* Get payload IP information */
      payload_last_use_ip = ralloc_array(mem_ctx, int, payload_node_count);

      node_count = 0;
      first_payload_node = 0;
      first_mrf_hack_node = 0;
      scratch_header_node = 0;
      grf127_send_hack_node = 0;
      first_vgrf_node = 0;
      last_vgrf_node = 0;
      first_spill_node = 0;

      spill_vgrf_ip = NULL;
      spill_vgrf_ip_alloc = 0;
      spill_node_count = 0;
   }

   ~fs_reg_alloc()
   {
      ralloc_free(mem_ctx);
   }

   bool assign_regs(bool allow_spilling, bool spill_all);

private:
   void setup_live_interference(unsigned node,
                                int node_start_ip, int node_end_ip);
   void setup_inst_interference(const fs_inst *inst);

   void build_interference_graph(bool allow_spilling);
   void discard_interference_graph();

   void emit_unspill(const fs_builder &bld, struct shader_stats *stats,
                     fs_reg dst, uint32_t spill_offset, unsigned count);
   void emit_spill(const fs_builder &bld, struct shader_stats *stats,
                   fs_reg src, uint32_t spill_offset, unsigned count);

   void set_spill_costs();
   int choose_spill_reg();
   fs_reg alloc_scratch_header();
   fs_reg alloc_spill_reg(unsigned size, int ip);
   void spill_reg(unsigned spill_reg);

   void *mem_ctx;
   fs_visitor *fs;
   const intel_device_info *devinfo;
   const brw_compiler *compiler;
   const fs_live_variables &live;
   int live_instr_count;

   set *spill_insts;

   /* Which compiler->fs_reg_sets[] to use */
   int rsi;

   ra_graph *g;
   bool have_spill_costs;

   int payload_node_count;
   int *payload_last_use_ip;

   int node_count;
   int first_payload_node;
   int first_mrf_hack_node;
   int scratch_header_node;
   int grf127_send_hack_node;
   int first_vgrf_node;
   int last_vgrf_node;
   int first_spill_node;

   int *spill_vgrf_ip;
   int spill_vgrf_ip_alloc;
   int spill_node_count;

   fs_reg scratch_header;
};

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gfx7 get normally register allocated, and in register spilling to
 * see if we can actually use MRFs to do spills without overwriting normal MRF
 * contents.
 */
static void
get_used_mrfs(const fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->ver) * sizeof(bool));

   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (unsigned i = 0; i < inst->implied_mrf_writes(); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

namespace {
   /**
    * Maximum spill block size we expect to encounter in 32B units.
    *
    * This is somewhat arbitrary and doesn't necessarily limit the maximum
    * variable size that can be spilled -- a higher value will allow a
    * variable of a given size to be spilled more efficiently with a smaller
    * number of scratch messages, but will increase the likelihood of a
    * collision between the MRFs reserved for spilling and other MRFs used by
    * the program (and possibly increase GRF register pressure on platforms
    * without hardware MRFs), which could cause register allocation to fail.
    *
    * For the moment reserve just enough space so a register of 32 bit
    * component type and natural region width can be spilled without splitting
    * into multiple (force_writemask_all) scratch messages.
    */
   unsigned
   spill_max_size(const backend_shader *s)
   {
      /* FINISHME - On Gfx7+ it should be possible to avoid this limit
       *            altogether by spilling directly from the temporary GRF
       *            allocated to hold the result of the instruction (and the
       *            scratch write header).
       */
      /* FINISHME - The shader's dispatch width probably belongs in
       *            backend_shader (or some nonexistent fs_shader class?)
       *            rather than in the visitor class.
       */
      return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
   }

   /**
    * First MRF register available for spilling.
    */
   unsigned
   spill_base_mrf(const backend_shader *s)
   {
      /* We don't use the MRF hack on Gfx9+ */
      assert(s->devinfo->ver < 9);
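      /* Illustrative numbers for SIMD16 on Gfx7, where BRW_MAX_MRF is 16 and
       * spill_max_size() is 2: spilling starts at m13, which matches the
       * "MRFs 13-15" note in spill_reg() below.
       */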
      return BRW_MAX_MRF(s->devinfo->ver) - spill_max_size(s) - 1;
   }
}

void
fs_reg_alloc::setup_live_interference(unsigned node,
                                      int node_start_ip, int node_end_ip)
{
   /* Mark any virtual grf that is live between the start of the program and
    * the last use of a payload node interfering with that payload node.
    */
   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Note that we use a <= comparison, unlike vgrfs_interfere(),
       * in order to not have to worry about the uniform issue described in
       * calculate_live_intervals().
       */
      if (node_start_ip <= payload_last_use_ip[i])
         ra_add_node_interference(g, node, first_payload_node + i);
   }

   /* If we have the MRF hack enabled, mark this node as interfering with all
    * MRF registers.
    */
   if (first_mrf_hack_node >= 0) {
      for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->ver); i++)
         ra_add_node_interference(g, node, first_mrf_hack_node + i);
   }

   /* Everything interferes with the scratch header */
   if (scratch_header_node >= 0)
      ra_add_node_interference(g, node, scratch_header_node);

   /* Add interference with every vgrf whose live range intersects this
    * node's.  We only need to look at nodes below this one as the symmetry
    * of interference will take care of the rest.
    */
   for (unsigned n2 = first_vgrf_node;
        n2 <= (unsigned)last_vgrf_node && n2 < node; n2++) {
      unsigned vgrf = n2 - first_vgrf_node;
      if (!(node_end_ip <= live.vgrf_start[vgrf] ||
            live.vgrf_end[vgrf] <= node_start_ip))
         ra_add_node_interference(g, node, n2);
   }
}

void
fs_reg_alloc::setup_inst_interference(const fs_inst *inst)
{
   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
      for (unsigned i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   /* In 16-wide instructions we have an issue where a compressed
    * instruction is actually two instructions executed simultaneously.
    * It's actually ok to have the source and destination registers be
    * the same.  In this case, each instruction overwrites its own
    * source and there's no problem.  The real problem here is if the
    * source and destination registers are off by one.  Then you can end
    * up in a scenario where the first instruction overwrites the
    * source of the second instruction.  For example (illustrative), with
    * dst == g11..g12 and src == g10..g11, the first half writes g11
    * before the second half has read it.  Since the compiler doesn't
    * know about this level of granularity, we simply make the source and
    * destination interfere.
    */
   if (inst->exec_size >= 16 && inst->dst.file == VGRF) {
      for (int i = 0; i < inst->sources; ++i) {
         if (inst->src[i].file == VGRF) {
            ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                        first_vgrf_node + inst->src[i].nr);
         }
      }
   }

   if (grf127_send_hack_node >= 0) {
      /* From the Intel Broadwell PRM, vol 07, section "Instruction Set
       * Reference", subsection "EUISA Instructions", Send Message (page 990):
       *
       * "r127 must not be used for return address when there is a src and
       * dest overlap in send instruction."
       *
       * We avoid using grf127 as part of the destination of send messages by
       * adding a node interference to the grf127_send_hack_node.  This node
       * has a fixed assignment to grf127.
       *
       * We don't apply it to SIMD16 instructions because previous code avoids
       * any register overlap between sources and destination.
       */
      if (inst->exec_size < 16 && inst->is_send_from_grf() &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);

      /* Spill instructions are generated as SEND messages from MRFs, but
       * since Gfx7+ supports sending from GRFs, the driver assigns these MRF
       * registers to GRFs.  The implementation reuses the destination of the
       * send message as a source, so since there will be an overlap for sure,
       * we create an interference between the destination and grf127.
       */
      if ((inst->opcode == SHADER_OPCODE_GFX7_SCRATCH_READ ||
           inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_READ) &&
          inst->dst.file == VGRF)
         ra_add_node_interference(g, first_vgrf_node + inst->dst.nr,
                                     grf127_send_hack_node);
   }

   /* From the Skylake PRM Vol. 2a docs for sends:
    *
    *    "It is required that the second block of GRFs does not overlap with
    *    the first block."
    *
    * Normally, this is taken care of by fixup_sends_duplicate_payload() but
    * in the case where one of the registers is an undefined value, the
    * register allocator may decide that they don't interfere even though
    * they're used as sources in the same instruction.  We also need to add
    * interference here.
    */
   if (devinfo->ver >= 9) {
      if (inst->opcode == SHADER_OPCODE_SEND && inst->ex_mlen > 0 &&
          inst->src[2].file == VGRF && inst->src[3].file == VGRF &&
          inst->src[2].nr != inst->src[3].nr)
         ra_add_node_interference(g, first_vgrf_node + inst->src[2].nr,
                                     first_vgrf_node + inst->src[3].nr);
   }

   /* When we do send-from-GRF for FB writes, we need to ensure that the last
    * write instruction sends from a high register.  This is because the
    * vertex fetcher wants to start filling the low payload registers while
    * the pixel data port is still working on writing out the memory.  If we
    * don't do this, we get rendering artifacts.
    *
    * We could just do "something high".  Instead, we just pick the highest
    * register that works.
    */
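   /* Illustrative numbers: with BRW_MAX_GRF == 128 and a 2-GRF EOT payload,
    * the payload is pinned at r126; with the Gfx7 MRF hack active in SIMD16
    * (spill_base_mrf() == 13) it moves down three more to r123.
    */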
   if (inst->eot) {
      const int vgrf = inst->opcode == SHADER_OPCODE_SEND ?
                       inst->src[2].nr : inst->src[0].nr;
      int reg = BRW_MAX_GRF - fs->alloc.sizes[vgrf];

      if (first_mrf_hack_node >= 0) {
         /* If something happened to spill, we want to push the EOT send
          * register early enough in the register file that we don't
          * conflict with any used MRF hack registers.
          */
         reg -= BRW_MAX_MRF(devinfo->ver) - spill_base_mrf(fs);
      } else if (grf127_send_hack_node >= 0) {
         /* Avoid r127 which might be unusable if the node was previously
          * written by a SIMD8 SEND message with source/destination overlap.
          */
         reg--;
      }

      ra_set_node_reg(g, first_vgrf_node + vgrf, reg);

      if (inst->ex_mlen > 0) {
         const int vgrf = inst->src[3].nr;
         reg -= fs->alloc.sizes[vgrf];
         ra_set_node_reg(g, first_vgrf_node + vgrf, reg);
      }
   }
}

void
fs_reg_alloc::build_interference_graph(bool allow_spilling)
{
   /* Compute the RA node layout */
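   /* The node space below is laid out in this order (some ranges optional):
    *
    *    [payload] [MRF hack] [grf127 hack] [VGRFs] [scratch header] [spills]
    */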
   node_count = 0;
   first_payload_node = node_count;
   node_count += payload_node_count;
   if (devinfo->ver >= 7 && devinfo->ver < 9 && allow_spilling) {
      first_mrf_hack_node = node_count;
      node_count += BRW_MAX_GRF - GFX7_MRF_HACK_START;
   } else {
      first_mrf_hack_node = -1;
   }
   if (devinfo->ver >= 8) {
      grf127_send_hack_node = node_count;
      node_count++;
   } else {
      grf127_send_hack_node = -1;
   }
   first_vgrf_node = node_count;
   node_count += fs->alloc.count;
   last_vgrf_node = node_count - 1;
   if (devinfo->ver >= 9 && allow_spilling) {
      scratch_header_node = node_count++;
   } else {
      scratch_header_node = -1;
   }
   first_spill_node = node_count;

   fs->calculate_payload_ranges(payload_node_count,
                                payload_last_use_ip);

   assert(g == NULL);
   g = ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);
   ralloc_steal(mem_ctx, g);

   /* Set up the payload nodes */
   for (int i = 0; i < payload_node_count; i++)
      ra_set_node_reg(g, first_payload_node + i, i);

   if (first_mrf_hack_node >= 0) {
      /* Mark each MRF reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical-register classes,
       * which would just be silly.
       */
      for (int i = 0; i < BRW_MAX_MRF(devinfo->ver); i++) {
         ra_set_node_reg(g, first_mrf_hack_node + i,
                            GFX7_MRF_HACK_START + i);
      }
   }

   if (grf127_send_hack_node >= 0)
      ra_set_node_reg(g, grf127_send_hack_node, 127);

   /* Specify the classes of each virtual register. */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      unsigned size = fs->alloc.sizes[i];

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");

      ra_set_node_class(g, first_vgrf_node + i,
                        compiler->fs_reg_sets[rsi].classes[size - 1]);
   }

   /* Special case: on pre-Gfx7 hardware that supports PLN, the second operand
    * of a PLN instruction needs to be an even-numbered register, so we have a
    * special register class aligned_bary_class to handle this case.
    */
   if (compiler->fs_reg_sets[rsi].aligned_bary_class) {
      foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
         if (inst->opcode == FS_OPCODE_LINTERP && inst->src[0].file == VGRF &&
             fs->alloc.sizes[inst->src[0].nr] ==
               aligned_bary_size(fs->dispatch_width)) {
            ra_set_node_class(g, first_vgrf_node + inst->src[0].nr,
                              compiler->fs_reg_sets[rsi].aligned_bary_class);
         }
      }
   }

   /* Add interference based on the live range of the register */
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      setup_live_interference(first_vgrf_node + i,
                              live.vgrf_start[i],
                              live.vgrf_end[i]);
   }

   /* Add interference based on the instructions in which a register is used.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg)
      setup_inst_interference(inst);
}

void
fs_reg_alloc::discard_interference_graph()
{
   ralloc_free(g);
   g = NULL;
   have_spill_costs = false;
}

void
fs_reg_alloc::emit_unspill(const fs_builder &bld,
                           struct shader_stats *stats,
                           fs_reg dst,
                           uint32_t spill_offset, unsigned count)
{
   const intel_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      ++stats->fill_count;

      fs_inst *unspill_inst;
      if (devinfo->ver >= 9) {
         fs_reg header = this->scratch_header;
         fs_builder ubld = bld.exec_all().group(1, 0);
         assert(spill_offset % 16 == 0);
         unspill_inst = ubld.MOV(component(header, 2),
                                 brw_imm_ud(spill_offset / 16));
         _mesa_set_add(spill_insts, unspill_inst);

         unsigned bti;
         fs_reg ex_desc;
         if (devinfo->verx10 >= 125) {
            bti = GFX9_BTI_BINDLESS;
            ex_desc = component(this->scratch_header, 0);
         } else {
            bti = GFX8_BTI_STATELESS_NON_COHERENT;
            ex_desc = brw_imm_ud(0);
         }

         fs_reg srcs[] = { brw_imm_ud(0), ex_desc, header };
         unspill_inst = bld.emit(SHADER_OPCODE_SEND, dst,
                                 srcs, ARRAY_SIZE(srcs));
         unspill_inst->mlen = 1;
         unspill_inst->header_size = 1;
         unspill_inst->size_written = reg_size * REG_SIZE;
         unspill_inst->send_has_side_effects = false;
         unspill_inst->send_is_volatile = true;
         unspill_inst->sfid = GFX7_SFID_DATAPORT_DATA_CACHE;
         unspill_inst->desc =
            brw_dp_desc(devinfo, bti,
                        BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
                        BRW_DATAPORT_OWORD_BLOCK_DWORDS(reg_size * 8));
      } else if (devinfo->ver >= 7 && spill_offset < (1 << 12) * REG_SIZE) {
         /* The Gfx7 descriptor-based offset is 12 bits of HWORD units.
          * Because the Gfx7-style scratch block read is hardwired to BTI 255,
          * on Gfx9+ it would cause the DC to do an IA-coherent read, which
          * largely outweighs the slight advantage from not having to provide
          * the address as part of the message header, so we're better off
          * using plain old oword block reads.
          */
         unspill_inst = bld.emit(SHADER_OPCODE_GFX7_SCRATCH_READ, dst);
         unspill_inst->offset = spill_offset;
      } else {
         unspill_inst = bld.emit(SHADER_OPCODE_GFX4_SCRATCH_READ, dst);
         unspill_inst->offset = spill_offset;
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }
      _mesa_set_add(spill_insts, unspill_inst);

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

void
fs_reg_alloc::emit_spill(const fs_builder &bld,
                         struct shader_stats *stats,
                         fs_reg src,
                         uint32_t spill_offset, unsigned count)
{
   const intel_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      ++stats->spill_count;

      fs_inst *spill_inst;
      if (devinfo->ver >= 9) {
         fs_reg header = this->scratch_header;
         fs_builder ubld = bld.exec_all().group(1, 0);
         assert(spill_offset % 16 == 0);
         spill_inst = ubld.MOV(component(header, 2),
                               brw_imm_ud(spill_offset / 16));
         _mesa_set_add(spill_insts, spill_inst);

         unsigned bti;
         fs_reg ex_desc;
         if (devinfo->verx10 >= 125) {
            bti = GFX9_BTI_BINDLESS;
            ex_desc = component(this->scratch_header, 0);
         } else {
            bti = GFX8_BTI_STATELESS_NON_COHERENT;
            ex_desc = brw_imm_ud(0);
         }

         fs_reg srcs[] = { brw_imm_ud(0), ex_desc, header, src };
         spill_inst = bld.emit(SHADER_OPCODE_SEND, bld.null_reg_f(),
                               srcs, ARRAY_SIZE(srcs));
         spill_inst->mlen = 1;
         spill_inst->ex_mlen = reg_size;
         spill_inst->size_written = 0;
         spill_inst->header_size = 1;
         spill_inst->send_has_side_effects = true;
         spill_inst->send_is_volatile = false;
         spill_inst->sfid = GFX7_SFID_DATAPORT_DATA_CACHE;
         spill_inst->desc =
            brw_dp_desc(devinfo, bti,
                        GFX6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE,
                        BRW_DATAPORT_OWORD_BLOCK_DWORDS(reg_size * 8));
      } else {
         spill_inst = bld.emit(SHADER_OPCODE_GFX4_SCRATCH_WRITE,
                               bld.null_reg_f(), src);
         spill_inst->offset = spill_offset;
         spill_inst->mlen = 1 + reg_size; /* header, value */
         spill_inst->base_mrf = spill_base_mrf(bld.shader);
      }
      _mesa_set_add(spill_insts, spill_inst);

      src.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

void
fs_reg_alloc::set_spill_costs()
{
   float block_scale = 1.0;
   float spill_costs[fs->alloc.count];
   bool no_spill[fs->alloc.count];

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += regs_read(inst, i) * block_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += regs_written(inst) * block_scale;

      /* Don't spill anything we generated while spilling */
      if (_mesa_set_search(spill_insts, inst)) {
         for (unsigned int i = 0; i < inst->sources; i++) {
            if (inst->src[i].file == VGRF)
               no_spill[inst->src[i].nr] = true;
         }
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         block_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         block_scale /= 10;
         break;

      case BRW_OPCODE_IF:
      case BRW_OPCODE_IFF:
         block_scale *= 0.5;
         break;

      case BRW_OPCODE_ENDIF:
         block_scale /= 0.5;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < fs->alloc.count; i++) {
      /* Do the no_spill check first.  Registers that are used as spill
       * temporaries may have been allocated after we calculated liveness so
       * we shouldn't look up their liveness.  Fortunately, they're always
       * used in SCRATCH_READ/WRITE instructions so they'll always be flagged
       * no_spill.
       */
      if (no_spill[i])
         continue;

      int live_length = live.vgrf_end[i] - live.vgrf_start[i];
      if (live_length <= 0)
         continue;

      /* Divide the cost (in number of spills/fills) by the log of the length
       * of the live range of the register.  This will encourage spill logic
       * to spill long-living things before spilling short-lived things where
       * spilling is less likely to actually do us any good.  We use the log
       * of the length because it will fall off very quickly and not cause us
       * to spill medium length registers with more uses.
       */
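      /* Illustrative numbers: 20 spills over a live range of 100 instructions
       * cost 20 / ln(100) ~= 4.3, while 20 spills over a range of 10 cost
       * 20 / ln(10) ~= 8.7, so the long-lived register looks cheaper to
       * spill.
       */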
      float adjusted_cost = spill_costs[i] / logf(live_length);
      ra_set_node_spill_cost(g, first_vgrf_node + i, adjusted_cost);
   }

   have_spill_costs = true;
}

int
fs_reg_alloc::choose_spill_reg()
{
   if (!have_spill_costs)
      set_spill_costs();

   int node = ra_get_best_spill_node(g);
   if (node < 0)
      return -1;

   assert(node >= first_vgrf_node);
   return node - first_vgrf_node;
}

fs_reg
fs_reg_alloc::alloc_scratch_header()
{
   int vgrf = fs->alloc.allocate(1);
   assert(first_vgrf_node + vgrf == scratch_header_node);
   ra_set_node_class(g, scratch_header_node,
                        compiler->fs_reg_sets[rsi].classes[0]);

   setup_live_interference(scratch_header_node, 0, INT_MAX);

   return fs_reg(VGRF, vgrf, BRW_REGISTER_TYPE_UD);
}

fs_reg
fs_reg_alloc::alloc_spill_reg(unsigned size, int ip)
{
   int vgrf = fs->alloc.allocate(size);
   int n = ra_add_node(g, compiler->fs_reg_sets[rsi].classes[size - 1]);
   assert(n == first_vgrf_node + vgrf);
   assert(n == first_spill_node + spill_node_count);

   setup_live_interference(n, ip - 1, ip + 1);

   /* Add interference between this spill node and any other spill nodes for
    * the same instruction.
    */
   for (int s = 0; s < spill_node_count; s++) {
      if (spill_vgrf_ip[s] == ip)
         ra_add_node_interference(g, n, first_spill_node + s);
   }

   /* Add this spill node to the list for next time */
   if (spill_node_count >= spill_vgrf_ip_alloc) {
      if (spill_vgrf_ip_alloc == 0)
         spill_vgrf_ip_alloc = 16;
      else
         spill_vgrf_ip_alloc *= 2;
      spill_vgrf_ip = reralloc(mem_ctx, spill_vgrf_ip, int,
                               spill_vgrf_ip_alloc);
   }
   spill_vgrf_ip[spill_node_count++] = ip;

   return fs_reg(VGRF, vgrf);
}

void
fs_reg_alloc::spill_reg(unsigned spill_reg)
{
   int size = fs->alloc.sizes[spill_reg];
   unsigned int spill_offset = fs->last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gfx6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gfx4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!fs->spilled_any_registers) {
      if (devinfo->ver >= 9) {
         this->scratch_header = alloc_scratch_header();
         fs_builder ubld = fs->bld.exec_all().group(8, 0).at(
            fs->cfg->first_block(), fs->cfg->first_block()->start());

         fs_inst *inst;
         if (devinfo->verx10 >= 125) {
            inst = ubld.MOV(this->scratch_header, brw_imm_ud(0));
            _mesa_set_add(spill_insts, inst);
            inst = ubld.group(1, 0).AND(component(this->scratch_header, 0),
                                        retype(brw_vec1_grf(0, 5),
                                               BRW_REGISTER_TYPE_UD),
                                        brw_imm_ud(INTEL_MASK(31, 10)));
            _mesa_set_add(spill_insts, inst);
         } else {
            inst = ubld.emit(SHADER_OPCODE_SCRATCH_HEADER,
                             this->scratch_header);
            _mesa_set_add(spill_insts, inst);
         }
      } else {
         bool mrf_used[BRW_MAX_MRF(devinfo->ver)];
         get_used_mrfs(fs, mrf_used);

         for (int i = spill_base_mrf(fs); i < BRW_MAX_MRF(devinfo->ver); i++) {
            if (mrf_used[i]) {
               fs->fail("Register spilling not supported with m%d used", i);
               return;
            }
         }
      }

      fs->spilled_any_registers = true;
   }

   fs->last_scratch += size * REG_SIZE;

   /* We're about to replace all uses of this register.  It no longer
    * conflicts with anything so we can get rid of its interference.
    */
   ra_set_node_spill_cost(g, first_vgrf_node + spill_reg, 0);
   ra_reset_node_interference(g, first_vgrf_node + spill_reg);

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   int ip = 0;
   foreach_block_and_inst (block, fs_inst, inst, fs->cfg) {
      const fs_builder ibld = fs_builder(fs, block, inst);
      exec_node *before = inst->prev;
      exec_node *after = inst->next;

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            fs_reg unspill_dst = alloc_spill_reg(count, ip);

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
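            /* Illustrative numbers: count == 3 gives the largest POT divisor
             * of 24 dwords, i.e. an 8-channel read; count == 4 reaches the
             * 32-channel cap.
             */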
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));

            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels.  It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0), &fs->shader_stats,
                         unspill_dst, subset_spill_offset, count);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg &&
          inst->opcode != SHADER_OPCODE_UNDEF) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         fs_reg spill_src = alloc_spill_reg(regs_written(inst), ip);

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register).  We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * MIN2(
            DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
            spill_max_size(fs));
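         /* Illustrative numbers: a SIMD16 destination with a 4-byte type and
          * unit stride spans two GRFs per component, so with
          * spill_max_size() == 2 this yields 16-channel scratch messages.
          */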

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask.  If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written().  If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, &fs->shader_stats, spill_src,
                         subset_spill_offset, regs_written(inst));

         emit_spill(ubld.at(block, inst->next), &fs->shader_stats, spill_src,
                    subset_spill_offset, regs_written(inst));
      }

      for (fs_inst *inst = (fs_inst *)before->next;
           inst != after; inst = (fs_inst *)inst->next)
         setup_inst_interference(inst);

      /* We don't advance the ip for scratch read/write instructions
       * because we consider them to have the same ip as the instruction
       * we're spilling around for the purposes of interference.  Also, we're
       * inserting spill instructions without re-running liveness analysis
       * and we don't want to mess up our IPs.
       */
      if (!_mesa_set_search(spill_insts, inst))
         ip++;
   }

   assert(ip == live_instr_count);
}

bool
fs_reg_alloc::assign_regs(bool allow_spilling, bool spill_all)
{
   build_interference_graph(fs->spilled_any_registers || spill_all);

   bool spilled = false;
   while (1) {
      /* Debug of register spilling: Go spill everything. */
      if (unlikely(spill_all)) {
         int reg = choose_spill_reg();
         if (reg != -1) {
            spill_reg(reg);
            continue;
         }
      }

      if (ra_allocate(g))
         break;

      if (!allow_spilling)
         return false;

      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg();
      if (reg == -1)
         return false;

      /* If we're going to spill but we've never spilled before, we need to
       * re-build the interference graph with MRFs enabled to allow spilling.
       */
      if (!fs->spilled_any_registers) {
         discard_interference_graph();
         build_interference_graph(true);
      }

      spilled = true;

      spill_reg(reg);
   }

   if (spilled)
      fs->invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   unsigned hw_reg_mapping[fs->alloc.count];
   fs->grf_used = fs->first_non_payload_grf;
   for (unsigned i = 0; i < fs->alloc.count; i++) {
      int reg = ra_get_node_reg(g, first_vgrf_node + i);

      hw_reg_mapping[i] = reg;
      fs->grf_used = MAX2(fs->grf_used,
                          hw_reg_mapping[i] + fs->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, fs->cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   fs->alloc.count = fs->grf_used;

   return true;
}

bool
fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   fs_reg_alloc alloc(this);
   bool success = alloc.assign_regs(allow_spilling, spill_all);
   if (!success && allow_spilling) {
      fail("no register to spill:\n");
      dump_instructions(NULL);
   }
   return success;
}