/*
 * Copyright 2011 Christoph Bumiller
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nv50_ir.h"
#include "nv50_ir_target.h"

#include <algorithm>
#include <stack>
#include <limits>
#include <unordered_map>
#include <unordered_set>
namespace nv50_ir {

#define MAX_REGISTER_FILE_SIZE 256

class RegisterSet
{
public:
   RegisterSet(const Target *);

   void init(const Target *);
   void reset(DataFile, bool resetMax = false);

   void periodicMask(DataFile f, uint32_t lock, uint32_t unlock);
   void intersect(DataFile f, const RegisterSet *);

   bool assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg);
   void release(DataFile f, int32_t reg, unsigned int size);
   void occupy(DataFile f, int32_t reg, unsigned int size);
   void occupy(const Value *);
   void occupyMask(DataFile f, int32_t reg, uint8_t mask);
   bool isOccupied(DataFile f, int32_t reg, unsigned int size) const;
   bool testOccupy(const Value *);
   bool testOccupy(DataFile f, int32_t reg, unsigned int size);

   inline int getMaxAssigned(DataFile f) const { return fill[f]; }

   inline unsigned int getFileSize(DataFile f) const
   {
      return last[f] + 1;
   }

   inline unsigned int units(DataFile f, unsigned int size) const
   {
      return size >> unit[f];
   }
   // for regs of size >= 4, id is counted in 4-byte words (like nv50/c0 binary)
   inline unsigned int idToBytes(const Value *v) const
   {
      return v->reg.data.id * MIN2(v->reg.size, 4);
   }
   inline unsigned int idToUnits(const Value *v) const
   {
      return units(v->reg.file, idToBytes(v));
   }
   inline int bytesToId(Value *v, unsigned int bytes) const
   {
      if (v->reg.size < 4)
         return units(v->reg.file, bytes);
      return bytes / 4;
   }
   inline int unitsToId(DataFile f, int u, uint8_t size) const
   {
      if (u < 0)
         return -1;
      return (size < 4) ? u : ((u << unit[f]) / 4);
   }
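
   // Example of the conversions above, assuming a file with unit[f] == 1
   // (2-byte allocation granularity, e.g. nv50 GPRs): a b32 value with
   // id 3 covers bytes 12..15 (idToBytes), i.e. allocation units 6..7
   // (idToUnits), and unitsToId(f, 6, 4) maps unit 6 back to id 3.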

   void print(DataFile f) const;

   const bool restrictedGPR16Range;

private:
   BitSet bits[LAST_REGISTER_FILE + 1];

   int unit[LAST_REGISTER_FILE + 1]; // log2 of allocation granularity

   int last[LAST_REGISTER_FILE + 1];
   int fill[LAST_REGISTER_FILE + 1];
};

void
RegisterSet::reset(DataFile f, bool resetMax)
{
   bits[f].fill(0);
   if (resetMax)
      fill[f] = -1;
}

void
RegisterSet::init(const Target *targ)
{
   for (unsigned int rf = 0; rf <= LAST_REGISTER_FILE; ++rf) {
      DataFile f = static_cast<DataFile>(rf);
      last[rf] = targ->getFileSize(f) - 1;
      unit[rf] = targ->getFileUnit(f);
      fill[rf] = -1;
      assert(last[rf] < MAX_REGISTER_FILE_SIZE);
      bits[rf].allocate(last[rf] + 1, true);
   }
}

RegisterSet::RegisterSet(const Target *targ)
  : restrictedGPR16Range(targ->getChipset() < 0xc0)
{
   init(targ);
   for (unsigned int i = 0; i <= LAST_REGISTER_FILE; ++i)
      reset(static_cast<DataFile>(i));
}

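// Occupy the units in 'lock' and free the units in 'unlock', repeated for
// every aligned group of 32 allocation units of the file.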
void
RegisterSet::periodicMask(DataFile f, uint32_t lock, uint32_t unlock)
{
   bits[f].periodicMask32(lock, unlock);
}

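// Note: despite the name this intersects the sets of *free* registers,
// i.e. it ORs together the sets of occupied bits.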
void
RegisterSet::intersect(DataFile f, const RegisterSet *set)
{
   bits[f] |= set->bits[f];
}

void
RegisterSet::print(DataFile f) const
{
   INFO("GPR:");
   bits[f].print();
   INFO("\n");
}

bool
RegisterSet::assign(int32_t& reg, DataFile f, unsigned int size, unsigned int maxReg)
{
   reg = bits[f].findFreeRange(size, maxReg);
   if (reg < 0)
      return false;
   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
   return true;
}

bool
RegisterSet::isOccupied(DataFile f, int32_t reg, unsigned int size) const
{
   return bits[f].testRange(reg, size);
}

void
RegisterSet::occupy(const Value *v)
{
   occupy(v->reg.file, idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

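// Occupy the allocation units selected by 'mask' (taken relative to 'reg')
// inside reg's aligned 32-unit word; only valid for masks that do not cross
// a 32-unit boundary, i.e. aligned register tuples (see checkInterference).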
void
RegisterSet::occupyMask(DataFile f, int32_t reg, uint8_t mask)
{
   bits[f].setMask(reg & ~31, static_cast<uint32_t>(mask) << (reg % 32));
}

void
RegisterSet::occupy(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].setRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg occupy: %u[%i] %u\n", f, reg, size);

   fill[f] = MAX2(fill[f], (int32_t)(reg + size - 1));
}

bool
RegisterSet::testOccupy(const Value *v)
{
   return testOccupy(v->reg.file,
                     idToUnits(v), v->reg.size >> unit[v->reg.file]);
}

bool
RegisterSet::testOccupy(DataFile f, int32_t reg, unsigned int size)
{
   if (isOccupied(f, reg, size))
      return false;
   occupy(f, reg, size);
   return true;
}

void
RegisterSet::release(DataFile f, int32_t reg, unsigned int size)
{
   bits[f].clrRange(reg, size);

   INFO_DBG(0, REG_ALLOC, "reg release: %u[%i] %u\n", f, reg, size);
}

class RegAlloc
{
public:
   RegAlloc(Program *program) : prog(program), func(NULL), sequence(0) { }

   bool exec();
   bool execFunc();

private:
   class PhiMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      inline bool needNewElseBlock(BasicBlock *b, BasicBlock *p);
      inline void splitEdges(BasicBlock *b);
   };

   class ArgumentMovesPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
   };

   class BuildIntervalsPass : public Pass {
   private:
      virtual bool visit(BasicBlock *);
      void collectLiveValues(BasicBlock *);
      void addLiveRange(Value *, const BasicBlock *, int end);
   };

   class InsertConstraintsPass : public Pass {
   public:
      InsertConstraintsPass() : targ(NULL) { }
      bool exec(Function *func);
   private:
      virtual bool visit(BasicBlock *);

      void insertConstraintMove(Instruction *, int s);
      bool insertConstraintMoves();

      void condenseDefs(Instruction *);
      void condenseDefs(Instruction *, const int first, const int last);
      void condenseSrcs(Instruction *, const int first, const int last);

      void addHazard(Instruction *i, const ValueRef *src);
      void textureMask(TexInstruction *);
      void addConstraint(Instruction *, int s, int n);
      bool detectConflict(Instruction *, int s);

      // target specific functions, TODO: put in subclass or Target
      void texConstraintNV50(TexInstruction *);
      void texConstraintNVC0(TexInstruction *);
      void texConstraintNVE0(TexInstruction *);
      void texConstraintGM107(TexInstruction *);

      bool isScalarTexGM107(TexInstruction *);
      void handleScalarTexGM107(TexInstruction *);

      std::list<Instruction *> constrList;

      const Target *targ;
   };

   bool buildLiveSets(BasicBlock *);

private:
   Program *prog;
   Function *func;

   // instructions in control flow / chronological order
   ArrayList insns;

   int sequence; // for manual passes through CFG
};

typedef std::pair<Value *, Value *> ValuePair;

class MergedDefs
{
private:
   std::list<ValueDef *>& entry(Value *val) {
      auto it = defs.find(val);

      if (it == defs.end()) {
         std::list<ValueDef *> &res = defs[val];
         res = val->defs;
         return res;
      } else {
         return (*it).second;
      }
   }

   std::unordered_map<Value *, std::list<ValueDef *> > defs;

public:
   std::list<ValueDef *>& operator()(Value *val) {
      return entry(val);
   }

   void add(Value *val, const std::list<ValueDef *> &vals) {
      assert(val);
      std::list<ValueDef *> &valdefs = entry(val);
      valdefs.insert(valdefs.end(), vals.begin(), vals.end());
   }

   void removeDefsOfInstruction(Instruction *insn) {
      for (int d = 0; insn->defExists(d); ++d) {
         ValueDef *def = &insn->def(d);
         defs.erase(def->get());
         for (auto &p : defs)
            p.second.remove(def);
      }
   }

   void merge() {
      for (auto &p : defs)
         p.first->defs = p.second;
   }
};

class SpillCodeInserter
{
public:
   SpillCodeInserter(Function *fn, MergedDefs &mergedDefs) :
      func(fn), mergedDefs(mergedDefs), stackSize(0), stackBase(0) { }

   bool run(const std::list<ValuePair>&);

   Symbol *assignSlot(const Interval&, const unsigned int size);
   Value *offsetSlot(Value *, const LValue *);
   inline int32_t getStackSize() const { return stackSize; }

private:
   Function *func;
   MergedDefs &mergedDefs;

   struct SpillSlot
   {
      Interval occup;
      std::list<Value *> residents; // needed to recalculate occup
      Symbol *sym;
      int32_t offset;
      inline uint8_t size() const { return sym->reg.size; }
   };
   std::list<SpillSlot> slots;
   int32_t stackSize;
   int32_t stackBase;

   LValue *unspill(Instruction *usei, LValue *, Value *slot);
   void spill(Instruction *defi, Value *slot, LValue *);
};

void
RegAlloc::BuildIntervalsPass::addLiveRange(Value *val,
                                           const BasicBlock *bb,
                                           int end)
{
   Instruction *insn = val->getUniqueInsn();

   if (!insn)
      insn = bb->getFirst();

   assert(bb->getFirst()->serial <= bb->getExit()->serial);
   assert(bb->getExit()->serial + 1 >= end);

   int begin = insn->serial;
   if (begin < bb->getEntry()->serial || begin > bb->getExit()->serial)
      begin = bb->getEntry()->serial;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "%%%i <- live range [%i(%i), %i)\n",
            val->id, begin, insn->serial, end);

   if (begin != end) // empty ranges are only added as hazards for fixed regs
      val->livei.extend(begin, end);
}

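// A predecessor edge is critical if it comes from a block with multiple
// (forward) successors and enters a block with multiple predecessors; such
// an edge cannot hold the phi moves by itself, so the predecessor needs a
// new intermediate block.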
bool
RegAlloc::PhiMovesPass::needNewElseBlock(BasicBlock *b, BasicBlock *p)
{
   if (b->cfg.incidentCount() <= 1)
      return false;

   int n = 0;
   for (Graph::EdgeIterator ei = p->cfg.outgoing(); !ei.end(); ei.next())
      if (ei.getType() == Graph::Edge::TREE ||
          ei.getType() == Graph::Edge::FORWARD)
         ++n;
   return (n == 2);
}

struct PhiMapHash {
   size_t operator()(const std::pair<Instruction *, BasicBlock *>& val) const {
      return std::hash<Instruction*>()(val.first) * 31 +
         std::hash<BasicBlock*>()(val.second);
   }
};

typedef std::unordered_map<
   std::pair<Instruction *, BasicBlock *>, Value *, PhiMapHash> PhiMap;

// Critical edges need to be split up so that work can be inserted along
// specific edge transitions. Unfortunately manipulating incident edges into a
// BB invalidates all the PHI nodes since their sources are implicitly ordered
// by incident edge order.
//
// TODO: Make it so that that is not the case, and PHI nodes store pointers to
// the original BBs.
void
RegAlloc::PhiMovesPass::splitEdges(BasicBlock *bb)
{
   BasicBlock *pb, *pn;
   Instruction *phi;
   Graph::EdgeIterator ei;
   std::stack<BasicBlock *> stack;
   int j = 0;

   for (ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      pb = BasicBlock::get(ei.getNode());
      assert(pb);
      if (needNewElseBlock(bb, pb))
         stack.push(pb);
   }

   // No critical edges were found, no need to perform any work.
   if (stack.empty())
      return;

   // We're about to, potentially, reorder the inbound edges. This means that
   // we need to hold on to the (phi, bb) -> src mapping, and fix up the phi
   // nodes after the graph has been modified.
   PhiMap phis;

   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next)
         phis.insert(std::make_pair(std::make_pair(phi, pb), phi->getSrc(j)));
   }

   while (!stack.empty()) {
      pb = stack.top();
      pn = new BasicBlock(func);
      stack.pop();

      pb->cfg.detach(&bb->cfg);
      pb->cfg.attach(&pn->cfg, Graph::Edge::TREE);
      pn->cfg.attach(&bb->cfg, Graph::Edge::FORWARD);

      assert(pb->getExit()->op != OP_CALL);
      if (pb->getExit()->asFlow()->target.bb == bb)
         pb->getExit()->asFlow()->target.bb = pn;

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());
         phis.insert(std::make_pair(std::make_pair(phi, pn), it->second));
         phis.erase(it);
      }
   }

   // Now go through and fix up all of the phi node sources.
   j = 0;
   for (ei = bb->cfg.incident(); !ei.end(); ei.next(), j++) {
      pb = BasicBlock::get(ei.getNode());
      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         PhiMap::const_iterator it = phis.find(std::make_pair(phi, pb));
         assert(it != phis.end());

         phi->setSrc(j, it->second);
      }
   }
}

// For each operand of each PHI in b, generate a new value by inserting a MOV
// at the end of the block it is coming from and replace the operand with its
// result. This eliminates liveness conflicts and enables us to let values be
// copied to the right register if such a conflict exists nonetheless.
//
// These MOVs are also crucial in making sure the live intervals of phi
// sources are extended until the end of the loop, since they are not
// included in the live-in sets.
bool
RegAlloc::PhiMovesPass::visit(BasicBlock *bb)
{
   Instruction *phi, *mov;

   splitEdges(bb);

   // insert MOVs (phi->src(j) should stem from j-th in-BB)
   int j = 0;
   for (Graph::EdgeIterator ei = bb->cfg.incident(); !ei.end(); ei.next()) {
      BasicBlock *pb = BasicBlock::get(ei.getNode());
      if (!pb->isTerminated())
         pb->insertTail(new_FlowInstruction(func, OP_BRA, bb));

      for (phi = bb->getPhi(); phi && phi->op == OP_PHI; phi = phi->next) {
         LValue *tmp = new_LValue(func, phi->getDef(0)->asLValue());
         mov = new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));

         mov->setSrc(0, phi->getSrc(j));
         mov->setDef(0, tmp);
         phi->setSrc(j, tmp);

         pb->insertBefore(pb->getExit(), mov);
      }
      ++j;
   }

   return true;
}

bool
RegAlloc::ArgumentMovesPass::visit(BasicBlock *bb)
{
   // Bind function call inputs/outputs to the same physical register
   // the callee uses, inserting moves as appropriate in case a
   // conflict arises.
   for (Instruction *i = bb->getEntry(); i; i = i->next) {
      FlowInstruction *cal = i->asFlow();
      // TODO: Handle indirect calls.
      // Right now they should only be generated for builtins.
      if (!cal || cal->op != OP_CALL || cal->builtin || cal->indirect)
         continue;
      RegisterSet clobberSet(prog->getTarget());

      // Bind input values.
      for (int s = cal->indirect ? 1 : 0; cal->srcExists(s); ++s) {
         const int t = cal->indirect ? (s - 1) : s;
         LValue *tmp = new_LValue(func, cal->getSrc(s)->asLValue());
         tmp->reg.data.id = cal->target.fn->ins[t].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setDef(0, tmp);
         mov->setSrc(0, cal->getSrc(s));
         cal->setSrc(s, tmp);

         bb->insertBefore(cal, mov);
      }

      // Bind output values.
      for (int d = 0; cal->defExists(d); ++d) {
         LValue *tmp = new_LValue(func, cal->getDef(d)->asLValue());
         tmp->reg.data.id = cal->target.fn->outs[d].rep()->reg.data.id;

         Instruction *mov =
            new_Instruction(func, OP_MOV, typeOfSize(tmp->reg.size));
         mov->setSrc(0, tmp);
         mov->setDef(0, cal->getDef(d));
         cal->setDef(d, tmp);

         bb->insertAfter(cal, mov);
         clobberSet.occupy(tmp);
      }

      // Bind clobbered values.
      for (std::deque<Value *>::iterator it = cal->target.fn->clobbers.begin();
           it != cal->target.fn->clobbers.end();
           ++it) {
         if (clobberSet.testOccupy(*it)) {
            Value *tmp = new_LValue(func, (*it)->asLValue());
            tmp->reg.data.id = (*it)->reg.data.id;
            cal->setDef(cal->defCount(), tmp);
         }
      }
   }

   // Update the clobber set of the function.
   if (BasicBlock::get(func->cfgExit) == bb) {
      func->buildDefSets();
      for (unsigned int i = 0; i < bb->defSet.getSize(); ++i)
         if (bb->defSet.test(i))
            func->clobbers.push_back(func->getLValue(i));
   }

   return true;
}

// Build the set of live-in variables of bb.
bool
RegAlloc::buildLiveSets(BasicBlock *bb)
{
   Function *f = bb->getFunction();
   BasicBlock *bn;
   Instruction *i;
   unsigned int s, d;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "buildLiveSets(BB:%i)\n", bb->getId());

   bb->liveSet.allocate(func->allLValues.getSize(), false);

   int n = 0;
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      bn = BasicBlock::get(ei.getNode());
      if (bn == bb)
         continue;
      if (bn->cfg.visit(sequence))
         if (!buildLiveSets(bn))
            return false;
      if (n++ || bb->liveSet.marker)
         bb->liveSet |= bn->liveSet;
      else
         bb->liveSet = bn->liveSet;
   }
   if (!n && !bb->liveSet.marker)
      bb->liveSet.fill(0);
   bb->liveSet.marker = true;

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set of out blocks:\n", bb->getId());
      bb->liveSet.print();
   }

   // if (!bb->getEntry())
   //   return true;

   if (bb == BasicBlock::get(f->cfgExit)) {
      for (std::deque<ValueRef>::iterator it = f->outs.begin();
           it != f->outs.end(); ++it) {
         assert(it->get()->asLValue());
         bb->liveSet.set(it->get()->id);
      }
   }

   for (i = bb->getExit(); i && i != bb->getEntry()->prev; i = i->prev) {
      for (d = 0; i->defExists(d); ++d)
         bb->liveSet.clr(i->getDef(d)->id);
      for (s = 0; i->srcExists(s); ++s)
         if (i->getSrc(s)->asLValue())
            bb->liveSet.set(i->getSrc(s)->id);
   }
   for (i = bb->getPhi(); i && i->op == OP_PHI; i = i->next)
      bb->liveSet.clr(i->getDef(0)->id);

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("BB:%i live set after propagation:\n", bb->getId());
      bb->liveSet.print();
   }

   return true;
}

void
RegAlloc::BuildIntervalsPass::collectLiveValues(BasicBlock *bb)
{
   BasicBlock *bbA = NULL, *bbB = NULL;

   if (bb->cfg.outgoingCount()) {
      // trickery to save a loop of OR'ing liveSets
      // aliasing works fine with BitSet::setOr
      for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
         if (bbA) {
            bb->liveSet.setOr(&bbA->liveSet, &bbB->liveSet);
            bbA = bb;
         } else {
            bbA = bbB;
         }
         bbB = BasicBlock::get(ei.getNode());
      }
      bb->liveSet.setOr(&bbB->liveSet, bbA ? &bbA->liveSet : NULL);
   } else
   if (bb->cfg.incidentCount()) {
      bb->liveSet.fill(0);
   }
}

bool
RegAlloc::BuildIntervalsPass::visit(BasicBlock *bb)
{
   collectLiveValues(bb);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "BuildIntervals(BB:%i)\n", bb->getId());

   // go through the out blocks and remove phi sources that do not originate
   // from the current block from the live set
   for (Graph::EdgeIterator ei = bb->cfg.outgoing(); !ei.end(); ei.next()) {
      BasicBlock *out = BasicBlock::get(ei.getNode());

      for (Instruction *i = out->getPhi(); i && i->op == OP_PHI; i = i->next) {
         bb->liveSet.clr(i->getDef(0)->id);

         for (int s = 0; i->srcExists(s); ++s) {
            assert(i->src(s).getInsn());
            if (i->getSrc(s)->getUniqueInsn()->bb == bb) // XXX: reachableBy ?
               bb->liveSet.set(i->getSrc(s)->id);
            else
               bb->liveSet.clr(i->getSrc(s)->id);
         }
      }
   }

   // remaining live-outs are live until end
   if (bb->getExit()) {
      for (unsigned int j = 0; j < bb->liveSet.getSize(); ++j)
         if (bb->liveSet.test(j))
            addLiveRange(func->getLValue(j), bb, bb->getExit()->serial + 1);
   }

   for (Instruction *i = bb->getExit(); i && i->op != OP_PHI; i = i->prev) {
      for (int d = 0; i->defExists(d); ++d) {
         bb->liveSet.clr(i->getDef(d)->id);
         if (i->getDef(d)->reg.data.id >= 0) // add hazard for fixed regs
            i->getDef(d)->livei.extend(i->serial, i->serial);
      }

      for (int s = 0; i->srcExists(s); ++s) {
         if (!i->getSrc(s)->asLValue())
            continue;
         if (!bb->liveSet.test(i->getSrc(s)->id)) {
            bb->liveSet.set(i->getSrc(s)->id);
            addLiveRange(i->getSrc(s), bb, i->serial);
         }
      }
   }

   if (bb == BasicBlock::get(func->cfg.getRoot())) {
      for (std::deque<ValueDef>::iterator it = func->ins.begin();
           it != func->ins.end(); ++it) {
         if (it->get()->reg.data.id >= 0) // add hazard for fixed regs
            it->get()->livei.extend(0, 1);
      }
   }

   return true;
}


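// Each coalescing pass is enabled by one of these masks: PHI joins phi
// definitions with their (move-isolated) sources, UNION/MERGE/SPLIT force
// the members of register tuples together, MOV joins copy-related values,
// and TEX ties texture sources to their definitions on chipsets that
// require it (the nv50 family, see coalesce()).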
#define JOIN_MASK_PHI        (1 << 0)
#define JOIN_MASK_UNION      (1 << 1)
#define JOIN_MASK_MOV        (1 << 2)
#define JOIN_MASK_TEX        (1 << 3)

class GCRA
{
public:
   GCRA(Function *, SpillCodeInserter&, MergedDefs&);
   ~GCRA();

   bool allocateRegisters(ArrayList& insns);

   void printNodeInfo() const;

private:
   class RIG_Node : public Graph::Node
   {
   public:
      RIG_Node();

      void init(const RegisterSet&, LValue *);

      void addInterference(RIG_Node *);
      void addRegPreference(RIG_Node *);

      inline LValue *getValue() const
      {
         return reinterpret_cast<LValue *>(data);
      }
      inline void setValue(LValue *lval) { data = lval; }

      inline uint8_t getCompMask() const
      {
         return ((1 << colors) - 1) << (reg & 7);
      }

      static inline RIG_Node *get(const Graph::EdgeIterator& ei)
      {
         return static_cast<RIG_Node *>(ei.getNode());
      }

   public:
      uint32_t degree;
      uint16_t degreeLimit; // if deg < degLimit, node is trivially colourable
      uint16_t maxReg;
      uint16_t colors;

      DataFile f;
      int32_t reg;

      float weight;

      // list pointers for simplify() phase
      RIG_Node *next;
      RIG_Node *prev;

      // union of the live intervals of all coalesced values (we want to retain
      //  the separate intervals for testing interference of compound values)
      Interval livei;

      std::list<RIG_Node *> prefRegs;
   };

private:
   inline RIG_Node *getNode(const LValue *v) const { return &nodes[v->id]; }

   void buildRIG(ArrayList&);
   bool coalesce(ArrayList&);
   bool doCoalesce(ArrayList&, unsigned int mask);
   void calculateSpillWeights();
   bool simplify();
   bool selectRegisters();
   void cleanup(const bool success);

   void simplifyEdge(RIG_Node *, RIG_Node *);
   void simplifyNode(RIG_Node *);

   void copyCompound(Value *dst, Value *src);
   bool coalesceValues(Value *, Value *, bool force);
   void resolveSplitsAndMerges();
   void makeCompound(Instruction *, bool isSplit);

   inline void checkInterference(const RIG_Node *, Graph::EdgeIterator&);

   inline void insertOrderedTail(std::list<RIG_Node *>&, RIG_Node *);
   void checkList(std::list<RIG_Node *>&);

private:
   std::stack<uint32_t> stack;

   // list headers for simplify() phase
   RIG_Node lo[2];
   RIG_Node hi;

   Graph RIG;
   RIG_Node *nodes;
   unsigned int nodeCount;

   Function *func;
   Program *prog;

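   // relDegree[i][j] is i rounded up to the next multiple of j: the number
   // of size-j allocation slots that a size-i neighbour can block,
   // e.g. relDegree[3][2] == 4.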
   struct RelDegree {
      uint8_t data[17][17];

      RelDegree() {
         for (int i = 1; i <= 16; ++i)
            for (int j = 1; j <= 16; ++j)
               data[i][j] = j * ((i + j - 1) / j);
      }

      const uint8_t* operator[](std::size_t i) const {
         return data[i];
      }
   };

   static const RelDegree relDegree;

   RegisterSet regs;

   // need to fix up register ids for participants of OP_MERGE/SPLIT
   std::list<Instruction *> merges;
   std::list<Instruction *> splits;

   SpillCodeInserter& spill;
   std::list<ValuePair> mustSpill;

   MergedDefs &mergedDefs;
};

const GCRA::RelDegree GCRA::relDegree;

GCRA::RIG_Node::RIG_Node() : Node(NULL), degree(0), degreeLimit(0), maxReg(0),
   colors(0), f(FILE_NULL), reg(0), weight(0), next(this), prev(this)
{
}

void
GCRA::printNodeInfo() const
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      if (!nodes[i].colors)
         continue;
      INFO("RIG_Node[%%%i]($[%u]%i): %u colors, weight %f, deg %u/%u\n X",
           i,
           nodes[i].f, nodes[i].reg, nodes[i].colors,
           nodes[i].weight,
           nodes[i].degree, nodes[i].degreeLimit);

      for (Graph::EdgeIterator ei = nodes[i].outgoing(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      for (Graph::EdgeIterator ei = nodes[i].incident(); !ei.end(); ei.next())
         INFO(" %%%i", RIG_Node::get(ei)->getValue()->id);
      INFO("\n");
   }
}

static bool
isShortRegOp(Instruction *insn)
{
   // Immediates are always in src1 (except zeroes, which end up getting
   // replaced with a zero reg). Every other situation can be resolved by
   // using a long encoding.
   return insn->srcExists(1) && insn->src(1).getFile() == FILE_IMMEDIATE &&
      insn->getSrc(1)->reg.data.u64;
}

// Check if this LValue is ever used in an instruction that can't be encoded
// with long registers (i.e. > r63)
static bool
isShortRegVal(LValue *lval)
{
   if (lval->getInsn() == NULL)
      return false;
   for (Value::DefCIterator def = lval->defs.begin();
        def != lval->defs.end(); ++def)
      if (isShortRegOp((*def)->getInsn()))
         return true;
   for (Value::UseCIterator use = lval->uses.begin();
        use != lval->uses.end(); ++use)
      if (isShortRegOp((*use)->getInsn()))
         return true;
   return false;
}

void
GCRA::RIG_Node::init(const RegisterSet& regs, LValue *lval)
{
   setValue(lval);
   if (lval->reg.data.id >= 0)
      lval->noSpill = lval->fixedReg = 1;

   colors = regs.units(lval->reg.file, lval->reg.size);
   f = lval->reg.file;
   reg = -1;
   if (lval->reg.data.id >= 0)
      reg = regs.idToUnits(lval);

   weight = std::numeric_limits<float>::infinity();
   degree = 0;
   maxReg = regs.getFileSize(f);
   // On nv50, we lose a bit of gpr encoding when there's an embedded
   // immediate.
   if (regs.restrictedGPR16Range &&
       f == FILE_GPR &&
       (lval->reg.size == 2 || isShortRegVal(lval)))
      maxReg /= 2;
   degreeLimit = maxReg;
   degreeLimit -= relDegree[1][colors] - 1;

   livei.insert(lval->livei);
}

// Used when coalescing moves. The non-compound value will become one, e.g.:
// mov b32 $r0 $r2            / merge b64 $r0d { $r0 $r1 }
// split b64 { $r0 $r1 } $r0d / mov b64 $r0d f64 $r2d
void
GCRA::copyCompound(Value *dst, Value *src)
{
   LValue *ldst = dst->asLValue();
   LValue *lsrc = src->asLValue();

   if (ldst->compound && !lsrc->compound) {
      LValue *swap = lsrc;
      lsrc = ldst;
      ldst = swap;
   }

   assert(!ldst->compound);

   if (lsrc->compound) {
      for (ValueDef *d : mergedDefs(ldst->join)) {
         LValue *ldst = d->get()->asLValue();
         if (!ldst->compound)
            ldst->compMask = 0xff;
         ldst->compound = 1;
         ldst->compMask &= lsrc->compMask;
      }
   }
}

bool
GCRA::coalesceValues(Value *dst, Value *src, bool force)
{
   LValue *rep = dst->join->asLValue();
   LValue *val = src->join->asLValue();

   if (!force && val->reg.data.id >= 0) {
      rep = src->join->asLValue();
      val = dst->join->asLValue();
   }
   RIG_Node *nRep = &nodes[rep->id];
   RIG_Node *nVal = &nodes[val->id];

   if (src->reg.file != dst->reg.file) {
      if (!force)
         return false;
      WARN("forced coalescing of values in different files !\n");
   }
   if (!force && dst->reg.size != src->reg.size)
      return false;

   if ((rep->reg.data.id >= 0) && (rep->reg.data.id != val->reg.data.id)) {
      if (force) {
         if (val->reg.data.id >= 0)
            WARN("forced coalescing of values in different fixed regs !\n");
      } else {
         if (val->reg.data.id >= 0)
            return false;
         // make sure that there is no overlap with the fixed register of rep
         for (ArrayList::Iterator it = func->allLValues.iterator();
              !it.end(); it.next()) {
            Value *reg = reinterpret_cast<Value *>(it.get())->asLValue();
            assert(reg);
            if (reg->interfers(rep) && reg->livei.overlaps(nVal->livei))
               return false;
         }
      }
   }

   if (!force && nRep->livei.overlaps(nVal->livei))
      return false;

   // TODO: Handle this case properly.
   if (!force && rep->compound && val->compound)
      return false;

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "joining %%%i($%i) <- %%%i\n",
            rep->id, rep->reg.data.id, val->id);

   if (!force)
      copyCompound(dst, src);

   // set join pointer of all values joined with val
   const std::list<ValueDef *> &defs = mergedDefs(val);
   for (ValueDef *def : defs)
      def->get()->join = rep;
   assert(rep->join == rep && val->join == rep);

   // add val's definitions to rep and extend the live interval of its RIG node
   mergedDefs.add(rep, defs);
   nRep->livei.unify(nVal->livei);
   nRep->degreeLimit = MIN2(nRep->degreeLimit, nVal->degreeLimit);
   nRep->maxReg = MIN2(nRep->maxReg, nVal->maxReg);
   return true;
}

bool
GCRA::coalesce(ArrayList& insns)
{
   bool ret = doCoalesce(insns, JOIN_MASK_PHI);
   if (!ret)
      return false;
   switch (func->getProgram()->getTarget()->getChipset() & ~0xf) {
   case 0x50:
   case 0x80:
   case 0x90:
   case 0xa0:
      ret = doCoalesce(insns, JOIN_MASK_UNION | JOIN_MASK_TEX);
      break;
   case 0xc0:
   case 0xd0:
   case 0xe0:
   case 0xf0:
   case 0x100:
   case 0x110:
   case 0x120:
   case 0x130:
   case 0x140:
   case 0x160:
   case 0x170:
      ret = doCoalesce(insns, JOIN_MASK_UNION);
      break;
   default:
      break;
   }
   if (!ret)
      return false;
   return doCoalesce(insns, JOIN_MASK_MOV);
}

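// Compute the 8-bit lane mask of one member of a compound value: 'base' and
// 'size' are in allocation units, 'compSize' is the size of the whole
// compound. The mask is replicated so it stays valid at any aligned
// position, e.g. for a two-unit compound: makeCompMask(2, 0, 1) == 0x55
// (low halves) and makeCompMask(2, 1, 1) == 0xaa (high halves).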
static inline uint8_t makeCompMask(int compSize, int base, int size)
{
   uint8_t m = ((1 << size) - 1) << base;

   switch (compSize) {
   case 1:
      return 0xff;
   case 2:
      m |= (m << 2);
      return (m << 4) | m;
   case 3:
   case 4:
      return (m << 4) | m;
   default:
      assert(compSize <= 8);
      return m;
   }
}

void
GCRA::makeCompound(Instruction *insn, bool split)
{
   LValue *rep = (split ? insn->getSrc(0) : insn->getDef(0))->asLValue();

   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC) {
      INFO("makeCompound(split = %i): ", split);
      insn->print();
   }

   const unsigned int size = getNode(rep)->colors;
   unsigned int base = 0;

   if (!rep->compound)
      rep->compMask = 0xff;
   rep->compound = 1;

   for (int c = 0; split ? insn->defExists(c) : insn->srcExists(c); ++c) {
      LValue *val = (split ? insn->getDef(c) : insn->getSrc(c))->asLValue();

      val->compound = 1;
      if (!val->compMask)
         val->compMask = 0xff;
      val->compMask &= makeCompMask(size, base, getNode(val)->colors);
      assert(val->compMask);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "compound: %%%i:%02x <- %%%i:%02x\n",
           rep->id, rep->compMask, val->id, val->compMask);

      base += getNode(val)->colors;
   }
   assert(base == size);
}

bool
GCRA::doCoalesce(ArrayList& insns, unsigned int mask)
{
   int c, n;

   for (n = 0; n < insns.getSize(); ++n) {
      Instruction *i;
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(n));

      switch (insn->op) {
      case OP_PHI:
         if (!(mask & JOIN_MASK_PHI))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            if (!coalesceValues(insn->getDef(0), insn->getSrc(c), false)) {
               // this is bad
               ERROR("failed to coalesce phi operands\n");
               return false;
            }
         break;
      case OP_UNION:
      case OP_MERGE:
         if (!(mask & JOIN_MASK_UNION))
            break;
         for (c = 0; insn->srcExists(c); ++c)
            coalesceValues(insn->getDef(0), insn->getSrc(c), true);
         if (insn->op == OP_MERGE) {
            merges.push_back(insn);
            if (insn->srcExists(1))
               makeCompound(insn, false);
         }
         break;
      case OP_SPLIT:
         if (!(mask & JOIN_MASK_UNION))
            break;
         splits.push_back(insn);
         for (c = 0; insn->defExists(c); ++c)
            coalesceValues(insn->getSrc(0), insn->getDef(c), true);
         makeCompound(insn, true);
         break;
      case OP_MOV:
         if (!(mask & JOIN_MASK_MOV))
            break;
         i = NULL;
         if (!insn->getDef(0)->uses.empty())
            i = (*insn->getDef(0)->uses.begin())->getInsn();
         // if this is a constraint-move there will only be a single use
         if (i && i->op == OP_MERGE) // do we really still need this ?
            break;
         i = insn->getSrc(0)->getUniqueInsn();
         if (i && !i->constrainedDefs()) {
            coalesceValues(insn->getDef(0), insn->getSrc(0), false);
         }
         break;
      case OP_TEX:
      case OP_TXB:
      case OP_TXL:
      case OP_TXF:
      case OP_TXQ:
      case OP_TXD:
      case OP_TXG:
      case OP_TXLQ:
      case OP_TEXCSAA:
      case OP_TEXPREP:
         if (!(mask & JOIN_MASK_TEX))
            break;
         for (c = 0; insn->srcExists(c) && c != insn->predSrc; ++c)
            coalesceValues(insn->getDef(c), insn->getSrc(c), true);
         break;
      default:
         break;
      }
   }
   return true;
}

void
GCRA::RIG_Node::addInterference(RIG_Node *node)
{
   this->degree += relDegree[node->colors][colors];
   node->degree += relDegree[colors][node->colors];

   this->attach(node, Graph::Edge::CROSS);
}

void
GCRA::RIG_Node::addRegPreference(RIG_Node *node)
{
   prefRegs.push_back(node);
}

GCRA::GCRA(Function *fn, SpillCodeInserter& spill, MergedDefs& mergedDefs) :
   nodes(NULL),
   nodeCount(0),
   func(fn),
   regs(fn->getProgram()->getTarget()),
   spill(spill),
   mergedDefs(mergedDefs)
{
   prog = func->getProgram();
}

GCRA::~GCRA()
{
   if (nodes)
      delete[] nodes;
}

void
GCRA::checkList(std::list<RIG_Node *>& lst)
{
   GCRA::RIG_Node *prev = NULL;

   for (std::list<RIG_Node *>::iterator it = lst.begin();
        it != lst.end();
        ++it) {
      assert((*it)->getValue()->join == (*it)->getValue());
      if (prev)
         assert(prev->livei.begin() <= (*it)->livei.begin());
      prev = *it;
   }
}

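// Insert into a list ordered by ascending start of the live interval,
// scanning backwards from the tail: nodes arrive mostly in order, so the
// insertion is nearly O(1).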
void
GCRA::insertOrderedTail(std::list<RIG_Node *>& list, RIG_Node *node)
{
   if (node->livei.isEmpty())
      return;
   // only the intervals of joined values don't necessarily arrive in order
   std::list<RIG_Node *>::iterator prev, it;
   for (it = list.end(); it != list.begin(); it = prev) {
      prev = it;
      --prev;
      if ((*prev)->livei.begin() <= node->livei.begin())
         break;
   }
   list.insert(it, node);
}

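// Build the register interference graph with a linear scan over the live
// intervals, sorted by start: 'active' holds the nodes whose intervals may
// still overlap the current one, and nodes whose intervals have ended are
// retired as we advance.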
void
GCRA::buildRIG(ArrayList& insns)
{
   std::list<RIG_Node *> values, active;

   for (std::deque<ValueDef>::iterator it = func->ins.begin();
        it != func->ins.end(); ++it)
      insertOrderedTail(values, getNode(it->get()->asLValue()));

   for (int i = 0; i < insns.getSize(); ++i) {
      Instruction *insn = reinterpret_cast<Instruction *>(insns.get(i));
      for (int d = 0; insn->defExists(d); ++d)
         if (insn->getDef(d)->reg.file <= LAST_REGISTER_FILE &&
             insn->getDef(d)->rep() == insn->getDef(d))
            insertOrderedTail(values, getNode(insn->getDef(d)->asLValue()));
   }
   checkList(values);

   while (!values.empty()) {
      RIG_Node *cur = values.front();

      for (std::list<RIG_Node *>::iterator it = active.begin();
           it != active.end();) {
         RIG_Node *node = *it;

         if (node->livei.end() <= cur->livei.begin()) {
            it = active.erase(it);
         } else {
            if (node->f == cur->f && node->livei.overlaps(cur->livei))
               cur->addInterference(node);
            ++it;
         }
      }
      values.pop_front();
      active.push_back(cur);
   }
}

void
GCRA::calculateSpillWeights()
{
   for (unsigned int i = 0; i < nodeCount; ++i) {
      RIG_Node *const n = &nodes[i];
      if (!nodes[i].colors || nodes[i].livei.isEmpty())
         continue;
      if (nodes[i].reg >= 0) {
         // update max reg
         regs.occupy(n->f, n->reg, n->colors);
         continue;
      }
      LValue *val = nodes[i].getValue();

      if (!val->noSpill) {
         int rc = 0;
         for (ValueDef *def : mergedDefs(val))
            rc += def->get()->refCount();

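         // spill weight: the squared reference count favours keeping
         // heavily used values, while dividing by the live interval's
         // extent makes long, sparsely used ranges cheap to spill
         // (simplify() picks the lowest weight/degree as spill candidate)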
         nodes[i].weight =
            (float)rc * (float)rc / (float)nodes[i].livei.extent();
      }

      if (nodes[i].degree < nodes[i].degreeLimit) {
         int l = 0;
         if (val->reg.size > 4)
            l = 1;
         DLLIST_ADDHEAD(&lo[l], &nodes[i]);
      } else {
         DLLIST_ADDHEAD(&hi, &nodes[i]);
      }
   }
   if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      printNodeInfo();
}

void
GCRA::simplifyEdge(RIG_Node *a, RIG_Node *b)
{
   bool move = b->degree >= b->degreeLimit;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "edge: (%%%i, deg %u/%u) >-< (%%%i, deg %u/%u)\n",
            a->getValue()->id, a->degree, a->degreeLimit,
            b->getValue()->id, b->degree, b->degreeLimit);

   b->degree -= relDegree[a->colors][b->colors];

   move = move && b->degree < b->degreeLimit;
   if (move && !DLLIST_EMPTY(b)) {
      int l = (b->getValue()->reg.size > 4) ? 1 : 0;
      DLLIST_DEL(b);
      DLLIST_ADDTAIL(&lo[l], b);
   }
}

void
GCRA::simplifyNode(RIG_Node *node)
{
   for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
      simplifyEdge(node, RIG_Node::get(ei));

   DLLIST_DEL(node);
   stack.push(node->getValue()->id);

   INFO_DBG(prog->dbgFlags, REG_ALLOC, "SIMPLIFY: pushed %%%i%s\n",
            node->getValue()->id,
            (node->degree < node->degreeLimit) ? "" : "(spill)");
}

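// Simplification in the classic graph-colouring sense: repeatedly remove
// trivially colourable nodes (degree < degreeLimit) and push them onto the
// stack; when only high-degree nodes remain, optimistically push the one
// with the lowest weight/degree score as a potential spill.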
bool
GCRA::simplify()
{
   for (;;) {
      if (!DLLIST_EMPTY(&lo[0])) {
         do {
            simplifyNode(lo[0].next);
         } while (!DLLIST_EMPTY(&lo[0]));
      } else
      if (!DLLIST_EMPTY(&lo[1])) {
         simplifyNode(lo[1].next);
      } else
      if (!DLLIST_EMPTY(&hi)) {
         RIG_Node *best = hi.next;
         unsigned bestMaxReg = best->maxReg;
         float bestScore = best->weight / (float)best->degree;
         // Spill candidate. First go through the ones with the highest max
         // register, then the ones with lower. That way the ones with the
         // lowest requirement will be allocated first, since it's a stack.
         for (RIG_Node *it = best->next; it != &hi; it = it->next) {
            float score = it->weight / (float)it->degree;
            if (score < bestScore || it->maxReg > bestMaxReg) {
               best = it;
               bestScore = score;
               bestMaxReg = it->maxReg;
            }
         }
         if (isinf(bestScore)) {
            ERROR("no viable spill candidates left\n");
            return false;
         }
         simplifyNode(best);
      } else {
         return true;
      }
   }
}

void
GCRA::checkInterference(const RIG_Node *node, Graph::EdgeIterator& ei)
{
   const RIG_Node *intf = RIG_Node::get(ei);

   if (intf->reg < 0)
      return;
   LValue *vA = node->getValue();
   LValue *vB = intf->getValue();

   const uint8_t intfMask = ((1 << intf->colors) - 1) << (intf->reg & 7);

   if (vA->compound | vB->compound) {
      // NOTE: this only works for >aligned< register tuples !
      for (const ValueDef *D : mergedDefs(vA)) {
      for (const ValueDef *d : mergedDefs(vB)) {
         const LValue *vD = D->get()->asLValue();
         const LValue *vd = d->get()->asLValue();

         if (!vD->livei.overlaps(vd->livei)) {
            INFO_DBG(prog->dbgFlags, REG_ALLOC, "(%%%i) X (%%%i): no overlap\n",
                     vD->id, vd->id);
            continue;
         }

         uint8_t mask = vD->compound ? vD->compMask : ~0;
         if (vd->compound) {
            assert(vB->compound);
            mask &= vd->compMask & vB->compMask;
         } else {
            mask &= intfMask;
         }

         INFO_DBG(prog->dbgFlags, REG_ALLOC,
                  "(%%%i)%02x X (%%%i)%02x & %02x: $r%i.%02x\n",
                  vD->id,
                  vD->compound ? vD->compMask : 0xff,
                  vd->id,
                  vd->compound ? vd->compMask : intfMask,
                  vB->compMask, intf->reg & ~7, mask);
         if (mask)
            regs.occupyMask(node->f, intf->reg & ~7, mask);
      }
      }
   } else {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "(%%%i) X (%%%i): $r%i + %u\n",
               vA->id, vB->id, intf->reg, intf->colors);
      regs.occupy(node->f, intf->reg, intf->colors);
   }
}

bool
GCRA::selectRegisters()
{
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nSELECT phase\n");

   while (!stack.empty()) {
      RIG_Node *node = &nodes[stack.top()];
      stack.pop();

      regs.reset(node->f);

      INFO_DBG(prog->dbgFlags, REG_ALLOC, "\nNODE[%%%i, %u colors]\n",
               node->getValue()->id, node->colors);

      for (Graph::EdgeIterator ei = node->outgoing(); !ei.end(); ei.next())
         checkInterference(node, ei);
      for (Graph::EdgeIterator ei = node->incident(); !ei.end(); ei.next())
         checkInterference(node, ei);

      if (!node->prefRegs.empty()) {
         for (std::list<RIG_Node *>::const_iterator it = node->prefRegs.begin();
              it != node->prefRegs.end();
              ++it) {
            if ((*it)->reg >= 0 &&
                regs.testOccupy(node->f, (*it)->reg, node->colors)) {
               node->reg = (*it)->reg;
               break;
            }
         }
      }
      if (node->reg >= 0)
         continue;
      LValue *lval = node->getValue();
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         regs.print(node->f);
      bool ret = regs.assign(node->reg, node->f, node->colors, node->maxReg);
      if (ret) {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "assigned reg %i\n", node->reg);
         lval->compMask = node->getCompMask();
      } else {
         INFO_DBG(prog->dbgFlags, REG_ALLOC, "must spill: %%%i (size %u)\n",
                  lval->id, lval->reg.size);
         Symbol *slot = NULL;
         if (lval->reg.file == FILE_GPR)
            slot = spill.assignSlot(node->livei, lval->reg.size);
         mustSpill.push_back(ValuePair(lval, slot));
      }
   }
   if (!mustSpill.empty())
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = nodes[i].getValue();
      if (nodes[i].reg >= 0 && nodes[i].colors > 0)
         lval->reg.data.id =
            regs.unitsToId(nodes[i].f, nodes[i].reg, lval->reg.size);
   }
   return true;
}

bool
GCRA::allocateRegisters(ArrayList& insns)
{
   bool ret;

   INFO_DBG(prog->dbgFlags, REG_ALLOC,
            "allocateRegisters to %u instructions\n", insns.getSize());

   nodeCount = func->allLValues.getSize();
   nodes = new RIG_Node[nodeCount];
   if (!nodes)
      return false;
   for (unsigned int i = 0; i < nodeCount; ++i) {
      LValue *lval = reinterpret_cast<LValue *>(func->allLValues.get(i));
      if (lval) {
         nodes[i].init(regs, lval);
         RIG.insert(&nodes[i]);

         if (lval->inFile(FILE_GPR) && lval->getInsn() != NULL) {
            Instruction *insn = lval->getInsn();
            if (insn->op != OP_MAD && insn->op != OP_FMA && insn->op != OP_SAD)
               continue;
            // For both of the cases below, we only want to add the preference
            // if all arguments are in registers.
            if (insn->src(0).getFile() != FILE_GPR ||
                insn->src(1).getFile() != FILE_GPR ||
                insn->src(2).getFile() != FILE_GPR)
               continue;
            if (prog->getTarget()->getChipset() < 0xc0) {
               // Outputting a flag is not supported with short encodings nor
               // with immediate arguments.
               // See handleMADforNV50.
               if (insn->flagsDef >= 0)
                  continue;
            } else {
               // We can only fold immediate arguments if dst == src2. This
               // only matters if one of the first two arguments is an
               // immediate. This form is also only supported for floats.
               // See handleMADforNVC0.
               ImmediateValue imm;
               if (insn->dType != TYPE_F32)
                  continue;
               if (!insn->src(0).getImmediate(imm) &&
                   !insn->src(1).getImmediate(imm))
                  continue;
            }

            nodes[i].addRegPreference(getNode(insn->getSrc(2)->asLValue()));
         }
      }
   }

   // coalesce first, we use only 1 RIG node for a group of joined values
   ret = coalesce(insns);
   if (!ret)
      goto out;

   if (func->getProgram()->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
      func->printLiveIntervals();

   buildRIG(insns);
   calculateSpillWeights();
   ret = simplify();
   if (!ret)
      goto out;

   ret = selectRegisters();
   if (!ret) {
      INFO_DBG(prog->dbgFlags, REG_ALLOC,
               "selectRegisters failed, inserting spill code ...\n");
      regs.reset(FILE_GPR, true);
      spill.run(mustSpill);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();
   } else {
      mergedDefs.merge();
      prog->maxGPR = std::max(prog->maxGPR, regs.getMaxAssigned(FILE_GPR));
   }

out:
   cleanup(ret);
   return ret;
}

void
GCRA::cleanup(const bool success)
{
   mustSpill.clear();

   for (ArrayList::Iterator it = func->allLValues.iterator();
        !it.end(); it.next()) {
      LValue *lval = reinterpret_cast<LValue *>(it.get());

      lval->livei.clear();

      lval->compound = 0;
      lval->compMask = 0;

      if (lval->join == lval)
         continue;

      if (success)
         lval->reg.data.id = lval->join->reg.data.id;
      else
         lval->join = lval;
   }

   if (success)
      resolveSplitsAndMerges();
   splits.clear(); // avoid duplicate entries on next coalesce pass
   merges.clear();

   delete[] nodes;
   nodes = NULL;
   hi.next = hi.prev = &hi;
   lo[0].next = lo[0].prev = &lo[0];
   lo[1].next = lo[1].prev = &lo[1];
}

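// First-fit spill slot assignment: try to reuse an existing stack slot
// whose occupancy interval does not overlap 'livei' before growing the
// stack by another (aligned) 'size' bytes.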
1646Symbol *
1647SpillCodeInserter::assignSlot(const Interval &livei, const unsigned int size)
1648{
1649   SpillSlot slot;
1650   int32_t offsetBase = stackSize;
1651   int32_t offset;
1652   std::list<SpillSlot>::iterator pos = slots.end(), it = slots.begin();
1653
1654   if (!func->stackPtr) {
1655      // Later, we compute the address as (offsetBase + tlsBase)
1656      // tlsBase might not be size-aligned, so we add just enough
1657      // to give the final address the correct alignment
1658      offsetBase = align(offsetBase + func->tlsBase, size) - func->tlsBase;
1659   } else {
1660      offsetBase = align(offsetBase, size);
1661   }
1662
1663   slot.sym = NULL;
1664
1665   for (offset = offsetBase; offset < stackSize; offset += size) {
1666      const int32_t entryEnd = offset + size;
1667      while (it != slots.end() && it->offset < offset)
1668         ++it;
1669      if (it == slots.end()) // no slots left
1670         break;
1671      std::list<SpillSlot>::iterator bgn = it;
1672
1673      while (it != slots.end() && it->offset < entryEnd) {
1674         it->occup.print();
1675         if (it->occup.overlaps(livei))
1676            break;
1677         ++it;
1678      }
1679      if (it == slots.end() || it->offset >= entryEnd) {
1680         // fits
1681         for (; bgn != slots.end() && bgn->offset < entryEnd; ++bgn) {
1682            bgn->occup.insert(livei);
1683            if (bgn->size() == size)
1684               slot.sym = bgn->sym;
1685         }
1686         break;
1687      }
1688   }
1689   if (!slot.sym) {
1690      stackSize = offset + size;
1691      slot.offset = offset;
1692      slot.sym = new_Symbol(func->getProgram(), FILE_MEMORY_LOCAL);
1693      if (!func->stackPtr)
1694         offset += func->tlsBase;
1695      slot.sym->setAddress(NULL, offset);
1696      slot.sym->reg.size = size;
1697      slots.insert(pos, slot)->occup.insert(livei);
1698   }
1699   return slot.sym;
1700}
1701
1702Value *
1703SpillCodeInserter::offsetSlot(Value *base, const LValue *lval)
1704{
1705   if (!lval->compound || (lval->compMask & 0x1))
1706      return base;
1707   Value *slot = cloneShallow(func, base);
1708
1709   slot->reg.data.offset += (ffs(lval->compMask) - 1) * lval->reg.size;
1710   slot->reg.size = lval->reg.size;
1711
1712   return slot;
1713}
1714
1715void
1716SpillCodeInserter::spill(Instruction *defi, Value *slot, LValue *lval)
1717{
1718   const DataType ty = typeOfSize(lval->reg.size);
1719
1720   slot = offsetSlot(slot, lval);
1721
1722   Instruction *st;
1723   if (slot->reg.file == FILE_MEMORY_LOCAL) {
1724      lval->noSpill = 1;
1725      if (ty != TYPE_B96) {
1726         st = new_Instruction(func, OP_STORE, ty);
1727         st->setSrc(0, slot);
1728         st->setSrc(1, lval);
1729      } else {
1730         st = new_Instruction(func, OP_SPLIT, ty);
1731         st->setSrc(0, lval);
1732         for (int d = 0; d < lval->reg.size / 4; ++d)
1733            st->setDef(d, new_LValue(func, FILE_GPR));
1734
1735         for (int d = lval->reg.size / 4 - 1; d >= 0; --d) {
1736            Value *tmp = cloneShallow(func, slot);
1737            tmp->reg.size = 4;
1738            tmp->reg.data.offset += 4 * d;
1739
1740            Instruction *s = new_Instruction(func, OP_STORE, TYPE_U32);
1741            s->setSrc(0, tmp);
1742            s->setSrc(1, st->getDef(d));
1743            defi->bb->insertAfter(defi, s);
1744         }
1745      }
1746   } else {
1747      st = new_Instruction(func, OP_CVT, ty);
1748      st->setDef(0, slot);
1749      st->setSrc(0, lval);
1750      if (lval->reg.file == FILE_FLAGS)
1751         st->flagsSrc = 0;
1752   }
1753   defi->bb->insertAfter(defi, st);
1754}
1755
LValue *
SpillCodeInserter::unspill(Instruction *usei, LValue *lval, Value *slot)
{
   const DataType ty = typeOfSize(lval->reg.size);

   slot = offsetSlot(slot, lval);
   lval = cloneShallow(func, lval);

   Instruction *ld;
   if (slot->reg.file == FILE_MEMORY_LOCAL) {
      lval->noSpill = 1;
      if (ty != TYPE_B96) {
         ld = new_Instruction(func, OP_LOAD, ty);
      } else {
         ld = new_Instruction(func, OP_MERGE, ty);
         for (int d = 0; d < lval->reg.size / 4; ++d) {
            Value *tmp = cloneShallow(func, slot);
            LValue *val;
            tmp->reg.size = 4;
            tmp->reg.data.offset += 4 * d;

            Instruction *l = new_Instruction(func, OP_LOAD, TYPE_U32);
            l->setDef(0, (val = new_LValue(func, FILE_GPR)));
            l->setSrc(0, tmp);
            usei->bb->insertBefore(usei, l);
            ld->setSrc(d, val);
            val->noSpill = 1;
         }
         ld->setDef(0, lval);
         usei->bb->insertBefore(usei, ld);
         return lval;
      }
   } else {
      ld = new_Instruction(func, OP_CVT, ty);
   }
   ld->setDef(0, lval);
   ld->setSrc(0, slot);
   if (lval->reg.file == FILE_FLAGS)
      ld->flagsDef = 0;

   usei->bb->insertBefore(usei, ld);
   return lval;
}

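// Order uses by basic block id, then by instruction serial, to make the
// rewrite order below deterministic.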
static bool
value_cmp(ValueRef *a, ValueRef *b)
{
   Instruction *ai = a->getInsn(), *bi = b->getInsn();
   if (ai->bb != bi->bb)
      return ai->bb->getId() < bi->bb->getId();
   return ai->serial < bi->serial;
}

// For each value that is to be spilled, go through all its definitions.
// A value can have multiple definitions if it has been coalesced before.
// For each definition, first go through all its uses and insert an unspill
// instruction before it, then replace the use with the temporary register.
// Unspill can be either a load from memory or simply a move to another
// register file.
// For "pseudo" instructions (like PHI, SPLIT, MERGE) we can erase the use
// if we have spilled to a memory location, or simply replace it with the
// new register. No load or conversion instruction should be needed.
bool
SpillCodeInserter::run(const std::list<ValuePair>& lst)
{
   for (std::list<ValuePair>::const_iterator it = lst.begin(); it != lst.end();
        ++it) {
      LValue *lval = it->first->asLValue();
      Symbol *mem = it->second ? it->second->asSym() : NULL;

      // Keep track of which instructions to delete later. Deleting them
      // inside the loop is unsafe since a single instruction may have
      // multiple destinations that all need to be spilled (like OP_SPLIT).
      std::unordered_set<Instruction *> to_del;

      std::list<ValueDef *> &defs = mergedDefs(lval);
      for (Value::DefIterator d = defs.begin(); d != defs.end();
           ++d) {
         Value *slot = mem ?
            static_cast<Value *>(mem) : new_LValue(func, FILE_GPR);
         Value *tmp = NULL;
         Instruction *last = NULL;

         LValue *dval = (*d)->get()->asLValue();
         Instruction *defi = (*d)->getInsn();

         // Sort all the uses by BB/instruction so that we don't unspill
         // multiple times in a row, and also remove a source of
         // non-determinism.
         std::vector<ValueRef *> refs(dval->uses.begin(), dval->uses.end());
         std::sort(refs.begin(), refs.end(), value_cmp);

         // Unspill at each use *before* inserting spill instructions;
         // we don't want to have the spill instructions in the use list here.
         for (std::vector<ValueRef*>::const_iterator it = refs.begin();
              it != refs.end(); ++it) {
            ValueRef *u = *it;
            Instruction *usei = u->getInsn();
            assert(usei);
            if (usei->isPseudo()) {
               tmp = (slot->reg.file == FILE_MEMORY_LOCAL) ? NULL : slot;
               last = NULL;
            } else {
               if (!last || (usei != last->next && usei != last))
                  tmp = unspill(usei, dval, slot);
               last = usei;
            }
            u->set(tmp);
         }

         assert(defi);
         if (defi->isPseudo()) {
            d = defs.erase(d);
            --d;
            if (slot->reg.file == FILE_MEMORY_LOCAL)
               to_del.insert(defi);
            else
               defi->setDef(0, slot);
         } else {
            spill(defi, slot, dval);
         }
      }

      for (std::unordered_set<Instruction *>::const_iterator it = to_del.begin();
           it != to_del.end(); ++it) {
         mergedDefs.removeDefsOfInstruction(*it);
         delete_Instruction(func->getProgram(), *it);
      }
   }

   // TODO: We're not trying to reuse old slots in a potential next iteration.
   //  We have to update the slots' livei intervals to be able to do that.
   stackBase = stackSize;
   slots.clear();
   return true;
}

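// Allocate registers for each function, walking the call graph depth-first
// and accumulating the per-function TLS requirements into the program.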
bool
RegAlloc::exec()
{
   for (IteratorRef it = prog->calls.iteratorDFS(false);
        !it->end(); it->next()) {
      func = Function::get(reinterpret_cast<Graph::Node *>(it->get()));

      func->tlsBase = prog->tlsSize;
      if (!execFunc())
         return false;
      prog->tlsSize += func->tlsSize;
   }
   return true;
}

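// Run the allocation pipeline for a single function: insert constraints and
// phi/argument moves, then repeatedly rebuild live sets and intervals and run
// GCRA, for at most three attempts (spilling happens between attempts).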
bool
RegAlloc::execFunc()
{
   MergedDefs mergedDefs;
   InsertConstraintsPass insertConstr;
   PhiMovesPass insertPhiMoves;
   ArgumentMovesPass insertArgMoves;
   BuildIntervalsPass buildIntervals;
   SpillCodeInserter insertSpills(func, mergedDefs);

   GCRA gcra(func, insertSpills, mergedDefs);

   unsigned int i, retries;
   bool ret;

   if (!func->ins.empty()) {
      // Insert a nop at the entry so inputs only used by the first instruction
      // don't count as having an empty live range.
      Instruction *nop = new_Instruction(func, OP_NOP, TYPE_NONE);
      BasicBlock::get(func->cfg.getRoot())->insertHead(nop);
   }

   ret = insertConstr.exec(func);
   if (!ret)
      goto out;

   ret = insertPhiMoves.run(func);
   if (!ret)
      goto out;

   ret = insertArgMoves.run(func);
   if (!ret)
      goto out;

   // TODO: need to fix up spill slot usage ranges to support > 1 retry
   for (retries = 0; retries < 3; ++retries) {
      if (retries && (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC))
         INFO("Retry: %i\n", retries);
      if (prog->dbgFlags & NV50_IR_DEBUG_REG_ALLOC)
         func->print();

      // spilling to registers may add live ranges, need to rebuild everything
      ret = true;
      for (sequence = func->cfg.nextSequence(), i = 0;
           ret && i <= func->loopNestingBound;
           sequence = func->cfg.nextSequence(), ++i)
         ret = buildLiveSets(BasicBlock::get(func->cfg.getRoot()));
      // reset marker
      for (ArrayList::Iterator bi = func->allBBlocks.iterator();
           !bi.end(); bi.next())
         BasicBlock::get(bi)->liveSet.marker = false;
      if (!ret)
         break;
      func->orderInstructions(this->insns);

      ret = buildIntervals.run(func);
      if (!ret)
         break;
      ret = gcra.allocateRegisters(insns);
      if (ret)
         break; // success
   }
   INFO_DBG(prog->dbgFlags, REG_ALLOC, "RegAlloc done: %i\n", ret);

   func->tlsSize = insertSpills.getStackSize();
out:
   return ret;
}

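// Propagate the register assignment of a compound value to the defs of its
// OP_SPLITs and the sources of its OP_MERGEs, so all parts end up in the
// expected consecutive registers.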
// TODO: check if modifying Instruction::join here breaks anything
void
GCRA::resolveSplitsAndMerges()
{
   for (std::list<Instruction *>::iterator it = splits.begin();
        it != splits.end();
        ++it) {
      Instruction *split = *it;
      unsigned int reg = regs.idToBytes(split->getSrc(0));
      for (int d = 0; split->defExists(d); ++d) {
         Value *v = split->getDef(d);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         reg += v->reg.size;
      }
   }
   splits.clear();

   for (std::list<Instruction *>::iterator it = merges.begin();
        it != merges.end();
        ++it) {
      Instruction *merge = *it;
      unsigned int reg = regs.idToBytes(merge->getDef(0));
      for (int s = 0; merge->srcExists(s); ++s) {
         Value *v = merge->getSrc(s);
         v->reg.data.id = regs.bytesToId(v, reg);
         v->join = v;
         // If the value is defined by a phi/union node, we also need to
         // perform the same fixup on that node's sources, since after RA
         // their registers should be identical.
         if (v->getInsn()->op == OP_PHI || v->getInsn()->op == OP_UNION) {
            Instruction *phi = v->getInsn();
            for (int phis = 0; phi->srcExists(phis); ++phis) {
               phi->getSrc(phis)->join = v;
               phi->getSrc(phis)->reg.data.id = v->reg.data.id;
            }
         }
         reg += v->reg.size;
      }
   }
   merges.clear();
}

bool Program::registerAllocation()
{
   RegAlloc ra(this);
   return ra.exec();
}

bool
RegAlloc::InsertConstraintsPass::exec(Function *ir)
{
   constrList.clear();

   bool ret = run(ir, true, true);
   if (ret)
      ret = insertConstraintMoves();
   return ret;
}

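// Drop components whose defs are unused from the texture fetch mask and
// compact the remaining defs to the front.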
// TODO: make part of texture insn
void
RegAlloc::InsertConstraintsPass::textureMask(TexInstruction *tex)
{
   Value *def[4];
   int c, k, d;
   uint8_t mask = 0;

   for (d = 0, k = 0, c = 0; c < 4; ++c) {
      if (!(tex->tex.mask & (1 << c)))
         continue;
      if (tex->getDef(k)->refCount()) {
         mask |= 1 << c;
         def[d++] = tex->getDef(k);
      }
      ++k;
   }
   tex->tex.mask = mask;

   for (c = 0; c < d; ++c)
      tex->setDef(c, def[c]);
   for (; c < 4; ++c)
      tex->setDef(c, NULL);
}

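// A constrained source needs a private copy if it is also used outside this
// constraint, appears here more than once, or is defined by an instruction
// whose defs are themselves constrained.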
bool
RegAlloc::InsertConstraintsPass::detectConflict(Instruction *cst, int s)
{
   Value *v = cst->getSrc(s);

   // current register allocation can't handle it if a value participates in
   // multiple constraints
   for (Value::UseIterator it = v->uses.begin(); it != v->uses.end(); ++it) {
      if (cst != (*it)->getInsn())
         return true;
   }

   // can start at s + 1 because detectConflict is called on all sources
   for (int c = s + 1; cst->srcExists(c); ++c)
      if (v == cst->getSrc(c))
         return true;

   Instruction *defi = v->getInsn();

   return (!defi || defi->constrainedDefs());
}

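// Copy n sources of i, starting at s, through an OP_CONSTRAINT whose defs
// will be allocated to consecutive registers, reusing an identical
// dominating constraint if one already exists.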
void
RegAlloc::InsertConstraintsPass::addConstraint(Instruction *i, int s, int n)
{
   Instruction *cst;
   int d;

   // first, look for an existing identical constraint op
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      cst = (*it);
      if (!i->bb->dominatedBy(cst->bb))
         break;
      for (d = 0; d < n; ++d)
         if (cst->getSrc(d) != i->getSrc(d + s))
            break;
      if (d >= n) {
         for (d = 0; d < n; ++d, ++s)
            i->setSrc(s, cst->getDef(d));
         return;
      }
   }
   cst = new_Instruction(func, OP_CONSTRAINT, i->dType);

   for (d = 0; d < n; ++s, ++d) {
      cst->setDef(d, new_LValue(func, FILE_GPR));
      cst->setSrc(d, i->getSrc(s));
      i->setSrc(s, cst->getDef(d));
   }
   i->bb->insertBefore(i, cst);

   constrList.push_back(cst);
}

// Add a dummy use of the pointer source of >= 8 byte loads after the load
// to prevent it from being assigned a register that overlaps the load's
// destination, which would cause random corruption.
void
RegAlloc::InsertConstraintsPass::addHazard(Instruction *i, const ValueRef *src)
{
   Instruction *hzd = new_Instruction(func, OP_NOP, TYPE_NONE);
   hzd->setSrc(0, src->get());
   i->bb->insertAfter(i, hzd);
}

// b32 { %r0 %r1 %r2 %r3 } -> b128 %r0q
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn)
{
   int n;
   for (n = 0; insn->defExists(n) && insn->def(n).getFile() == FILE_GPR; ++n);
   condenseDefs(insn, 0, n - 1);
}

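// Combine defs a..b into a single wide value, re-distributing the original
// defs from it with an OP_SPLIT inserted after the instruction.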
void
RegAlloc::InsertConstraintsPass::condenseDefs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getDef(s)->reg.size;
   if (!size)
      return;

   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Instruction *split = new_Instruction(func, OP_SPLIT, typeOfSize(size));
   split->setSrc(0, lval);
   for (int d = a; d <= b; ++d) {
      split->setDef(d - a, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   insn->setDef(a, lval);

   for (int k = a + 1, d = b + 1; insn->defExists(d); ++d, ++k) {
      insn->setDef(k, insn->getDef(d));
      insn->setDef(d, NULL);
   }
   // carry over predicate if any (mainly for OP_UNION uses)
   split->setPredicate(insn->cc, insn->getPredicate());

   insn->bb->insertAfter(insn, split);
   constrList.push_back(split);
}

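// Combine sources a..b into a single wide value produced by an OP_MERGE
// inserted before the instruction, so they get consecutive registers.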
void
RegAlloc::InsertConstraintsPass::condenseSrcs(Instruction *insn,
                                              const int a, const int b)
{
   uint8_t size = 0;
   if (a >= b)
      return;
   for (int s = a; s <= b; ++s)
      size += insn->getSrc(s)->reg.size;
   if (!size)
      return;
   LValue *lval = new_LValue(func, FILE_GPR);
   lval->reg.size = size;

   Value *save[3];
   insn->takeExtraSources(0, save);

   Instruction *merge = new_Instruction(func, OP_MERGE, typeOfSize(size));
   merge->setDef(0, lval);
   for (int s = a, i = 0; s <= b; ++s, ++i) {
      merge->setSrc(i, insn->getSrc(s));
   }
   insn->moveSources(b + 1, a - b);
   insn->setSrc(a, lval);
   insn->bb->insertBefore(insn, merge);

   insn->putExtraSources(0, save);

   constrList.push_back(merge);
}

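// Whether this texture fetch qualifies for the scalar TEXS/TLDS/TLD4S
// encodings on Maxwell; the legal variants are listed below.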
bool
RegAlloc::InsertConstraintsPass::isScalarTexGM107(TexInstruction *tex)
{
   if (tex->tex.sIndirectSrc >= 0 ||
       tex->tex.rIndirectSrc >= 0 ||
       tex->tex.derivAll)
      return false;

   if (tex->tex.mask == 5 || tex->tex.mask == 6)
      return false;

   switch (tex->op) {
   case OP_TEX:
   case OP_TXF:
   case OP_TXG:
   case OP_TXL:
      break;
   default:
      return false;
   }

   // legal variants:
   // TEXS.1D.LZ
   // TEXS.2D
   // TEXS.2D.LZ
   // TEXS.2D.LL
   // TEXS.2D.DC
   // TEXS.2D.LL.DC
   // TEXS.2D.LZ.DC
   // TEXS.A2D
   // TEXS.A2D.LZ
   // TEXS.A2D.LZ.DC
   // TEXS.3D
   // TEXS.3D.LZ
   // TEXS.CUBE
   // TEXS.CUBE.LL

   // TLDS.1D.LZ
   // TLDS.1D.LL
   // TLDS.2D.LZ
   // TLDS.2D.LZ.AOFFI
   // TLDS.2D.LZ.MZ
   // TLDS.2D.LL
   // TLDS.2D.LL.AOFFI
   // TLDS.A2D.LZ
   // TLDS.3D.LZ

   // TLD4S: all 2D/RECT variants and only offset

   switch (tex->op) {
   case OP_TEX:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
      case TEX_TARGET_2D_ARRAY_SHADOW:
         return tex->tex.levelZero;
      case TEX_TARGET_CUBE:
         return !tex->tex.levelZero;
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_3D:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   case OP_TXL:
      if (tex->tex.useOffsets)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
      case TEX_TARGET_CUBE:
         return true;
      default:
         return false;
      }

   case OP_TXF:
      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_1D:
         return !tex->tex.useOffsets;
      case TEX_TARGET_2D:
      case TEX_TARGET_RECT:
         return true;
      case TEX_TARGET_2D_ARRAY:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_3D:
         return !tex->tex.useOffsets && tex->tex.levelZero;
      default:
         return false;
      }

   case OP_TXG:
      if (tex->tex.useOffsets > 1)
         return false;
      if (tex->tex.mask != 0x3 && tex->tex.mask != 0xf)
         return false;

      switch (tex->tex.target.getEnum()) {
      case TEX_TARGET_2D:
      case TEX_TARGET_2D_MS:
      case TEX_TARGET_2D_SHADOW:
      case TEX_TARGET_RECT:
      case TEX_TARGET_RECT_SHADOW:
         return true;
      default:
         return false;
      }

   default:
      return false;
   }
}

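// Scalar tex ops pack their values into at most two def and two src register
// pairs; condense defs and sources accordingly.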
void
RegAlloc::InsertConstraintsPass::handleScalarTexGM107(TexInstruction *tex)
{
   int defCount = tex->defCount(0xff);
   int srcCount = tex->srcCount(0xff);

   tex->tex.scalar = true;

   // 1. handle defs
   if (defCount > 3)
      condenseDefs(tex, 2, 3);
   if (defCount > 1)
      condenseDefs(tex, 0, 1);

   // 2. handle srcs
   // special case for TXF.A2D
   if (tex->op == OP_TXF && tex->tex.target == TEX_TARGET_2D_ARRAY) {
      assert(srcCount >= 3);
      condenseSrcs(tex, 1, 2);
   } else {
      if (srcCount > 3)
         condenseSrcs(tex, 2, 3);
      // only if we have more than 2 sources
      if (srcCount > 2)
         condenseSrcs(tex, 0, 1);
   }

   assert(!tex->defExists(2) && !tex->srcExists(2));
}

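// GM107 and later: prefer the scalar encoding when legal (pre-GV100);
// otherwise pack defs and sources into the register groups the ops expect.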
void
RegAlloc::InsertConstraintsPass::texConstraintGM107(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (targ->getChipset() < NVISA_GV100_CHIPSET) {
      if (isScalarTexGM107(tex)) {
         handleScalarTexGM107(tex);
         return;
      }

      assert(!tex->tex.scalar);
      condenseDefs(tex);
   } else {
      if (isTextureOp(tex->op)) {
         int defCount = tex->defCount(0xff);
         if (defCount > 3)
            condenseDefs(tex, 2, 3);
         if (defCount > 1)
            condenseDefs(tex, 0, 1);
      } else {
         condenseDefs(tex);
      }
   }

   if (isSurfaceOp(tex->op)) {
      int s = tex->tex.target.getDim() +
         (tex->tex.target.isArray() || tex->tex.target.isCube());
      int n = 0;

      switch (tex->op) {
      case OP_SUSTB:
      case OP_SUSTP:
         n = 4;
         break;
      case OP_SUREDB:
      case OP_SUREDP:
         if (tex->subOp == NV50_IR_SUBOP_ATOM_CAS)
            n = 2;
         break;
      default:
         break;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1)
         condenseSrcs(tex, 1, n); // do not condense the tex handle
   } else
   if (isTextureOp(tex->op)) {
      if (tex->op != OP_TXQ) {
         s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
         if (tex->op == OP_TXD) {
            // Indirect handle belongs in the first arg
            if (tex->tex.rIndirectSrc >= 0)
               s++;
            if (!tex->tex.target.isArray() && tex->tex.useOffsets)
               s++;
         }
         n = tex->srcCount(0xff, true) - s;
         // TODO: Is this necessary? Perhaps the sources just have to be
         // aligned to the same level as the first arg, not necessarily to 4.
         // Unlike on Kepler, this requirement has not been rigorously
         // verified here.
         if (n > 0 && n < 3) {
            if (tex->srcExists(n + s)) // move potential predicate out of the way
               tex->moveSources(n + s, 3 - n);
            while (n < 3)
               tex->setSrc(s + n++, new_LValue(func, FILE_GPR));
         }
      } else {
         s = tex->srcCount(0xff, true);
         n = 0;
      }

      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 1) // NOTE: first call modified positions already
         condenseSrcs(tex, 1, n);
   }
}

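// Kepler: pack the tex sources into at most two consecutive-register groups
// (the first holding up to four values), padding a partially filled second
// group with dummy values.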
void
RegAlloc::InsertConstraintsPass::texConstraintNVE0(TexInstruction *tex)
{
   if (isTextureOp(tex->op))
      textureMask(tex);
   condenseDefs(tex);

   if (tex->op == OP_SUSTB || tex->op == OP_SUSTP) {
      condenseSrcs(tex, 3, 6);
   } else
   if (isTextureOp(tex->op)) {
      int n = tex->srcCount(0xff, true);
      int s = n > 4 ? 4 : n;
      if (n > 4 && n < 7) {
         if (tex->srcExists(n)) // move potential predicate out of the way
            tex->moveSources(n, 7 - n);

         while (n < 7)
            tex->setSrc(n++, new_LValue(func, FILE_GPR));
      }
      if (s > 1)
         condenseSrcs(tex, 0, s - 1);
      if (n > 4)
         condenseSrcs(tex, 1, n - s);
   }
}

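// Fermi: condense the coordinate sources into one consecutive-register group
// and any remaining arguments (or surface store data) into a second.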
void
RegAlloc::InsertConstraintsPass::texConstraintNVC0(TexInstruction *tex)
{
   int n, s;

   if (isTextureOp(tex->op))
      textureMask(tex);

   if (tex->op == OP_TXQ) {
      s = tex->srcCount(0xff);
      n = 0;
   } else if (isSurfaceOp(tex->op)) {
      s = tex->tex.target.getDim() + (tex->tex.target.isArray() || tex->tex.target.isCube());
      if (tex->op == OP_SUSTB || tex->op == OP_SUSTP)
         n = 4;
      else
         n = 0;
   } else {
      s = tex->tex.target.getArgCount() - tex->tex.target.isMS();
      if (!tex->tex.target.isArray() &&
          (tex->tex.rIndirectSrc >= 0 || tex->tex.sIndirectSrc >= 0))
         ++s;
      if (tex->op == OP_TXD && tex->tex.useOffsets)
         ++s;
      n = tex->srcCount(0xff) - s;
      assert(n <= 4);
   }

   if (s > 1)
      condenseSrcs(tex, 0, s - 1);
   if (n > 1) // NOTE: first call modified positions already
      condenseSrcs(tex, 1, n);

   condenseDefs(tex);
}

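// nv50: the sources and defs of a tex op must form matching register ranges,
// so pad whichever side is shorter and condense both into compound values.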
void
RegAlloc::InsertConstraintsPass::texConstraintNV50(TexInstruction *tex)
{
   Value *pred = tex->getPredicate();
   if (pred)
      tex->setPredicate(tex->cc, NULL);

   textureMask(tex);

   assert(tex->defExists(0) && tex->srcExists(0));
   // make src and def count match
   int c;
   for (c = 0; tex->srcExists(c) || tex->defExists(c); ++c) {
      if (!tex->srcExists(c))
         tex->setSrc(c, new_LValue(func, tex->getSrc(0)->asLValue()));
      else
         insertConstraintMove(tex, c);
      if (!tex->defExists(c))
         tex->setDef(c, new_LValue(func, tex->getDef(0)->asLValue()));
   }
   if (pred)
      tex->setPredicate(tex->cc, pred);
   condenseDefs(tex);
   condenseSrcs(tex, 0, c - 1);
}

// Insert constraint markers for instructions whose multiple sources must be
// located in consecutive registers.
bool
RegAlloc::InsertConstraintsPass::visit(BasicBlock *bb)
{
   TexInstruction *tex;
   Instruction *next;
   int s, size;

   targ = bb->getProgram()->getTarget();

   for (Instruction *i = bb->getEntry(); i; i = next) {
      next = i->next;

      if ((tex = i->asTex())) {
         switch (targ->getChipset() & ~0xf) {
         case 0x50:
         case 0x80:
         case 0x90:
         case 0xa0:
            texConstraintNV50(tex);
            break;
         case 0xc0:
         case 0xd0:
            texConstraintNVC0(tex);
            break;
         case 0xe0:
         case 0xf0:
         case 0x100:
            texConstraintNVE0(tex);
            break;
         case 0x110:
         case 0x120:
         case 0x130:
         case 0x140:
         case 0x160:
         case 0x170:
            texConstraintGM107(tex);
            break;
         default:
            break;
         }
      } else
      if (i->op == OP_EXPORT || i->op == OP_STORE) {
         for (size = typeSizeof(i->dType), s = 1; size > 0; ++s) {
            assert(i->srcExists(s));
            size -= i->getSrc(s)->reg.size;
         }
         condenseSrcs(i, 1, s - 1);
      } else
      if (i->op == OP_LOAD || i->op == OP_VFETCH) {
         condenseDefs(i);
         if (i->src(0).isIndirect(0) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(0));
         if (i->src(0).isIndirect(1) && typeSizeof(i->dType) >= 8)
            addHazard(i, i->src(0).getIndirect(1));
         if (i->op == OP_LOAD && i->fixed && targ->getChipset() < 0xc0) {
            // Add a hazard to make sure we keep the op around. These are used
            // for membars.
            Instruction *nop = new_Instruction(func, OP_NOP, i->dType);
            nop->setSrc(0, i->getDef(0));
            i->bb->insertAfter(i, nop);
         }
      } else
      if (i->op == OP_UNION ||
          i->op == OP_MERGE ||
          i->op == OP_SPLIT) {
         constrList.push_back(i);
      } else
      if (i->op == OP_ATOM && i->subOp == NV50_IR_SUBOP_ATOM_CAS &&
          targ->getChipset() < 0xc0) {
         // Like a hazard, but for a def.
         Instruction *nop = new_Instruction(func, OP_NOP, i->dType);
         nop->setSrc(0, i->getDef(0));
         i->bb->insertAfter(i, nop);
      }
   }
   return true;
}

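// Give source s of the constraint its own value by inserting a MOV, or, for
// a single-use immediate/constant-buffer def, by sinking that def next to
// the constraint so no extra live range is created.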
void
RegAlloc::InsertConstraintsPass::insertConstraintMove(Instruction *cst, int s)
{
   const uint8_t size = cst->src(s).getSize();

   assert(cst->getSrc(s)->defs.size() == 1); // still SSA

   Instruction *defi = cst->getSrc(s)->defs.front()->getInsn();

   bool imm = defi->op == OP_MOV &&
      defi->src(0).getFile() == FILE_IMMEDIATE;
   bool load = defi->op == OP_LOAD &&
      defi->src(0).getFile() == FILE_MEMORY_CONST &&
      !defi->src(0).isIndirect(0);
   // catch some cases where we don't really need MOVs
   if (cst->getSrc(s)->refCount() == 1 && !defi->constrainedDefs()
       && defi->op != OP_MERGE && defi->op != OP_SPLIT) {
      if (imm || load) {
         // Move the defi right before the cst. No point in expanding
         // the range.
         defi->bb->remove(defi);
         cst->bb->insertBefore(cst, defi);
      }
      return;
   }

   LValue *lval = new_LValue(func, cst->src(s).getFile());
   lval->reg.size = size;

   Instruction *mov = new_Instruction(func, OP_MOV, typeOfSize(size));
   mov->setDef(0, lval);
   mov->setSrc(0, cst->getSrc(s));

   if (load) {
      mov->op = OP_LOAD;
      mov->setSrc(0, defi->getSrc(0));
   } else if (imm) {
      mov->setSrc(0, defi->getSrc(0));
   }

   if (defi->getPredicate())
      mov->setPredicate(defi->cc, defi->getPredicate());

   cst->setSrc(s, mov->getDef(0));
   cst->bb->insertBefore(cst, mov);

   cst->getDef(0)->asLValue()->noSpill = 1; // doesn't help
}

// Insert extra moves so that, if multiple register constraints on a value are
// in conflict, these conflicts can be resolved.
bool
RegAlloc::InsertConstraintsPass::insertConstraintMoves()
{
   for (std::list<Instruction *>::iterator it = constrList.begin();
        it != constrList.end();
        ++it) {
      Instruction *cst = *it;
      Instruction *mov;

      if (cst->op == OP_SPLIT && false) {
         // NOTE: this branch is deliberately disabled via "&& false".
         // Spilling splits is annoying, just make sure they're separate.
         for (int d = 0; cst->defExists(d); ++d) {
            if (!cst->getDef(d)->refCount())
               continue;
            LValue *lval = new_LValue(func, cst->def(d).getFile());
            const uint8_t size = cst->def(d).getSize();
            lval->reg.size = size;

            mov = new_Instruction(func, OP_MOV, typeOfSize(size));
            mov->setSrc(0, lval);
            mov->setDef(0, cst->getDef(d));
            cst->setDef(d, mov->getSrc(0));
            cst->bb->insertAfter(cst, mov);

            cst->getSrc(0)->asLValue()->noSpill = 1;
            mov->getSrc(0)->asLValue()->noSpill = 1;
         }
      } else
      if (cst->op == OP_MERGE || cst->op == OP_UNION) {
         for (int s = 0; cst->srcExists(s); ++s) {
            const uint8_t size = cst->src(s).getSize();

            if (!cst->getSrc(s)->defs.size()) {
               mov = new_Instruction(func, OP_NOP, typeOfSize(size));
               mov->setDef(0, cst->getSrc(s));
               cst->bb->insertBefore(cst, mov);
               continue;
            }

            insertConstraintMove(cst, s);
         }
      }
   }

   return true;
}

} // namespace nv50_ir