/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_builder.h"
#include "aco_ir.h"

#include "common/sid.h"

#include <algorithm>
#include <cstring>
#include <map>
#include <set>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>

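/* Hash support so that aco::Temp can be used as a key in the unordered
 * containers used throughout this pass. */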
namespace std {
template <> struct hash<aco::Temp> {
   size_t operator()(aco::Temp temp) const noexcept
   {
      uint32_t v;
      std::memcpy(&v, &temp, sizeof(temp));
      return std::hash<uint32_t>{}(v);
   }
};
} // namespace std

/*
 * Implements the spilling algorithm on SSA-form from
 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
 * by Matthias Braun and Sebastian Hack.
 */
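/*
 * In brief: for every variable, the distance to its next use is computed at
 * the start and end of each block. Wherever register demand exceeds the
 * target pressure, the variables whose next use is furthest away are spilled
 * first (a CFG-global variant of Belady's MIN heuristic). Spills and reloads
 * are emitted as p_spill/p_reload pseudo-instructions, which are only lowered
 * to actual memory instructions once spill slots have been assigned.
 */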

namespace aco {

namespace {

struct remat_info {
   Instruction* instr;
};

struct spill_ctx {
   RegisterDemand target_pressure;
   Program* program;
   std::vector<std::vector<RegisterDemand>> register_demand;
   std::vector<std::map<Temp, Temp>> renames;
   std::vector<std::unordered_map<Temp, uint32_t>> spills_entry;
   std::vector<std::unordered_map<Temp, uint32_t>> spills_exit;

   std::vector<bool> processed;
   std::stack<Block*, std::vector<Block*>> loop_header;
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
   std::vector<std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
   std::vector<std::vector<std::pair<Temp, uint32_t>>> local_next_use_distance; /* Working buffer */
   std::vector<std::pair<RegClass, std::unordered_set<uint32_t>>> interferences;
   std::vector<std::vector<uint32_t>> affinities;
   std::vector<bool> is_reloaded;
   std::unordered_map<Temp, remat_info> remat;
   std::set<Instruction*> unused_remats;
   unsigned wave_size;

   unsigned sgpr_spill_slots;
   unsigned vgpr_spill_slots;
   Temp scratch_rsrc;

   spill_ctx(const RegisterDemand target_pressure_, Program* program_,
             std::vector<std::vector<RegisterDemand>> register_demand_)
       : target_pressure(target_pressure_), program(program_),
         register_demand(std::move(register_demand_)), renames(program->blocks.size()),
         spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
         processed(program->blocks.size(), false), wave_size(program->wave_size),
         sgpr_spill_slots(0), vgpr_spill_slots(0)
   {}

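   /* Spill ids in the same affinity group should be assigned the same spill
    * slot, so that phi-related spills don't need copies between slots.
    * Affinity groups are merged as new pairs are added. */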
   void add_affinity(uint32_t first, uint32_t second)
   {
      unsigned found_first = affinities.size();
      unsigned found_second = affinities.size();
      for (unsigned i = 0; i < affinities.size(); i++) {
         std::vector<uint32_t>& vec = affinities[i];
         for (uint32_t entry : vec) {
            if (entry == first)
               found_first = i;
            else if (entry == second)
               found_second = i;
         }
      }
      if (found_first == affinities.size() && found_second == affinities.size()) {
         affinities.emplace_back(std::vector<uint32_t>({first, second}));
      } else if (found_first < affinities.size() && found_second == affinities.size()) {
         affinities[found_first].push_back(second);
      } else if (found_second < affinities.size() && found_first == affinities.size()) {
         affinities[found_second].push_back(first);
      } else if (found_first != found_second) {
         /* merge second into first */
         affinities[found_first].insert(affinities[found_first].end(),
                                        affinities[found_second].begin(),
                                        affinities[found_second].end());
         affinities.erase(std::next(affinities.begin(), found_second));
      } else {
         assert(found_first == found_second);
      }
   }

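   /* Two spill ids interfere if they can be live at the same time and thus
    * must not share a spill slot. Interference is only tracked between ids of
    * the same register type. */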
   void add_interference(uint32_t first, uint32_t second)
   {
      if (interferences[first].first.type() != interferences[second].first.type())
         return;

      bool inserted = interferences[first].second.insert(second).second;
      if (inserted)
         interferences[second].second.insert(first);
   }

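   /* Creates a fresh spill id for the given register class, together with its
    * bookkeeping (empty interference set, not yet reloaded). */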
   uint32_t allocate_spill_id(RegClass rc)
   {
      interferences.emplace_back(rc, std::unordered_set<uint32_t>());
      is_reloaded.push_back(false);
      return next_spill_id++;
   }

   uint32_t next_spill_id = 0;
};

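/* Returns the closest common dominator of the blocks with indices idx_a and
 * idx_b, following either the linear or the logical dominator tree. An index
 * of -1 means "no block yet". */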
int32_t
get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
{
   if (idx_a == -1)
      return idx_b;
   if (idx_b == -1)
      return idx_a;
   if (is_linear) {
      while (idx_a != idx_b) {
         if (idx_a > idx_b)
            idx_a = program->blocks[idx_a].linear_idom;
         else
            idx_b = program->blocks[idx_b].linear_idom;
      }
   } else {
      while (idx_a != idx_b) {
         if (idx_a > idx_b)
            idx_a = program->blocks[idx_a].logical_idom;
         else
            idx_b = program->blocks[idx_b].logical_idom;
      }
   }
   assert(idx_a != -1);
   return idx_a;
}

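/* Backwards walk over one block: propagates next-use distances from the
 * block's end to its start and updates the predecessors' end-of-block
 * distances via the phis and live-in variables. Whenever a predecessor's
 * information changes, the worklist is bumped so that it gets processed
 * again. */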
void
next_uses_per_block(spill_ctx& ctx, unsigned block_idx, uint32_t& worklist)
{
   Block* block = &ctx.program->blocks[block_idx];
   ctx.next_use_distances_start[block_idx] = ctx.next_use_distances_end[block_idx];
   auto& next_use_distances_start = ctx.next_use_distances_start[block_idx];

   /* to compute the next use distance at the beginning of the block, we have to add the block's
    * size */
   for (std::unordered_map<Temp, std::pair<uint32_t, uint32_t>>::iterator it =
           next_use_distances_start.begin();
        it != next_use_distances_start.end(); ++it)
      it->second.second = it->second.second + block->instructions.size();

   int idx = block->instructions.size() - 1;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      if (instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi)
         break;

      for (const Definition& def : instr->definitions) {
         if (def.isTemp())
            next_use_distances_start.erase(def.getTemp());
      }

      for (const Operand& op : instr->operands) {
         /* omit exec mask */
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp())
            next_use_distances_start[op.getTemp()] = {block_idx, idx};
      }
      idx--;
   }

   assert(block_idx != 0 || next_use_distances_start.empty());
   std::unordered_set<Temp> phi_defs;
   while (idx >= 0) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);

      std::pair<uint32_t, uint32_t> distance{block_idx, 0};

      auto it = instr->definitions[0].isTemp()
                   ? next_use_distances_start.find(instr->definitions[0].getTemp())
                   : next_use_distances_start.end();
      if (it != next_use_distances_start.end() &&
          phi_defs.insert(instr->definitions[0].getTemp()).second) {
         distance = it->second;
      }

      for (unsigned i = 0; i < instr->operands.size(); i++) {
         unsigned pred_idx =
            instr->opcode == aco_opcode::p_phi ? block->logical_preds[i] : block->linear_preds[i];
         if (instr->operands[i].isTemp()) {
            auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
               std::make_pair(instr->operands[i].getTemp(), distance));
            const bool inserted = insert_result.second;
            std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;
            if (inserted || entry_distance != distance)
               worklist = std::max(worklist, pred_idx + 1);
            entry_distance = distance;
         }
      }
      idx--;
   }

   /* all remaining live vars must be live-out at the predecessors */
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances_start) {
      Temp temp = pair.first;
      if (phi_defs.count(temp)) {
         continue;
      }
      uint32_t distance = pair.second.second;
      uint32_t dom = pair.second.first;
      std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
      for (unsigned pred_idx : preds) {
         if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
            distance += 0xFFFF;
         auto insert_result = ctx.next_use_distances_end[pred_idx].insert(
            std::make_pair(temp, std::pair<uint32_t, uint32_t>{}));
         const bool inserted = insert_result.second;
         std::pair<uint32_t, uint32_t>& entry_distance = insert_result.first->second;

         if (!inserted) {
            dom = get_dominator(dom, entry_distance.first, ctx.program, temp.is_linear());
            distance = std::min(entry_distance.second, distance);
         }
         if (entry_distance != std::pair<uint32_t, uint32_t>{dom, distance}) {
            worklist = std::max(worklist, pred_idx + 1);
            entry_distance = {dom, distance};
         }
      }
   }
}

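/* Computes next_use_distances_start/end for all blocks by iterating backwards
 * over the program until a fixed point is reached. */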
void
compute_global_next_uses(spill_ctx& ctx)
{
   ctx.next_use_distances_start.resize(ctx.program->blocks.size());
   ctx.next_use_distances_end.resize(ctx.program->blocks.size());

   uint32_t worklist = ctx.program->blocks.size();
   while (worklist) {
      unsigned block_idx = --worklist;
      next_uses_per_block(ctx, block_idx, worklist);
   }
}

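/* Returns true if the instruction can simply be re-executed at the reload
 * point instead of having its result written to a spill slot. Currently this
 * is limited to single-definition instructions with only constant operands. */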
bool
should_rematerialize(aco_ptr<Instruction>& instr)
{
   /* TODO: rematerialization is only supported for VOP1, SOP1, PSEUDO and SOPK */
   if (instr->format != Format::VOP1 && instr->format != Format::SOP1 &&
       instr->format != Format::PSEUDO && instr->format != Format::SOPK)
      return false;
   /* TODO: pseudo-instruction rematerialization is only supported for
    * p_create_vector/p_parallelcopy */
   if (instr->isPseudo() && instr->opcode != aco_opcode::p_create_vector &&
       instr->opcode != aco_opcode::p_parallelcopy)
      return false;
   if (instr->isSOPK() && instr->opcode != aco_opcode::s_movk_i32)
      return false;

   for (const Operand& op : instr->operands) {
      /* TODO: rematerialization using temporaries isn't yet supported */
      if (!op.isConstant())
         return false;
   }

   /* TODO: rematerialization with multiple definitions isn't yet supported */
   if (instr->definitions.size() > 1)
      return false;

   return true;
}

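/* Emits either a rematerialization of tmp's original defining instruction or
 * a p_reload of the given spill id, writing the result to new_name. */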
aco_ptr<Instruction>
do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
{
   std::unordered_map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
   if (remat != ctx.remat.end()) {
      Instruction* instr = remat->second.instr;
      assert((instr->isVOP1() || instr->isSOP1() || instr->isPseudo() || instr->isSOPK()) &&
             "unsupported");
      assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector ||
              instr->opcode == aco_opcode::p_parallelcopy) &&
             "unsupported");
      assert(instr->definitions.size() == 1 && "unsupported");

      aco_ptr<Instruction> res;
      if (instr->isVOP1()) {
         res.reset(create_instruction<VOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOP1()) {
         res.reset(create_instruction<SOP1_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isPseudo()) {
         res.reset(create_instruction<Pseudo_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
      } else if (instr->isSOPK()) {
         res.reset(create_instruction<SOPK_instruction>(
            instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
         res->sopk().imm = instr->sopk().imm;
      }
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         res->operands[i] = instr->operands[i];
         if (instr->operands[i].isTemp()) {
            assert(false && "unsupported");
            if (ctx.remat.count(instr->operands[i].getTemp()))
               ctx.unused_remats.erase(ctx.remat[instr->operands[i].getTemp()].instr);
         }
      }
      res->definitions[0] = Definition(new_name);
      return res;
   } else {
      aco_ptr<Pseudo_instruction> reload{
         create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
      reload->operands[0] = Operand::c32(spill_id);
      reload->definitions[0] = Definition(new_name);
      ctx.is_reloaded[spill_id] = true;
      return reload;
   }
}

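/* Collects all instructions eligible for rematerialization within the logical
 * part of each block. Each such instruction starts out in unused_remats and
 * is removed from that set once one of its results is actually used, so that
 * copies which end up unused can be eliminated later. */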
void
get_rematerialize_info(spill_ctx& ctx)
{
   for (Block& block : ctx.program->blocks) {
      bool logical = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_logical_start)
            logical = true;
         else if (instr->opcode == aco_opcode::p_logical_end)
            logical = false;
         if (logical && should_rematerialize(instr)) {
            for (const Definition& def : instr->definitions) {
               if (def.isTemp()) {
                  ctx.remat[def.getTemp()] = remat_info{instr.get()};
                  ctx.unused_remats.insert(instr.get());
               }
            }
         }
      }
   }
}

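/* Recomputes the per-instruction next-use lists for one block, walking
 * backwards from the block's live-out set. Used while processing a block to
 * pick spill candidates with the furthest next use. */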
void
update_local_next_uses(spill_ctx& ctx, Block* block,
                       std::vector<std::vector<std::pair<Temp, uint32_t>>>& local_next_uses)
{
   if (local_next_uses.size() < block->instructions.size()) {
      /* Allocate more next-use-maps. Note that by never reducing the vector size, we enable
       * future calls to this function to re-use already allocated map memory. */
      local_next_uses.resize(block->instructions.size());
   }

   local_next_uses[block->instructions.size() - 1].clear();
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_end[block->index]) {
      local_next_uses[block->instructions.size() - 1].push_back(std::make_pair<Temp, uint32_t>(
         (Temp)pair.first, pair.second.second + block->instructions.size()));
   }

   for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      if (!instr)
         break;
      if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
         break;

      if (idx != (int)block->instructions.size() - 1) {
         local_next_uses[idx] = local_next_uses[idx + 1];
      }

      for (const Operand& op : instr->operands) {
         if (op.isFixed() && op.physReg() == exec)
            continue;
         if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
            continue;
         if (op.isTemp()) {
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [op](auto& pair) { return pair.first == op.getTemp(); });
            if (it == local_next_uses[idx].end()) {
               local_next_uses[idx].push_back(std::make_pair<Temp, uint32_t>(op.getTemp(), idx));
            } else {
               it->second = idx;
            }
         }
      }
      for (const Definition& def : instr->definitions) {
         if (def.isTemp()) {
            auto it = std::find_if(local_next_uses[idx].begin(), local_next_uses[idx].end(),
                                   [def](auto& pair) { return pair.first == def.getTemp(); });
            if (it != local_next_uses[idx].end()) {
               local_next_uses[idx].erase(it);
            }
         }
      }
   }
}

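/* Returns the register demand immediately before the instruction at index idx
 * of the given block. */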
RegisterDemand
get_demand_before(spill_ctx& ctx, unsigned block_idx, unsigned idx)
{
   if (idx == 0) {
      RegisterDemand demand = ctx.register_demand[block_idx][idx];
      aco_ptr<Instruction>& instr = ctx.program->blocks[block_idx].instructions[idx];
      aco_ptr<Instruction> instr_before(nullptr);
      return get_demand_before(demand, instr, instr_before);
   } else {
      return ctx.register_demand[block_idx][idx - 1];
   }
}

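/* Computes the register demand at the start of the block, including killed
 * unspilled phi definitions and the sgpr demand at the end of the linear
 * predecessors. */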
RegisterDemand
get_live_in_demand(spill_ctx& ctx, unsigned block_idx)
{
   unsigned idx = 0;
   RegisterDemand reg_pressure = RegisterDemand();
   Block& block = ctx.program->blocks[block_idx];
   for (aco_ptr<Instruction>& phi : block.instructions) {
      if (!is_phi(phi))
         break;
      idx++;

      /* Killed phi definitions increase pressure in the predecessor but not
       * the block they're in. Since the loops below are both to control
       * pressure of the start of this block and the ends of its
       * predecessors, we need to count killed unspilled phi definitions here. */
      if (phi->definitions[0].isTemp() && phi->definitions[0].isKill() &&
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()))
         reg_pressure += phi->definitions[0].getTemp();
   }

   reg_pressure += get_demand_before(ctx, block_idx, idx);

   /* Consider register pressure from linear predecessors. This can affect
    * reg_pressure if the branch instructions define sgprs. */
   for (unsigned pred : block.linear_preds)
      reg_pressure.sgpr =
         std::max<int16_t>(reg_pressure.sgpr, ctx.register_demand[pred].back().sgpr);

   return reg_pressure;
}

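/* Decides which variables are kept spilled at the start of the block,
 * depending on the kind of block (loop header, branch block or merge block),
 * and returns the register demand freed by those spills. */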
RegisterDemand
init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   RegisterDemand spilled_registers;

   /* first block, nothing was spilled before */
   if (block_idx == 0)
      return {0, 0};

   /* next use distances at the beginning of the current block */
   const auto& next_use_distances = ctx.next_use_distances_start[block_idx];

   /* loop header block */
   if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
      assert(block->linear_preds[0] == block_idx - 1);
      assert(block->logical_preds[0] == block_idx - 1);

      /* create new loop_info */
      ctx.loop_header.emplace(block);

      /* check how many live-through variables should be spilled */
      RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
      RegisterDemand loop_demand = reg_pressure;
      unsigned i = block_idx;
      while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
         assert(ctx.program->blocks.size() > i);
         loop_demand.update(ctx.program->blocks[i++].register_demand);
      }
      unsigned loop_end = i;

      for (auto spilled : ctx.spills_exit[block_idx - 1]) {
         auto it = next_use_distances.find(spilled.first);

         /* variable is not live at loop entry: probably a phi operand */
         if (it == next_use_distances.end())
            continue;

         /* keep constants and live-through variables spilled */
         if (it->second.first >= loop_end || ctx.remat.count(spilled.first)) {
            ctx.spills_entry[block_idx][spilled.first] = spilled.second;
            spilled_registers += spilled.first;
            loop_demand -= spilled.first;
         }
      }

      /* select live-through variables and constants */
      RegType type = RegType::vgpr;
      while (loop_demand.exceeds(ctx.target_pressure)) {
         /* if VGPR demand is low enough, select SGPRs */
         if (type == RegType::vgpr && loop_demand.vgpr <= ctx.target_pressure.vgpr)
            type = RegType::sgpr;
         /* if SGPR demand is low enough, break */
         if (type == RegType::sgpr && loop_demand.sgpr <= ctx.target_pressure.sgpr)
            break;

         unsigned distance = 0;
         Temp to_spill;
         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type &&
                (pair.second.first >= loop_end ||
                 (ctx.remat.count(pair.first) && type == RegType::sgpr)) &&
                pair.second.second > distance && !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }

         /* select SGPRs or break */
         if (distance == 0) {
            if (type == RegType::sgpr)
               break;
            type = RegType::sgpr;
            continue;
         }

         uint32_t spill_id;
         if (!ctx.spills_exit[block_idx - 1].count(to_spill)) {
            spill_id = ctx.allocate_spill_id(to_spill.regClass());
         } else {
            spill_id = ctx.spills_exit[block_idx - 1][to_spill];
         }

         ctx.spills_entry[block_idx][to_spill] = spill_id;
         spilled_registers += to_spill;
         loop_demand -= to_spill;
      }

      /* shortcut */
      if (!loop_demand.exceeds(ctx.target_pressure))
         return spilled_registers;

      /* if reg pressure is too high at beginning of loop, add variables with furthest use */
      reg_pressure -= spilled_registers;

      while (reg_pressure.exceeds(ctx.target_pressure)) {
         unsigned distance = 0;
         Temp to_spill;
         type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

         for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
              next_use_distances) {
            if (pair.first.type() == type && pair.second.second > distance &&
                !ctx.spills_entry[block_idx].count(pair.first)) {
               to_spill = pair.first;
               distance = pair.second.second;
            }
         }
         assert(distance != 0);
         ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
         spilled_registers += to_spill;
         reg_pressure -= to_spill;
      }

      return spilled_registers;
   }

   /* branch block */
   if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
      /* keep variables spilled if they are alive and not used in the current block */
      unsigned pred_idx = block->linear_preds[0];
      for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
         if (pair.first.type() != RegType::sgpr) {
            continue;
         }
         auto next_use_distance_it = next_use_distances.find(pair.first);
         if (next_use_distance_it != next_use_distances.end() &&
             next_use_distance_it->second.first != block_idx) {
            ctx.spills_entry[block_idx].insert(pair);
            spilled_registers.sgpr += pair.first.size();
         }
      }
      if (block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() != RegType::vgpr) {
               continue;
            }
            auto next_use_distance_it = next_use_distances.find(pair.first);
            if (next_use_distance_it != next_use_distances.end() &&
                next_use_distance_it->second.first != block_idx) {
               ctx.spills_entry[block_idx].insert(pair);
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      /* if register demand is still too high, we just keep all spilled live vars
       * and process the block */
      if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
         pred_idx = block->linear_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::sgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.sgpr += pair.first.size();
            }
         }
      }
      if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr &&
          block->logical_preds.size() == 1) {
         pred_idx = block->logical_preds[0];
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
            if (pair.first.type() == RegType::vgpr && next_use_distances.count(pair.first) &&
                ctx.spills_entry[block_idx].insert(pair).second) {
               spilled_registers.vgpr += pair.first.size();
            }
         }
      }

      return spilled_registers;
   }

   /* else: merge block */
   std::set<Temp> partial_spills;

   /* keep variables spilled on all incoming paths */
   for (const std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair : next_use_distances) {
      std::vector<unsigned>& preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;
      /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload
       * it. Otherwise, if any predecessor reloads it, ensure it's reloaded on all other
       * predecessors. The idea is that it's better in practice to rematerialize redundantly than to
       * create lots of phis. */
      /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db
       * doesn't seem to exercise this path much) */
      bool remat = ctx.remat.count(pair.first);
      bool spill = !remat;
      uint32_t spill_id = 0;
      for (unsigned pred_idx : preds) {
         /* variable is not even live at the predecessor: probably from a phi */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first)) {
            spill = false;
            break;
         }
         if (!ctx.spills_exit[pred_idx].count(pair.first)) {
            if (!remat)
               spill = false;
         } else {
            partial_spills.insert(pair.first);
            /* it might be that on one incoming path, the variable has a different spill_id, but
             * add_coupling_code() will take care of that. */
            spill_id = ctx.spills_exit[pred_idx][pair.first];
            if (remat)
               spill = true;
         }
      }
      if (spill) {
         ctx.spills_entry[block_idx][pair.first] = spill_id;
         partial_spills.erase(pair.first);
         spilled_registers += pair.first;
      }
   }

   /* same for phis */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;
      if (!phi->definitions[0].isTemp())
         continue;

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      bool is_all_spilled = true;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;
         is_all_spilled &= phi->operands[i].isTemp() &&
                           ctx.spills_exit[preds[i]].count(phi->operands[i].getTemp());
      }

      if (is_all_spilled) {
         /* The phi is spilled at all predecessors. Keep it spilled. */
         ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] =
            ctx.allocate_spill_id(phi->definitions[0].regClass());
         spilled_registers += phi->definitions[0].getTemp();
      } else {
         /* Phis might increase the register pressure. */
         partial_spills.insert(phi->definitions[0].getTemp());
      }
   }

   /* if reg pressure at first instruction is still too high, add partially spilled variables */
   RegisterDemand reg_pressure = get_live_in_demand(ctx, block_idx);
   reg_pressure -= spilled_registers;

   while (reg_pressure.exceeds(ctx.target_pressure)) {
      assert(!partial_spills.empty());
      std::set<Temp>::iterator it = partial_spills.begin();
      Temp to_spill = Temp();
      unsigned distance = 0;
      RegType type = reg_pressure.vgpr > ctx.target_pressure.vgpr ? RegType::vgpr : RegType::sgpr;

      while (it != partial_spills.end()) {
         assert(!ctx.spills_entry[block_idx].count(*it));

         if (it->type() == type && next_use_distances.at(*it).second > distance) {
            distance = next_use_distances.at(*it).second;
            to_spill = *it;
         }
         ++it;
      }
      assert(distance != 0);

      ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
      partial_spills.erase(to_spill);
      spilled_registers += to_spill;
      reg_pressure -= to_spill;
   }

   return spilled_registers;
}

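/* Inserts spill/reload/phi code on the incoming control flow edges of the
 * block so that the spill sets and renames of all predecessors agree with the
 * spill set computed for this block's entry. */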
void
add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
{
   /* no coupling code necessary */
   if (block->linear_preds.size() == 0)
      return;

   std::vector<aco_ptr<Instruction>> instructions;
   /* branch block: TODO take other branch into consideration */
   if (block->linear_preds.size() == 1 &&
       !(block->kind & (block_kind_loop_exit | block_kind_loop_header))) {
      assert(ctx.processed[block->linear_preds[0]]);
      assert(ctx.register_demand[block_idx].size() == block->instructions.size());
      std::vector<RegisterDemand> reg_demand;
      unsigned insert_idx = 0;
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, 0);

      for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
           ctx.next_use_distances_start[block_idx]) {
         const unsigned pred_idx = block->linear_preds[0];

         if (!live.first.is_linear())
            continue;
         /* still spilled */
         if (ctx.spills_entry[block_idx].count(live.first))
            continue;

         /* in register at end of predecessor */
         auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
         if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
            std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
            if (it != ctx.renames[pred_idx].end())
               ctx.renames[block_idx].insert(*it);
            continue;
         }

         /* variable is spilled at predecessor and live at current block: create reload instruction */
         Temp new_name = ctx.program->allocateTmp(live.first.regClass());
         aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, spills_exit_it->second);
         instructions.emplace_back(std::move(reload));
         reg_demand.push_back(demand_before);
         ctx.renames[block_idx][live.first] = new_name;
      }

      if (block->logical_preds.size() == 1) {
         do {
            assert(insert_idx < block->instructions.size());
            instructions.emplace_back(std::move(block->instructions[insert_idx]));
            reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
            insert_idx++;
         } while (instructions.back()->opcode != aco_opcode::p_logical_start);

         unsigned pred_idx = block->logical_preds[0];
         for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& live :
              ctx.next_use_distances_start[block_idx]) {
            if (live.first.is_linear())
               continue;
            /* still spilled */
            if (ctx.spills_entry[block_idx].count(live.first))
               continue;

            /* in register at end of predecessor */
            auto spills_exit_it = ctx.spills_exit[pred_idx].find(live.first);
            if (spills_exit_it == ctx.spills_exit[pred_idx].end()) {
               std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
               if (it != ctx.renames[pred_idx].end())
                  ctx.renames[block_idx].insert(*it);
               continue;
            }

            /* variable is spilled at predecessor and live at current block:
             * create reload instruction */
            Temp new_name = ctx.program->allocateTmp(live.first.regClass());
            aco_ptr<Instruction> reload =
               do_reload(ctx, live.first, new_name, spills_exit_it->second);
            instructions.emplace_back(std::move(reload));
            reg_demand.emplace_back(reg_demand.back());
            ctx.renames[block_idx][live.first] = new_name;
         }
      }

      /* combine new reload instructions with original block */
      if (!instructions.empty()) {
         reg_demand.insert(reg_demand.end(),
                           std::next(ctx.register_demand[block->index].begin(), insert_idx),
                           ctx.register_demand[block->index].end());
         ctx.register_demand[block_idx] = std::move(reg_demand);
         instructions.insert(instructions.end(),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                std::next(block->instructions.begin(), insert_idx)),
                             std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(
                                block->instructions.end()));
         block->instructions = std::move(instructions);
      }
      return;
   }

   /* loop header and merge blocks: check if all (linear) predecessors have been processed */
   for (ASSERTED unsigned pred : block->linear_preds)
      assert(ctx.processed[pred]);

   /* iterate the phi nodes for which operands to spill at the predecessor */
   for (aco_ptr<Instruction>& phi : block->instructions) {
      if (!is_phi(phi))
         break;

      /* if the phi is not spilled, add to instructions */
      if (!phi->definitions[0].isTemp() ||
          !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp())) {
         instructions.emplace_back(std::move(phi));
         continue;
      }

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];

      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (phi->operands[i].isUndefined())
            continue;

         unsigned pred_idx = preds[i];
         Operand spill_op = phi->operands[i];

         if (spill_op.isTemp()) {
            assert(phi->operands[i].isKill());
            Temp var = phi->operands[i].getTemp();

            std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
            /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            if (rename_it == ctx.renames[preds[i]].end() && ctx.remat.count(var))
               ctx.unused_remats.erase(ctx.remat[var].instr);

            /* check if variable is already spilled at predecessor */
            auto spilled = ctx.spills_exit[pred_idx].find(var);
            if (spilled != ctx.spills_exit[pred_idx].end()) {
               if (spilled->second != def_spill_id)
                  ctx.add_affinity(def_spill_id, spilled->second);
               continue;
            }

            /* rename if necessary */
            if (rename_it != ctx.renames[pred_idx].end()) {
               spill_op.setTemp(rename_it->second);
               ctx.renames[pred_idx].erase(rename_it);
            }
         }

         uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());

         /* add interferences and affinity */
         for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx])
            ctx.add_interference(spill_id, pair.second);
         ctx.add_affinity(def_spill_id, spill_id);

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = spill_op;
         spill->operands[1] = Operand::c32(spill_id);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));

         /* Add the original name to predecessor's spilled variables */
         if (spill_op.isTemp())
            ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
      }

      /* remove phi from instructions */
      phi.reset();
   }

   /* iterate all (other) spilled variables for which to spill at the predecessor */
   // TODO: would be better to have them sorted: VGPRs first, then longest next-use distance
   for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      for (unsigned pred_idx : preds) {
         /* variable is already spilled at predecessor */
         auto spilled = ctx.spills_exit[pred_idx].find(pair.first);
         if (spilled != ctx.spills_exit[pred_idx].end()) {
            if (spilled->second != pair.second)
               ctx.add_affinity(pair.second, spilled->second);
            continue;
         }

         /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            continue;

         /* add interferences between spilled variable and the predecessor's exit spills */
         for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
            if (exit_spill.first == pair.first)
               continue;
            ctx.add_interference(exit_spill.second, pair.second);
         }

         /* variable is in register at predecessor and has to be spilled */
         /* rename if necessary */
         Temp var = pair.first;
         std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
         if (rename_it != ctx.renames[pred_idx].end()) {
            var = rename_it->second;
            ctx.renames[pred_idx].erase(rename_it);
         }

         aco_ptr<Pseudo_instruction> spill{
            create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
         spill->operands[0] = Operand(var);
         spill->operands[1] = Operand::c32(pair.second);
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         pred.instructions.insert(it, std::move(spill));
         ctx.spills_exit[pred.index][pair.first] = pair.second;
      }
   }

   /* iterate phis for which operands to reload */
   for (aco_ptr<Instruction>& phi : instructions) {
      assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
      assert(!phi->definitions[0].isTemp() ||
             !ctx.spills_entry[block_idx].count(phi->definitions[0].getTemp()));

      std::vector<unsigned>& preds =
         phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
      for (unsigned i = 0; i < phi->operands.size(); i++) {
         if (!phi->operands[i].isTemp())
            continue;
         unsigned pred_idx = preds[i];

         /* if the operand was reloaded, rename */
         if (!ctx.spills_exit[pred_idx].count(phi->operands[i].getTemp())) {
            std::map<Temp, Temp>::iterator it =
               ctx.renames[pred_idx].find(phi->operands[i].getTemp());
            if (it != ctx.renames[pred_idx].end()) {
               phi->operands[i].setTemp(it->second);
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
            } else {
               auto remat_it = ctx.remat.find(phi->operands[i].getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }

         Temp tmp = phi->operands[i].getTemp();

         /* reload phi operand at end of predecessor block */
         Temp new_name = ctx.program->allocateTmp(tmp.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (phi->opcode == aco_opcode::p_phi &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
         aco_ptr<Instruction> reload =
            do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);

         /* reload spilled exec mask directly to exec */
         if (!phi->definitions[0].isTemp()) {
            assert(phi->definitions[0].isFixed() && phi->definitions[0].physReg() == exec);
            reload->definitions[0] = phi->definitions[0];
            phi->operands[i] = Operand(exec, ctx.program->lane_mask);
         } else {
            ctx.spills_exit[pred_idx].erase(tmp);
            ctx.renames[pred_idx][tmp] = new_name;
            phi->operands[i].setTemp(new_name);
         }

         pred.instructions.insert(it, std::move(reload));
      }
   }

   /* iterate live variables for which to reload */
   // TODO: reload at current block if variable is spilled on all predecessors
   for (std::pair<const Temp, std::pair<uint32_t, uint32_t>>& pair :
        ctx.next_use_distances_start[block_idx]) {
      /* skip spilled variables */
      if (ctx.spills_entry[block_idx].count(pair.first))
         continue;
      std::vector<unsigned> preds =
         pair.first.is_linear() ? block->linear_preds : block->logical_preds;

      /* variable is dead at predecessor, it must be from a phi */
      bool is_dead = false;
      for (unsigned pred_idx : preds) {
         if (!ctx.next_use_distances_end[pred_idx].count(pair.first))
            is_dead = true;
      }
      if (is_dead)
         continue;
      for (unsigned pred_idx : preds) {
         /* the variable is not spilled at the predecessor */
         if (!ctx.spills_exit[pred_idx].count(pair.first))
            continue;

         /* variable is spilled at predecessor and has to be reloaded */
         Temp new_name = ctx.program->allocateTmp(pair.first.regClass());
         Block& pred = ctx.program->blocks[pred_idx];
         unsigned idx = pred.instructions.size();
         do {
            assert(idx != 0);
            idx--;
         } while (pair.first.type() == RegType::vgpr &&
                  pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
         std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);

         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
         pred.instructions.insert(it, std::move(reload));

         ctx.spills_exit[pred.index].erase(pair.first);
         ctx.renames[pred.index][pair.first] = new_name;
      }

      /* check if we have to create a new phi for this variable */
      Temp rename = Temp();
      bool is_same = true;
      for (unsigned pred_idx : preds) {
         if (!ctx.renames[pred_idx].count(pair.first)) {
            if (rename == Temp())
               rename = pair.first;
            else
               is_same = rename == pair.first;
         } else {
            if (rename == Temp())
               rename = ctx.renames[pred_idx][pair.first];
            else
               is_same = rename == ctx.renames[pred_idx][pair.first];
         }

         if (!is_same)
            break;
      }

      if (!is_same) {
         /* the variable was renamed differently in the predecessors: we have to create a phi */
         aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
         aco_ptr<Pseudo_instruction> phi{
            create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
         rename = ctx.program->allocateTmp(pair.first.regClass());
         for (unsigned i = 0; i < phi->operands.size(); i++) {
            Temp tmp;
            if (ctx.renames[preds[i]].count(pair.first)) {
               tmp = ctx.renames[preds[i]][pair.first];
            } else if (preds[i] >= block_idx) {
               tmp = rename;
            } else {
               tmp = pair.first;
               /* prevent the defining instruction from being DCE'd if it could be rematerialized */
               if (ctx.remat.count(tmp))
                  ctx.unused_remats.erase(ctx.remat[tmp].instr);
            }
            phi->operands[i] = Operand(tmp);
         }
         phi->definitions[0] = Definition(rename);
         instructions.emplace_back(std::move(phi));
      }

      /* the variable was renamed: add new name to renames */
      if (!(rename == Temp() || rename == pair.first))
         ctx.renames[block_idx][pair.first] = rename;
   }

   /* combine phis with instructions */
   unsigned idx = 0;
   while (!block->instructions[idx]) {
      idx++;
   }

   if (!ctx.processed[block_idx]) {
      assert(!(block->kind & block_kind_loop_header));
      RegisterDemand demand_before = get_demand_before(ctx, block_idx, idx);
      ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(),
                                              ctx.register_demand[block->index].begin() + idx);
      ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(),
                                               instructions.size(), demand_before);
   }

   std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
   instructions.insert(
      instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
      std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
   block->instructions = std::move(instructions);
}

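/* Walks over the block's instructions, renaming operands, reloading spilled
 * operands and spilling the variables with the furthest next use whenever the
 * register demand exceeds the target pressure. */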
void
process_block(spill_ctx& ctx, unsigned block_idx, Block* block, RegisterDemand spilled_registers)
{
   assert(!ctx.processed[block_idx]);

   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separately */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      instructions.emplace_back(std::move(block->instructions[idx++]));
   }

   if (block->register_demand.exceeds(ctx.target_pressure)) {
      update_local_next_uses(ctx, block, ctx.local_next_use_distance);
   } else {
      /* We won't use local_next_use_distance, so no initialization needed */
   }

   auto& current_spills = ctx.spills_exit[block_idx];

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      std::map<Temp, std::pair<Temp, uint32_t>> reloads;

      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (!current_spills.count(op.getTemp())) {
            /* the Operand is in register: check if it was renamed */
            auto rename_it = ctx.renames[block_idx].find(op.getTemp());
            if (rename_it != ctx.renames[block_idx].end()) {
               op.setTemp(rename_it->second);
            } else {
               /* prevent its defining instruction from being DCE'd if it could be rematerialized */
               auto remat_it = ctx.remat.find(op.getTemp());
               if (remat_it != ctx.remat.end()) {
                  ctx.unused_remats.erase(remat_it->second.instr);
               }
            }
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = ctx.program->allocateTmp(op.regClass());
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         new_demand.update(get_demand_before(ctx, block_idx, idx));

         assert(!ctx.local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while ((new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            RegType type = RegType::sgpr;
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr)
               type = RegType::vgpr;

            for (std::pair<Temp, uint32_t> pair : ctx.local_next_use_distance[idx]) {
               if (pair.first.type() != type)
                  continue;
               bool can_rematerialize = ctx.remat.count(pair.first);
               if (((pair.second > distance && can_rematerialize == do_rematerialize) ||
                    (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                   !current_spills.count(pair.first)) {
                  to_spill = pair.first;
                  distance = pair.second;
                  do_rematerialize = can_rematerialize;
               }
            }

            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills)
               ctx.add_interference(spill_id, pair.second);
            for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads)
               ctx.add_interference(spill_id, pair.second.second);

            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].count(to_spill)) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{
               create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand::c32(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<const Temp, std::pair<Temp, uint32_t>>& pair : reloads) {
         aco_ptr<Instruction> reload =
            do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
}

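/* Spills the live-in variables of the block, adds the coupling code on its
 * incoming edges and processes its body. When leaving a loop, the renames
 * created inside the loop are propagated back through the loop's blocks to
 * repair SSA. */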
void
spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables */
   for (auto it = ctx.spills_entry[block_idx].begin(); it != ctx.spills_entry[block_idx].end();
        ++it) {
      for (auto it2 = std::next(it); it2 != ctx.spills_entry[block_idx].end(); ++it2)
         ctx.add_interference(it->second, it2->second);
   }

   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   const auto& current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = (block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() || ctx.unused_remats.size();

   for (auto it = current_spills.begin(); !process && it != current_spills.end(); ++it) {
      if (ctx.next_use_distances_start[block_idx].at(it->first).first == block_idx)
         process = true;
   }

   assert(ctx.spills_exit[block_idx].empty());
   ctx.spills_exit[block_idx] = current_spills;
   if (process) {
      process_block(ctx, block_idx, block, spilled_registers);
   }

   ctx.processed[block_idx] = true;

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 ||
       ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in
             * add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         /* variable is not live at beginning of this block */
         if (ctx.next_use_distances_start[idx].count(rename.first) == 0)
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].count(rename.first) != 0)
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                     renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}

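/* Builds (or reuses) the buffer resource/SADDR used for scratch access. The
 * code is emitted before the p_logical_end of the enclosing top-level block,
 * so the resource is available in the current block. */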
Temp
load_scratch_resource(spill_ctx& ctx, Temp& scratch_offset, Block& block,
                      std::vector<aco_ptr<Instruction>>& instructions, unsigned offset)
{
   Builder bld(ctx.program);
   if (block.kind & block_kind_top_level) {
      bld.reset(&instructions);
   } else {
      for (int block_idx = block.index; block_idx >= 0; block_idx--) {
         if (!(ctx.program->blocks[block_idx].kind & block_kind_top_level))
            continue;

         /* find p_logical_end */
         std::vector<aco_ptr<Instruction>>& prev_instructions =
            ctx.program->blocks[block_idx].instructions;
         unsigned idx = prev_instructions.size() - 1;
         while (prev_instructions[idx]->opcode != aco_opcode::p_logical_end)
            idx--;
         bld.reset(&prev_instructions, std::next(prev_instructions.begin(), idx));
         break;
      }
   }

   /* GFX9+ uses scratch_* instructions, which don't use a resource. Return a SADDR instead. */
   if (ctx.program->gfx_level >= GFX9)
      return bld.copy(bld.def(s1), Operand::c32(offset));

   Temp private_segment_buffer = ctx.program->private_segment_buffer;
   if (ctx.program->stage.hw != HWStage::CS)
      private_segment_buffer =
         bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, Operand::zero());

   if (offset)
      scratch_offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc),
                                scratch_offset, Operand::c32(offset));

   uint32_t rsrc_conf =
      S_008F0C_ADD_TID_ENABLE(1) | S_008F0C_INDEX_STRIDE(ctx.program->wave_size == 64 ? 3 : 2);

   if (ctx.program->gfx_level >= GFX10) {
      rsrc_conf |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
                   S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
                   S_008F0C_RESOURCE_LEVEL(ctx.program->gfx_level < GFX11);
   } else if (ctx.program->gfx_level <= GFX7) {
      /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
      rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
   }
   /* older generations need element size = 4 bytes. element size removed in GFX9 */
   if (ctx.program->gfx_level <= GFX8)
      rsrc_conf |= S_008F0C_ELEMENT_SIZE(1);

   return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4), private_segment_buffer,
                     Operand::c32(-1u), Operand::c32(rsrc_conf));
}

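/* Computes the byte offset for a VGPR spill slot and makes sure
 * ctx.scratch_rsrc is initialized for the current block. */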
void
setup_vgpr_spill_reload(spill_ctx& ctx, Block& block,
                        std::vector<aco_ptr<Instruction>>& instructions, uint32_t spill_slot,
                        unsigned* offset)
{
   Temp scratch_offset = ctx.program->scratch_offset;

   *offset = spill_slot * 4;
   if (ctx.program->gfx_level >= GFX9) {
      *offset += ctx.program->dev.scratch_global_offset_min;

      if (ctx.scratch_rsrc == Temp()) {
         int32_t saddr = ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size -
                         ctx.program->dev.scratch_global_offset_min;
         ctx.scratch_rsrc =
            load_scratch_resource(ctx, scratch_offset, block, instructions, saddr);
      }
   } else {
      bool add_offset_to_sgpr =
         ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size +
            ctx.vgpr_spill_slots * 4 >
         4096;
      if (!add_offset_to_sgpr)
         *offset += ctx.program->config->scratch_bytes_per_wave / ctx.program->wave_size;

      if (ctx.scratch_rsrc == Temp()) {
         unsigned rsrc_offset =
            add_offset_to_sgpr ? ctx.program->config->scratch_bytes_per_wave : 0;
         ctx.scratch_rsrc =
            load_scratch_resource(ctx, scratch_offset, block, instructions, rsrc_offset);
      }
   }
}

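/* Lowers a p_spill of a VGPR to scratch stores: multi-dword temporaries are
 * split into dwords, each stored 4 bytes apart. */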
void
spill_vgpr(spill_ctx& ctx, Block& block, std::vector<aco_ptr<Instruction>>& instructions,
           aco_ptr<Instruction>& spill, std::vector<uint32_t>& slots)
{
   ctx.program->config->spilled_vgprs += spill->operands[0].size();

   uint32_t spill_id = spill->operands[1].constantValue();
   uint32_t spill_slot = slots[spill_id];

   unsigned offset;
   setup_vgpr_spill_reload(ctx, block, instructions, spill_slot, &offset);

   assert(spill->operands[0].isTemp());
   Temp temp = spill->operands[0].getTemp();
   assert(temp.type() == RegType::vgpr && !temp.is_linear());

   Builder bld(ctx.program, &instructions);
   if (temp.size() > 1) {
      Instruction* split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector,
                                                                Format::PSEUDO, 1, temp.size())};
      split->operands[0] = Operand(temp);
      for (unsigned i = 0; i < temp.size(); i++)
         split->definitions[i] = bld.def(v1);
      bld.insert(split);
      for (unsigned i = 0; i < temp.size(); i++, offset += 4) {
         Temp elem = split->definitions[i].getTemp();
         if (ctx.program->gfx_level >= GFX9) {
            bld.scratch(aco_opcode::scratch_store_dword, Operand(v1), ctx.scratch_rsrc, elem,
                        offset, memory_sync_info(storage_vgpr_spill, semantic_private));
         } else {
            Instruction* instr =
               bld.mubuf(aco_opcode::buffer_store_dword, ctx.scratch_rsrc, Operand(v1),
                         ctx.program->scratch_offset, elem, offset, false, true);
            instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
         }
      }
   } else if (ctx.program->gfx_level >= GFX9) {
      bld.scratch(aco_opcode::scratch_store_dword, Operand(v1), ctx.scratch_rsrc, temp, offset,
                  memory_sync_info(storage_vgpr_spill, semantic_private));
   } else {
      Instruction* instr = bld.mubuf(aco_opcode::buffer_store_dword, ctx.scratch_rsrc, Operand(v1),
                                     ctx.program->scratch_offset, temp, offset, false, true);
      instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
   }
}

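/* Lowers a p_reload of a VGPR temporary into scratch loads, mirroring
 * spill_vgpr: multi-dword temporaries are loaded dword by dword and
 * recombined with p_create_vector.
 */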
void
reload_vgpr(spill_ctx& ctx, Block& block, std::vector<aco_ptr<Instruction>>& instructions,
            aco_ptr<Instruction>& reload, std::vector<uint32_t>& slots)
{
   uint32_t spill_id = reload->operands[0].constantValue();
   uint32_t spill_slot = slots[spill_id];

   unsigned offset;
   setup_vgpr_spill_reload(ctx, block, instructions, spill_slot, &offset);

   Definition def = reload->definitions[0];

   Builder bld(ctx.program, &instructions);
   if (def.size() > 1) {
      Instruction* vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector,
                                                              Format::PSEUDO, def.size(), 1)};
      vec->definitions[0] = def;
      for (unsigned i = 0; i < def.size(); i++, offset += 4) {
         Temp tmp = bld.tmp(v1);
         vec->operands[i] = Operand(tmp);
         if (ctx.program->gfx_level >= GFX9) {
            bld.scratch(aco_opcode::scratch_load_dword, Definition(tmp), Operand(v1),
                        ctx.scratch_rsrc, offset,
                        memory_sync_info(storage_vgpr_spill, semantic_private));
         } else {
            Instruction* instr =
               bld.mubuf(aco_opcode::buffer_load_dword, Definition(tmp), ctx.scratch_rsrc,
                         Operand(v1), ctx.program->scratch_offset, offset, false, true);
            instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
         }
      }
      bld.insert(vec);
   } else if (ctx.program->gfx_level >= GFX9) {
      bld.scratch(aco_opcode::scratch_load_dword, def, Operand(v1), ctx.scratch_rsrc, offset,
                  memory_sync_info(storage_vgpr_spill, semantic_private));
   } else {
      Instruction* instr = bld.mubuf(aco_opcode::buffer_load_dword, def, ctx.scratch_rsrc,
                                     Operand(v1), ctx.program->scratch_offset, offset, false, true);
      instr->mubuf().sync = memory_sync_info(storage_vgpr_spill, semantic_private);
   }
}

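/* Marks the slot ranges of all already-assigned spill ids interfering with
 * `id` as used, so that a following find_available_slot() cannot pick them.
 */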
void
add_interferences(spill_ctx& ctx, std::vector<bool>& is_assigned, std::vector<uint32_t>& slots,
                  std::vector<bool>& slots_used, unsigned id)
{
   for (unsigned other : ctx.interferences[id].second) {
      if (!is_assigned[other])
         continue;

      RegClass other_rc = ctx.interferences[other].first;
      unsigned slot = slots[other];
      std::fill(slots_used.begin() + slot, slots_used.begin() + slot + other_rc.size(), true);
   }
}

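/* First-fit search for `size` contiguous free slots. SGPR slots are lanes of
 * linear VGPRs, so an SGPR slot range must not straddle a wave-size boundary;
 * if it would, the search skips ahead to the next wave-size-aligned slot.
 */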
unsigned
find_available_slot(std::vector<bool>& used, unsigned wave_size, unsigned size, bool is_sgpr)
{
   unsigned wave_size_minus_one = wave_size - 1;
   unsigned slot = 0;

   while (true) {
      bool available = true;
      for (unsigned i = 0; i < size; i++) {
         if (slot + i < used.size() && used[slot + i]) {
            available = false;
            break;
         }
      }
      if (!available) {
         slot++;
         continue;
      }

      if (is_sgpr && ((slot & wave_size_minus_one) > wave_size - size)) {
         slot = align(slot, wave_size);
         continue;
      }

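      /* Slot found: clear the scratch buffer for the next query (callers
       * re-populate it via add_interferences), but let its size keep growing
       * as a high-water mark of the total number of slots in use. */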
      std::fill(used.begin(), used.end(), false);

      if (slot + size > used.size())
         used.resize(slot + size);

      return slot;
   }
}

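/* Assigns spill slots to all reloaded spill ids of the given register type.
 * Ids within an affinity group share a single slot and are placed first; the
 * remaining ids get first-fit slots avoiding every interfering id that was
 * already assigned. *num_slots receives the total number of slots used.
 */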
void
assign_spill_slots_helper(spill_ctx& ctx, RegType type, std::vector<bool>& is_assigned,
                          std::vector<uint32_t>& slots, unsigned* num_slots)
{
   std::vector<bool> slots_used;

   /* assign slots for ids with affinities first */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      if (ctx.interferences[vec[0]].first.type() != type)
         continue;

      for (unsigned id : vec) {
         if (!ctx.is_reloaded[id])
            continue;

         add_interferences(ctx, is_assigned, slots, slots_used, id);
      }

      unsigned slot = find_available_slot(
         slots_used, ctx.wave_size, ctx.interferences[vec[0]].first.size(), type == RegType::sgpr);

      for (unsigned id : vec) {
         assert(!is_assigned[id]);

         if (ctx.is_reloaded[id]) {
            slots[id] = slot;
            is_assigned[id] = true;
         }
      }
   }

   /* assign slots for ids without affinities */
   for (unsigned id = 0; id < ctx.interferences.size(); id++) {
      if (is_assigned[id] || !ctx.is_reloaded[id] || ctx.interferences[id].first.type() != type)
         continue;

      add_interferences(ctx, is_assigned, slots, slots_used, id);

      unsigned slot = find_available_slot(
         slots_used, ctx.wave_size, ctx.interferences[id].first.size(), type == RegType::sgpr);

      slots[id] = slot;
      is_assigned[id] = true;
   }

   *num_slots = slots_used.size();
}

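/* Merges the reload flags of affine spill ids, assigns a slot to every
 * reloaded spill id, and rewrites all p_spill/p_reload pseudo-instructions:
 * VGPR spills become scratch stores/loads, while SGPR spills target lane
 * (slot % wave_size) of the linear VGPR backing slot group (slot / wave_size).
 */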
void
assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr)
{
   std::vector<uint32_t> slots(ctx.interferences.size());
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: merge the reload flags of affine spill ids so
    * that the whole group is kept if any member is ever reloaded */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   assign_spill_slots_helper(ctx, RegType::sgpr, is_assigned, slots, &ctx.sgpr_spill_slots);
   assign_spill_slots_helper(ctx, RegType::vgpr, is_assigned, slots, &ctx.vgpr_spill_slots);

   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() ==
                   ctx.interferences[vec[j]].first.type());
            assert(slots[vec[i]] == slots[vec[j]]);
         }
      }
   }

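   /* one linear VGPR provides wave_size lanes for SGPR spill slots, so round
    * the SGPR slot count up to whole VGPRs */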
   /* hope we didn't mess up */
   std::vector<Temp> vgpr_spill_temps((ctx.sgpr_spill_slots + ctx.wave_size - 1) / ctx.wave_size);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   unsigned last_top_level_block_idx = 0;
   std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
   for (Block& block : ctx.program->blocks) {

      /* after loops, we insert a user if there was a reload inside the loop */
      if (block.loop_nest_depth == 0) {
         int end_vgprs = 0;
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (reload_in_loop[i])
               end_vgprs++;
         }

         if (end_vgprs > 0) {
            aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
               aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
            int k = 0;
            for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
               if (reload_in_loop[i])
                  destr->operands[k++] = Operand(vgpr_spill_temps[i]);
               reload_in_loop[i] = false;
            }
            /* find insertion point */
            std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
            while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
               ++it;
            block.instructions.insert(it, std::move(destr));
         }
      }

      if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
         last_top_level_block_idx = block.index;

         /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (vgpr_spill_temps[i] == Temp())
               continue;

            bool can_destroy = true;
            for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block.index]) {

               if (ctx.interferences[pair.second].first.type() == RegType::sgpr &&
                   slots[pair.second] / ctx.wave_size == i) {
                  can_destroy = false;
                  break;
               }
            }
            if (can_destroy)
               vgpr_spill_temps[i] = Temp();
         }
      }

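      /* rewrite this block: lower p_spill/p_reload, drop rematerialization
       * instructions that ended up unused, and keep everything else */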
      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      Builder bld(ctx.program, &instructions);
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               spill_vgpr(ctx, block, instructions, *it, slots);
            } else {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = slots[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at the last top-level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill =
                  create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               spill->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (!is_assigned[spill_id]) {
               unreachable("No spill slot assigned for spill id");
            } else if (ctx.interferences[spill_id].first.type() == RegType::vgpr) {
               reload_vgpr(ctx, block, instructions, *it, slots);
            } else {
               uint32_t spill_slot = slots[spill_id];
               reload_in_loop[spill_slot / ctx.wave_size] = block.loop_nest_depth > 0;

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / ctx.wave_size] == Temp()) {
                  Temp linear_vgpr = ctx.program->allocateTmp(v1.as_linear());
                  vgpr_spill_temps[spill_slot / ctx.wave_size] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(
                     aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at the last top-level block */
                     std::vector<aco_ptr<Instruction>>& block_instrs =
                        ctx.program->blocks[last_top_level_block_idx].instructions;
                     block_instrs.insert(std::prev(block_instrs.end()), std::move(create));
                  }
               }

               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(
                  aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / ctx.wave_size]);
               reload->operands[1] = Operand::c32(spill_slot % ctx.wave_size);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            }
         } else if (!ctx.unused_remats.count(it->get())) {
            instructions.emplace_back(std::move(*it));
         }
      }
      block.instructions = std::move(instructions);
   }

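   /* the per-wave scratch size is allocated in 1024-byte (256-dword) granules,
    * hence the alignment below */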
   /* update the required scratch memory */
   ctx.program->config->scratch_bytes_per_wave +=
      align(ctx.vgpr_spill_slots * 4 * ctx.program->wave_size, 1024);

   /* SSA elimination inserts copies for logical phis right before p_logical_end,
    * so if a linear vgpr is used between that p_logical_end and the branch,
    * we need to ensure logical phis don't choose a definition which aliases
    * the linear vgpr.
    * TODO: Moving the spills and reloads to before p_logical_end might produce
    * slightly better code. */
   for (Block& block : ctx.program->blocks) {
      /* loop exits are already handled */
      if (block.logical_preds.size() <= 1)
         continue;

      bool has_logical_phis = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            has_logical_phis = true;
            break;
         } else if (instr->opcode != aco_opcode::p_linear_phi) {
            break;
         }
      }
      if (!has_logical_phis)
         continue;

      std::set<Temp> vgprs;
      for (unsigned pred_idx : block.logical_preds) {
         Block& pred = ctx.program->blocks[pred_idx];
         for (int i = pred.instructions.size() - 1; i >= 0; i--) {
            aco_ptr<Instruction>& pred_instr = pred.instructions[i];
            if (pred_instr->opcode == aco_opcode::p_logical_end) {
               break;
            } else if (pred_instr->opcode == aco_opcode::p_spill ||
                       pred_instr->opcode == aco_opcode::p_reload) {
               vgprs.insert(pred_instr->operands[0].getTemp());
            }
         }
      }
      if (!vgprs.size())
         continue;

      aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(
         aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
      int k = 0;
      for (Temp tmp : vgprs) {
         destr->operands[k++] = Operand(tmp);
      }
      /* find insertion point */
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
      while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
         ++it;
      block.instructions.insert(it, std::move(destr));
   }
}

} /* end namespace */

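/* Spiller entry point. Runs only when the program cannot form even one wave
 * (num_waves == 0): lowers phis to CSSA, computes global next-use distances
 * and rematerialization info, inserts spills/reloads per block, then assigns
 * spill slots and lowers the spill pseudo-instructions.
 */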
void
spill(Program* program, live& live_vars)
{
   program->config->spilled_vgprs = 0;
   program->config->spilled_sgprs = 0;

   program->progress = CompilationProgress::after_spilling;

   /* no spilling when register pressure is low enough */
   if (program->num_waves > 0)
      return;

   /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
   lower_to_cssa(program, live_vars);

   /* calculate target register demand */
   const RegisterDemand demand = program->max_reg_demand; /* current max */
   const uint16_t sgpr_limit = get_addr_sgpr_from_waves(program, program->min_waves);
   const uint16_t vgpr_limit = get_addr_vgpr_from_waves(program, program->min_waves);
   uint16_t extra_vgprs = 0;
   uint16_t extra_sgprs = 0;

   /* calculate extra VGPRs required for spilling SGPRs */
   if (demand.sgpr > sgpr_limit) {
      unsigned sgpr_spills = demand.sgpr - sgpr_limit;
      extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
   }
   /* add extra SGPRs required for spilling VGPRs */
   if (demand.vgpr + extra_vgprs > vgpr_limit) {
      if (program->gfx_level >= GFX9)
         extra_sgprs = 1; /* SADDR */
      else
         extra_sgprs = 5; /* scratch_resource (s4) + scratch_offset (s1) */
      if (demand.sgpr + extra_sgprs > sgpr_limit) {
         /* re-calculate in case something has changed */
         unsigned sgpr_spills = demand.sgpr + extra_sgprs - sgpr_limit;
         extra_vgprs = DIV_ROUND_UP(sgpr_spills, program->wave_size) + 1;
      }
   }
   /* the spiller has to target the following register demand */
   const RegisterDemand target(vgpr_limit - extra_vgprs, sgpr_limit - extra_sgprs);

   /* initialize ctx */
   spill_ctx ctx(target, program, live_vars.register_demand);
   compute_global_next_uses(ctx);
   get_rematerialize_info(ctx);

   /* create spills and reloads */
   for (unsigned i = 0; i < program->blocks.size(); i++)
      spill_block(ctx, i);

   /* assign spill slots and DCE rematerialized code */
   assign_spill_slots(ctx, extra_vgprs);

   /* update live variable information */
   live_vars = live_var_analysis(program);

   assert(program->num_waves > 0);
}

} // namespace aco