aco: implement VGPR spilling
[mesa.git] / src / amd / compiler / aco_spill.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26 #include "aco_ir.h"
27 #include "aco_builder.h"
28 #include "sid.h"
29
30 #include <map>
31 #include <stack>
32
33 /*
34 * Implements the spilling algorithm on SSA-form from
35 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
36 * by Matthias Braun and Sebastian Hack.
37 */
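/*
 * Rough structure of this pass:
 *  - compute_global_next_uses() computes, for each block, the distance to the
 *    next use of every live variable.
 *  - init_live_in_vars() and process_block() decide which variables get spilled,
 *    always choosing the candidate whose next use is furthest away.
 *  - add_coupling_code() inserts p_spill/p_reload pseudo-instructions on the
 *    incoming control flow edges and repairs SSA with new phis where necessary.
 *  - assign_spill_slots() merges spill ids connected by affinities, assigns
 *    non-interfering ids to spill slots and lowers the pseudo-instructions:
 *    SGPR spills go to lanes of linear VGPRs, VGPR spills go to scratch memory.
 */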
38
39 namespace aco {
40
41 namespace {
42
43 struct remat_info {
44 Instruction *instr;
45 };
46
47 struct spill_ctx {
48 RegisterDemand target_pressure;
49 Program* program;
50 std::vector<std::vector<RegisterDemand>> register_demand;
51 std::vector<std::map<Temp, Temp>> renames;
52 std::vector<std::map<Temp, uint32_t>> spills_entry;
53 std::vector<std::map<Temp, uint32_t>> spills_exit;
54 std::vector<bool> processed;
55 std::stack<Block*> loop_header;
56 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
57 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
58 std::vector<std::pair<RegClass, std::set<uint32_t>>> interferences;
59 std::vector<std::vector<uint32_t>> affinities;
60 std::vector<bool> is_reloaded;
61 std::map<Temp, remat_info> remat;
62 std::map<Instruction *, bool> remat_used;
63
64 spill_ctx(const RegisterDemand target_pressure, Program* program,
65 std::vector<std::vector<RegisterDemand>> register_demand)
66 : target_pressure(target_pressure), program(program),
67 register_demand(register_demand), renames(program->blocks.size()),
68 spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
69 processed(program->blocks.size(), false) {}
70
71 void add_affinity(uint32_t first, uint32_t second)
72 {
73 unsigned found_first = affinities.size();
74 unsigned found_second = affinities.size();
75 for (unsigned i = 0; i < affinities.size(); i++) {
76 std::vector<uint32_t>& vec = affinities[i];
77 for (uint32_t entry : vec) {
78 if (entry == first)
79 found_first = i;
80 else if (entry == second)
81 found_second = i;
82 }
83 }
84 if (found_first == affinities.size() && found_second == affinities.size()) {
85 affinities.emplace_back(std::vector<uint32_t>({first, second}));
86 } else if (found_first < affinities.size() && found_second == affinities.size()) {
87 affinities[found_first].push_back(second);
88 } else if (found_second < affinities.size() && found_first == affinities.size()) {
89 affinities[found_second].push_back(first);
90 } else if (found_first != found_second) {
91 /* merge second into first */
92 affinities[found_first].insert(affinities[found_first].end(), affinities[found_second].begin(), affinities[found_second].end());
93 affinities.erase(std::next(affinities.begin(), found_second));
94 } else {
95 assert(found_first == found_second);
96 }
97 }
98
99 uint32_t allocate_spill_id(RegClass rc)
100 {
101 interferences.emplace_back(rc, std::set<uint32_t>());
102 is_reloaded.push_back(false);
103 return next_spill_id++;
104 }
105
106 uint32_t next_spill_id = 0;
107 };
108
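/* Returns the common (linear or logical) dominator of two block indices by
 * walking up the immediate-dominator chain until both indices meet.
 * An index of -1 means "no block yet" and simply yields the other index. */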
109 int32_t get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
110 {
111
112 if (idx_a == -1)
113 return idx_b;
114 if (idx_b == -1)
115 return idx_a;
116 if (is_linear) {
117 while (idx_a != idx_b) {
118 if (idx_a > idx_b)
119 idx_a = program->blocks[idx_a].linear_idom;
120 else
121 idx_b = program->blocks[idx_b].linear_idom;
122 }
123 } else {
124 while (idx_a != idx_b) {
125 if (idx_a > idx_b)
126 idx_a = program->blocks[idx_a].logical_idom;
127 else
128 idx_b = program->blocks[idx_b].logical_idom;
129 }
130 }
131 assert(idx_a != -1);
132 return idx_a;
133 }
134
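/* Recomputes the next-use distances at the start and end of one block by walking
 * its instructions bottom-up. Phi operands are propagated into the predecessors'
 * end-of-block maps; every predecessor whose map changed is put on the worklist. */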
135 void next_uses_per_block(spill_ctx& ctx, unsigned block_idx, std::set<uint32_t>& worklist)
136 {
137 Block* block = &ctx.program->blocks[block_idx];
138 std::map<Temp, std::pair<uint32_t, uint32_t>> next_uses = ctx.next_use_distances_end[block_idx];
139
140 /* to compute the next use distance at the beginning of the block, we have to add the block's size */
141 for (std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = next_uses.begin(); it != next_uses.end(); ++it)
142 it->second.second = it->second.second + block->instructions.size();
143
144 int idx = block->instructions.size() - 1;
145 while (idx >= 0) {
146 aco_ptr<Instruction>& instr = block->instructions[idx];
147
148 if (instr->opcode == aco_opcode::p_linear_phi ||
149 instr->opcode == aco_opcode::p_phi)
150 break;
151
152 for (const Definition& def : instr->definitions) {
153 if (def.isTemp())
154 next_uses.erase(def.getTemp());
155 }
156
157 for (const Operand& op : instr->operands) {
158 /* omit exec mask */
159 if (op.isFixed() && op.physReg() == exec)
160 continue;
161 if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
162 continue;
163 if (op.isTemp())
164 next_uses[op.getTemp()] = {block_idx, idx};
165 }
166 idx--;
167 }
168
169 assert(block_idx != 0 || next_uses.empty());
170 ctx.next_use_distances_start[block_idx] = next_uses;
171 while (idx >= 0) {
172 aco_ptr<Instruction>& instr = block->instructions[idx];
173 assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);
174
175 for (unsigned i = 0; i < instr->operands.size(); i++) {
176 unsigned pred_idx = instr->opcode == aco_opcode::p_phi ?
177 block->logical_preds[i] :
178 block->linear_preds[i];
179 if (instr->operands[i].isTemp()) {
180 if (instr->operands[i].getTemp() == ctx.program->blocks[pred_idx].live_out_exec)
181 continue;
182 if (ctx.next_use_distances_end[pred_idx].find(instr->operands[i].getTemp()) == ctx.next_use_distances_end[pred_idx].end() ||
183 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] != std::pair<uint32_t, uint32_t>{block_idx, 0})
184 worklist.insert(pred_idx);
185 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] = {block_idx, 0};
186 }
187 }
188 next_uses.erase(instr->definitions[0].getTemp());
189 idx--;
190 }
191
192 /* all remaining live vars must be live-out at the predecessors */
193 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : next_uses) {
194 Temp temp = pair.first;
195 uint32_t distance = pair.second.second;
196 uint32_t dom = pair.second.first;
197 std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
198 for (unsigned pred_idx : preds) {
199 if (temp == ctx.program->blocks[pred_idx].live_out_exec)
200 continue;
201 if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
202 distance += 0xFFFF;
203 if (ctx.next_use_distances_end[pred_idx].find(temp) != ctx.next_use_distances_end[pred_idx].end()) {
204 dom = get_dominator(dom, ctx.next_use_distances_end[pred_idx][temp].first, ctx.program, temp.is_linear());
205 distance = std::min(ctx.next_use_distances_end[pred_idx][temp].second, distance);
206 }
207 if (ctx.next_use_distances_end[pred_idx][temp] != std::pair<uint32_t, uint32_t>{dom, distance})
208 worklist.insert(pred_idx);
209 ctx.next_use_distances_end[pred_idx][temp] = {dom, distance};
210 }
211 }
212
213 }
214
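/* Computes next-use distances for the whole program with a backwards fixed-point
 * iteration: blocks are taken from the worklist in descending index order and
 * predecessors are re-queued whenever their end-of-block distances change. */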
215 void compute_global_next_uses(spill_ctx& ctx, std::vector<std::set<Temp>>& live_out)
216 {
217 ctx.next_use_distances_start.resize(ctx.program->blocks.size());
218 ctx.next_use_distances_end.resize(ctx.program->blocks.size());
219 std::set<uint32_t> worklist;
220 for (Block& block : ctx.program->blocks)
221 worklist.insert(block.index);
222
223 while (!worklist.empty()) {
224 std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
225 unsigned block_idx = *b_it;
226 worklist.erase(block_idx);
227 next_uses_per_block(ctx, block_idx, worklist);
228 }
229 }
230
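/* Returns true if the instruction is simple enough to be re-executed at reload
 * time instead of writing its result to a spill slot (see the restrictions below). */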
231 bool should_rematerialize(aco_ptr<Instruction>& instr)
232 {
233 /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
234 if (instr->format != Format::VOP1 && instr->format != Format::SOP1 && instr->format != Format::PSEUDO)
235 return false;
236 /* TODO: pseudo-instruction rematerialization is only supported for p_create_vector */
237 if (instr->format == Format::PSEUDO && instr->opcode != aco_opcode::p_create_vector)
238 return false;
239
240 for (const Operand& op : instr->operands) {
241 /* TODO: rematerialization using temporaries isn't yet supported */
242 if (op.isTemp())
243 return false;
244 }
245
246 /* TODO: rematerialization with multiple definitions isn't yet supported */
247 if (instr->definitions.size() > 1)
248 return false;
249
250 return true;
251 }
252
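/* Creates the instruction that makes a spilled value live again as new_name:
 * either a copy of its rematerializable defining instruction or a p_reload
 * pseudo-instruction referencing the given spill id. */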
253 aco_ptr<Instruction> do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
254 {
255 std::map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
256 if (remat != ctx.remat.end()) {
257 Instruction *instr = remat->second.instr;
258 assert((instr->format == Format::VOP1 || instr->format == Format::SOP1 || instr->format == Format::PSEUDO) && "unsupported");
259 assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector) && "unsupported");
260 assert(instr->definitions.size() == 1 && "unsupported");
261
262 aco_ptr<Instruction> res;
263 if (instr->format == Format::VOP1) {
264 res.reset(create_instruction<VOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
265 } else if (instr->format == Format::SOP1) {
266 res.reset(create_instruction<SOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
267 } else if (instr->format == Format::PSEUDO) {
268 res.reset(create_instruction<Instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
269 }
270 for (unsigned i = 0; i < instr->operands.size(); i++) {
271 res->operands[i] = instr->operands[i];
272 if (instr->operands[i].isTemp()) {
273 assert(false && "unsupported");
274 if (ctx.remat.count(instr->operands[i].getTemp()))
275 ctx.remat_used[ctx.remat[instr->operands[i].getTemp()].instr] = true;
276 }
277 }
278 res->definitions[0] = Definition(new_name);
279 return res;
280 } else {
281 aco_ptr<Pseudo_instruction> reload{create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
282 reload->operands[0] = Operand(spill_id);
283 reload->definitions[0] = Definition(new_name);
284 ctx.is_reloaded[spill_id] = true;
285 return reload;
286 }
287 }
288
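/* Collects all rematerializable definitions of the program so that spills of
 * these temporaries can later be turned into rematerializations. Only
 * instructions within the logical part of a block are considered. */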
289 void get_rematerialize_info(spill_ctx& ctx)
290 {
291 for (Block& block : ctx.program->blocks) {
292 bool logical = false;
293 for (aco_ptr<Instruction>& instr : block.instructions) {
294 if (instr->opcode == aco_opcode::p_logical_start)
295 logical = true;
296 else if (instr->opcode == aco_opcode::p_logical_end)
297 logical = false;
298 if (logical && should_rematerialize(instr)) {
299 for (const Definition& def : instr->definitions) {
300 if (def.isTemp()) {
301 ctx.remat[def.getTemp()] = (remat_info){instr.get()};
302 ctx.remat_used[instr.get()] = false;
303 }
304 }
305 }
306 }
307 }
308 }
309
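/* For each instruction of the block, records the index of the next use of every
 * temporary live at that point (live-outs get their end-of-block distance offset
 * by the block size). process_block() uses this to find the spill candidate with
 * the furthest next use. */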
310 std::vector<std::map<Temp, uint32_t>> local_next_uses(spill_ctx& ctx, Block* block)
311 {
312 std::vector<std::map<Temp, uint32_t>> local_next_uses(block->instructions.size());
313
314 std::map<Temp, uint32_t> next_uses;
315 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block->index])
316 next_uses[pair.first] = pair.second.second + block->instructions.size();
317
318 for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
319 aco_ptr<Instruction>& instr = block->instructions[idx];
320 if (!instr)
321 break;
322 if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
323 break;
324
325 for (const Operand& op : instr->operands) {
326 if (op.isFixed() && op.physReg() == exec)
327 continue;
328 if (op.regClass().type() == RegType::vgpr && op.regClass().is_linear())
329 continue;
330 if (op.isTemp())
331 next_uses[op.getTemp()] = idx;
332 }
333 for (const Definition& def : instr->definitions) {
334 if (def.isTemp())
335 next_uses.erase(def.getTemp());
336 }
337 local_next_uses[idx] = next_uses;
338 }
339 return local_next_uses;
340 }
341
342
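/* Decides which live-in variables enter the block already spilled and fills
 * ctx.spills_entry[block_idx]: loop headers spill live-through variables with
 * the furthest next use, single-predecessor branch blocks keep the predecessor's
 * spills, and merge blocks keep variables that are spilled on all (or, if
 * rematerializable, on any) incoming path. Returns the register demand freed
 * by these spills. */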
343 RegisterDemand init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
344 {
345 RegisterDemand spilled_registers;
346
347 /* first block, nothing was spilled before */
348 if (block_idx == 0)
349 return {0, 0};
350
351 /* loop header block */
352 if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
353 assert(block->linear_preds[0] == block_idx - 1);
354 assert(block->logical_preds[0] == block_idx - 1);
355
356 /* create new loop_info */
357 ctx.loop_header.emplace(block);
358
359 /* check how many live-through variables should be spilled */
360 RegisterDemand new_demand;
361 unsigned i = block_idx;
362 while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
363 assert(ctx.program->blocks.size() > i);
364 new_demand.update(ctx.program->blocks[i].register_demand);
365 i++;
366 }
367 unsigned loop_end = i;
368
369 /* select live-through vgpr variables */
370 while (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
371 unsigned distance = 0;
372 Temp to_spill;
373 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
374 if (pair.first.type() == RegType::vgpr &&
375 pair.second.first >= loop_end &&
376 pair.second.second > distance &&
377 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
378 to_spill = pair.first;
379 distance = pair.second.second;
380 }
381 }
382 if (distance == 0)
383 break;
384
385 uint32_t spill_id;
386 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
387 spill_id = ctx.allocate_spill_id(to_spill.regClass());
388 } else {
389 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
390 }
391
392 ctx.spills_entry[block_idx][to_spill] = spill_id;
393 spilled_registers.vgpr += to_spill.size();
394 }
395
396 /* select live-through sgpr variables */
397 while (new_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
398 unsigned distance = 0;
399 Temp to_spill;
400 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
401 if (pair.first.type() == RegType::sgpr &&
402 pair.second.first >= loop_end &&
403 pair.second.second > distance &&
404 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
405 to_spill = pair.first;
406 distance = pair.second.second;
407 }
408 }
409 if (distance == 0)
410 break;
411
412 uint32_t spill_id;
413 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
414 spill_id = ctx.allocate_spill_id(to_spill.regClass());
415 } else {
416 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
417 }
418
419 ctx.spills_entry[block_idx][to_spill] = spill_id;
420 spilled_registers.sgpr += to_spill.size();
421 }
422
423
424
425 /* shortcut */
426 if (!RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure))
427 return spilled_registers;
428
429 /* if reg pressure is too high at beginning of loop, add variables with furthest use */
430 unsigned idx = 0;
431 while (block->instructions[idx]->opcode == aco_opcode::p_phi || block->instructions[idx]->opcode == aco_opcode::p_linear_phi)
432 idx++;
433
434 assert(idx != 0 && "loop without phis: TODO");
435 idx--;
436 RegisterDemand reg_pressure = ctx.register_demand[block_idx][idx] - spilled_registers;
437 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
438 unsigned distance = 0;
439 Temp to_spill;
440 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
441 if (pair.first.type() == RegType::sgpr &&
442 pair.second.second > distance &&
443 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
444 to_spill = pair.first;
445 distance = pair.second.second;
446 }
447 }
448 assert(distance != 0);
449
450 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
451 spilled_registers.sgpr += to_spill.size();
452 reg_pressure.sgpr -= to_spill.size();
453 }
454 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
455 unsigned distance = 0;
456 Temp to_spill;
457 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
458 if (pair.first.type() == RegType::vgpr &&
459 pair.second.second > distance &&
460 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
461 to_spill = pair.first;
462 distance = pair.second.second;
463 }
464 }
465 assert(distance != 0);
466 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
467 spilled_registers.vgpr += to_spill.size();
468 reg_pressure.vgpr -= to_spill.size();
469 }
470
471 return spilled_registers;
472 }
473
474 /* branch block */
475 if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
476 /* keep variables spilled if they are alive and not used in the current block */
477 unsigned pred_idx = block->linear_preds[0];
478 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
479 if (pair.first.type() == RegType::sgpr &&
480 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
481 ctx.next_use_distances_start[block_idx][pair.first].second > block_idx) {
482 ctx.spills_entry[block_idx].insert(pair);
483 spilled_registers.sgpr += pair.first.size();
484 }
485 }
486 if (block->logical_preds.size() == 1) {
487 pred_idx = block->logical_preds[0];
488 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
489 if (pair.first.type() == RegType::vgpr &&
490 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
491 ctx.next_use_distances_start[block_idx][pair.first].second > block_idx) {
492 ctx.spills_entry[block_idx].insert(pair);
493 spilled_registers.vgpr += pair.first.size();
494 }
495 }
496 }
497
498 /* if register demand is still too high, we just keep all spilled live vars and process the block */
499 if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
500 pred_idx = block->linear_preds[0];
501 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
502 if (pair.first.type() == RegType::sgpr &&
503 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
504 ctx.spills_entry[block_idx].insert(pair).second) {
505 spilled_registers.sgpr += pair.first.size();
506 }
507 }
508 }
509 if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr && block->logical_preds.size() == 1) {
510 pred_idx = block->logical_preds[0];
511 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
512 if (pair.first.type() == RegType::vgpr &&
513 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
514 ctx.spills_entry[block_idx].insert(pair).second) {
515 spilled_registers.vgpr += pair.first.size();
516 }
517 }
518 }
519
520 return spilled_registers;
521 }
522
523 /* else: merge block */
524 std::set<Temp> partial_spills;
525
526 /* keep variables spilled on all incoming paths */
527 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
528 std::vector<unsigned>& preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;
529 /* If it can be rematerialized, keep the variable spilled if no predecessor reloads it.
530 * Otherwise, if any predecessor reloads it, ensure it's reloaded on all other predecessors.
531 * The idea is that it's better in practice to rematerialize redundantly than to create lots of phis. */
532 /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db doesn't seem to exercise this path much) */
533 bool remat = ctx.remat.count(pair.first);
534 bool spill = !remat;
535 uint32_t spill_id = 0;
536 for (unsigned pred_idx : preds) {
537 /* variable is not even live at the predecessor: probably from a phi */
538 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end()) {
539 spill = false;
540 break;
541 }
542 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end()) {
543 if (!remat)
544 spill = false;
545 } else {
546 partial_spills.insert(pair.first);
547 /* it might be that on one incoming path, the variable has a different spill_id, but add_coupling_code() will take care of that. */
548 spill_id = ctx.spills_exit[pred_idx][pair.first];
549 if (remat)
550 spill = true;
551 }
552 }
553 if (spill) {
554 ctx.spills_entry[block_idx][pair.first] = spill_id;
555 partial_spills.erase(pair.first);
556 spilled_registers += pair.first;
557 }
558 }
559
560 /* same for phis */
561 unsigned idx = 0;
562 while (block->instructions[idx]->opcode == aco_opcode::p_linear_phi ||
563 block->instructions[idx]->opcode == aco_opcode::p_phi) {
564 aco_ptr<Instruction>& phi = block->instructions[idx];
565 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
566 bool spill = true;
567
568 for (unsigned i = 0; i < phi->operands.size(); i++) {
569 if (phi->operands[i].isUndefined())
570 continue;
571 assert(phi->operands[i].isTemp());
572 if (ctx.spills_exit[preds[i]].find(phi->operands[i].getTemp()) == ctx.spills_exit[preds[i]].end())
573 spill = false;
574 else
575 partial_spills.insert(phi->definitions[0].getTemp());
576 }
577 if (spill) {
578 ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] = ctx.allocate_spill_id(phi->definitions[0].regClass());
579 partial_spills.erase(phi->definitions[0].getTemp());
580 spilled_registers += phi->definitions[0].getTemp();
581 }
582
583 idx++;
584 }
585
586 /* if reg pressure at first instruction is still too high, add partially spilled variables */
587 RegisterDemand reg_pressure;
588 if (idx == 0) {
589 for (const Definition& def : block->instructions[idx]->definitions) {
590 if (def.isTemp()) {
591 reg_pressure -= def.getTemp();
592 }
593 }
594 for (const Operand& op : block->instructions[idx]->operands) {
595 if (op.isTemp() && op.isFirstKill()) {
596 reg_pressure += op.getTemp();
597 }
598 }
599 } else {
600 idx--;
601 }
602 reg_pressure += ctx.register_demand[block_idx][idx] - spilled_registers;
603
604 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
605 assert(!partial_spills.empty());
606
607 std::set<Temp>::iterator it = partial_spills.begin();
608 Temp to_spill = *it;
609 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
610 while (it != partial_spills.end()) {
611 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
612
613 if (it->type() == RegType::sgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
614 distance = ctx.next_use_distances_start[block_idx][*it].second;
615 to_spill = *it;
616 }
617 ++it;
618 }
619 assert(distance != 0);
620
621 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
622 partial_spills.erase(to_spill);
623 spilled_registers.sgpr += to_spill.size();
624 reg_pressure.sgpr -= to_spill.size();
625 }
626
627 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
628 assert(!partial_spills.empty());
629
630 std::set<Temp>::iterator it = partial_spills.begin();
631 Temp to_spill = *it;
632 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
633 while (it != partial_spills.end()) {
634 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
635
636 if (it->type() == RegType::vgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
637 distance = ctx.next_use_distances_start[block_idx][*it].second;
638 to_spill = *it;
639 }
640 ++it;
641 }
642 assert(distance != 0);
643
644 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
645 partial_spills.erase(to_spill);
646 spilled_registers.vgpr += to_spill.size();
647 reg_pressure.vgpr -= to_spill.size();
648 }
649
650 return spilled_registers;
651 }
652
653
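/* Inserts the spill and reload code required on the incoming control flow edges
 * of a block: reloads for variables that are in a register here but spilled at a
 * predecessor, spills for variables that enter this block spilled but are still
 * in a register at a predecessor, and new phis wherever predecessors renamed a
 * variable differently. Phis whose definition is spilled are removed and their
 * operands are spilled at the predecessors instead. */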
654 void add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
655 {
656 /* no coupling code necessary */
657 if (block->linear_preds.size() == 0)
658 return;
659
660 std::vector<aco_ptr<Instruction>> instructions;
661 /* branch block: TODO take other branch into consideration */
662 if (block->linear_preds.size() == 1 && !(block->kind & block_kind_loop_exit)) {
663 assert(ctx.processed[block->linear_preds[0]]);
664 assert(ctx.register_demand[block_idx].size() == block->instructions.size());
665 std::vector<RegisterDemand> reg_demand;
666 unsigned insert_idx = 0;
667 unsigned pred_idx = block->linear_preds[0];
668
669 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
670 if (!live.first.is_linear())
671 continue;
672 /* still spilled */
673 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
674 continue;
675
676 /* in register at end of predecessor */
677 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
678 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
679 if (it != ctx.renames[pred_idx].end())
680 ctx.renames[block_idx].insert(*it);
681 continue;
682 }
683
684 /* variable is spilled at predecessor and live at current block: create reload instruction */
685 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
686 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
687 instructions.emplace_back(std::move(reload));
688 reg_demand.push_back(RegisterDemand());
689 ctx.renames[block_idx][live.first] = new_name;
690 }
691
692 if (block->logical_preds.size() == 1) {
693 do {
694 assert(insert_idx < block->instructions.size());
695 instructions.emplace_back(std::move(block->instructions[insert_idx]));
696 reg_demand.push_back(ctx.register_demand[block_idx][insert_idx]);
697 insert_idx++;
698 } while (instructions.back()->opcode != aco_opcode::p_logical_start);
699
700 unsigned pred_idx = block->logical_preds[0];
701 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
702 if (live.first.is_linear())
703 continue;
704 /* still spilled */
705 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
706 continue;
707
708 /* in register at end of predecessor */
709 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
710 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
711 if (it != ctx.renames[pred_idx].end())
712 ctx.renames[block_idx].insert(*it);
713 continue;
714 }
715
716 /* variable is spilled at predecessor and live at current block: create reload instruction */
717 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
718 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
719 instructions.emplace_back(std::move(reload));
720 reg_demand.emplace_back(reg_demand.back());
721 ctx.renames[block_idx][live.first] = new_name;
722 }
723 }
724
725 /* combine new reload instructions with original block */
726 if (!instructions.empty()) {
727 reg_demand.insert(reg_demand.end(), std::next(ctx.register_demand[block->index].begin(), insert_idx),
728 ctx.register_demand[block->index].end());
729 ctx.register_demand[block_idx] = std::move(reg_demand);
730 instructions.insert(instructions.end(),
731 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(std::next(block->instructions.begin(), insert_idx)),
732 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
733 block->instructions = std::move(instructions);
734 }
735 return;
736 }
737
738 /* loop header and merge blocks: check if all (linear) predecessors have been processed */
739 for (ASSERTED unsigned pred : block->linear_preds)
740 assert(ctx.processed[pred]);
741
742 /* iterate the phi nodes: if a phi's definition is spilled at block entry, spill its operands at the predecessors instead */
743 for (aco_ptr<Instruction>& phi : block->instructions) {
744 if (phi->opcode != aco_opcode::p_phi &&
745 phi->opcode != aco_opcode::p_linear_phi)
746 break;
747
748 /* if the phi is not spilled, add to instructions */
749 if (ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end()) {
750 instructions.emplace_back(std::move(phi));
751 continue;
752 }
753
754 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
755 uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];
756
757 for (unsigned i = 0; i < phi->operands.size(); i++) {
758 if (phi->operands[i].isUndefined())
759 continue;
760
761 unsigned pred_idx = preds[i];
762 assert(phi->operands[i].isTemp() && phi->operands[i].isKill());
763 Temp var = phi->operands[i].getTemp();
764
765 /* build interferences between the phi def and all spilled variables at the predecessor blocks */
766 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
767 if (var == pair.first)
768 continue;
769 ctx.interferences[def_spill_id].second.emplace(pair.second);
770 ctx.interferences[pair.second].second.emplace(def_spill_id);
771 }
772
773 /* check if variable is already spilled at predecessor */
774 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(var);
775 if (spilled != ctx.spills_exit[pred_idx].end()) {
776 if (spilled->second != def_spill_id)
777 ctx.add_affinity(def_spill_id, spilled->second);
778 continue;
779 }
780
781 /* rename if necessary */
782 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
783 if (rename_it != ctx.renames[pred_idx].end()) {
784 var = rename_it->second;
785 ctx.renames[pred_idx].erase(rename_it);
786 }
787
788 uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());
789 ctx.add_affinity(def_spill_id, spill_id);
790 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
791 spill->operands[0] = Operand(var);
792 spill->operands[1] = Operand(spill_id);
793 Block& pred = ctx.program->blocks[pred_idx];
794 unsigned idx = pred.instructions.size();
795 do {
796 assert(idx != 0);
797 idx--;
798 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
799 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
800 pred.instructions.insert(it, std::move(spill));
801 ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
802 }
803
804 /* remove phi from instructions */
805 phi.reset();
806 }
807
808 /* iterate all other variables spilled at block entry and spill them at the predecessors where they are still in a register */
809 // TODO: would be better to have them sorted: first vgprs and first with longest distance
810 for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
811 std::vector<unsigned> preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;
812
813 for (unsigned pred_idx : preds) {
814 /* variable is already spilled at predecessor */
815 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(pair.first);
816 if (spilled != ctx.spills_exit[pred_idx].end()) {
817 if (spilled->second != pair.second)
818 ctx.add_affinity(pair.second, spilled->second);
819 continue;
820 }
821
822 /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
823 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
824 continue;
825
826 /* add interferences between spilled variable and predecessors exit spills */
827 for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
828 if (exit_spill.first == pair.first)
829 continue;
830 ctx.interferences[exit_spill.second].second.emplace(pair.second);
831 ctx.interferences[pair.second].second.emplace(exit_spill.second);
832 }
833
834 /* variable is in register at predecessor and has to be spilled */
835 /* rename if necessary */
836 Temp var = pair.first;
837 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
838 if (rename_it != ctx.renames[pred_idx].end()) {
839 var = rename_it->second;
840 ctx.renames[pred_idx].erase(rename_it);
841 }
842
843 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
844 spill->operands[0] = Operand(var);
845 spill->operands[1] = Operand(pair.second);
846 Block& pred = ctx.program->blocks[pred_idx];
847 unsigned idx = pred.instructions.size();
848 do {
849 assert(idx != 0);
850 idx--;
851 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
852 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
853 pred.instructions.insert(it, std::move(spill));
854 ctx.spills_exit[pred.index][pair.first] = pair.second;
855 }
856 }
857
858 /* iterate the remaining (unspilled) phis and reload their spilled operands at the end of the predecessor blocks */
859 for (aco_ptr<Instruction>& phi : instructions) {
860 assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
861 assert(ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end());
862
863 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
864 for (unsigned i = 0; i < phi->operands.size(); i++) {
865 if (!phi->operands[i].isTemp())
866 continue;
867 unsigned pred_idx = preds[i];
868
869 /* rename operand */
870 if (ctx.spills_exit[pred_idx].find(phi->operands[i].getTemp()) == ctx.spills_exit[pred_idx].end()) {
871 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(phi->operands[i].getTemp());
872 if (it != ctx.renames[pred_idx].end())
873 phi->operands[i].setTemp(it->second);
874 continue;
875 }
876
877 Temp tmp = phi->operands[i].getTemp();
878
879 /* reload phi operand at end of predecessor block */
880 Temp new_name = {ctx.program->allocateId(), tmp.regClass()};
881 Block& pred = ctx.program->blocks[pred_idx];
882 unsigned idx = pred.instructions.size();
883 do {
884 assert(idx != 0);
885 idx--;
886 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
887 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
888
889 aco_ptr<Instruction> reload = do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);
890 pred.instructions.insert(it, std::move(reload));
891
892 ctx.spills_exit[pred_idx].erase(tmp);
893 ctx.renames[pred_idx][tmp] = new_name;
894 phi->operands[i].setTemp(new_name);
895 }
896 }
897
898 /* iterate live-in variables that are unspilled here and reload them at the predecessors where they are spilled */
899 // TODO: reload at current block if variable is spilled on all predecessors
900 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
901 /* skip spilled variables */
902 if (ctx.spills_entry[block_idx].find(pair.first) != ctx.spills_entry[block_idx].end())
903 continue;
904 std::vector<unsigned> preds = pair.first.is_linear() ? block->linear_preds : block->logical_preds;
905
906 /* variable is dead at predecessor, it must be from a phi */
907 bool is_dead = false;
908 for (unsigned pred_idx : preds) {
909 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
910 is_dead = true;
911 }
912 if (is_dead)
913 continue;
914 for (unsigned pred_idx : preds) {
915 /* the variable is not spilled at the predecessor */
916 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end())
917 continue;
918
919 /* variable is spilled at predecessor and has to be reloaded */
920 Temp new_name = {ctx.program->allocateId(), pair.first.regClass()};
921 Block& pred = ctx.program->blocks[pred_idx];
922 unsigned idx = pred.instructions.size();
923 do {
924 assert(idx != 0);
925 idx--;
926 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
927 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
928
929 aco_ptr<Instruction> reload = do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
930 pred.instructions.insert(it, std::move(reload));
931
932 ctx.spills_exit[pred.index].erase(pair.first);
933 ctx.renames[pred.index][pair.first] = new_name;
934 }
935
936 /* check if we have to create a new phi for this variable */
937 Temp rename = Temp();
938 bool is_same = true;
939 for (unsigned pred_idx : preds) {
940 if (ctx.renames[pred_idx].find(pair.first) == ctx.renames[pred_idx].end()) {
941 if (rename == Temp())
942 rename = pair.first;
943 else
944 is_same = rename == pair.first;
945 } else {
946 if (rename == Temp())
947 rename = ctx.renames[pred_idx][pair.first];
948 else
949 is_same = rename == ctx.renames[pred_idx][pair.first];
950 }
951
952 if (!is_same)
953 break;
954 }
955
956 if (!is_same) {
957 /* the variable was renamed differently in the predecessors: we have to create a phi */
958 aco_opcode opcode = pair.first.is_linear() ? aco_opcode::p_linear_phi : aco_opcode::p_phi;
959 aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
960 rename = {ctx.program->allocateId(), pair.first.regClass()};
961 for (unsigned i = 0; i < phi->operands.size(); i++) {
962 Temp tmp;
963 if (ctx.renames[preds[i]].find(pair.first) != ctx.renames[preds[i]].end())
964 tmp = ctx.renames[preds[i]][pair.first];
965 else if (preds[i] >= block_idx)
966 tmp = rename;
967 else
968 tmp = pair.first;
969 phi->operands[i] = Operand(tmp);
970 }
971 phi->definitions[0] = Definition(rename);
972 instructions.emplace_back(std::move(phi));
973 }
974
975 /* the variable was renamed: add new name to renames */
976 if (!(rename == Temp() || rename == pair.first))
977 ctx.renames[block_idx][pair.first] = rename;
978 }
979
980 /* combine phis with instructions */
981 unsigned idx = 0;
982 while (!block->instructions[idx]) {
983 idx++;
984 }
985
986 ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(), ctx.register_demand[block->index].begin() + idx);
987 ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(), instructions.size(), RegisterDemand());
988
989 std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
990 instructions.insert(instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
991 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
992 block->instructions = std::move(instructions);
993 }
994
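/* Walks the instructions of one block, renaming and reloading spilled operands
 * and, while the register demand still exceeds the target pressure, spilling the
 * live variable with the furthest next use (preferring rematerializable ones).
 * current_spills is updated to the spill set at the block's exit. */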
995 void process_block(spill_ctx& ctx, unsigned block_idx, Block* block,
996 std::map<Temp, uint32_t> &current_spills, RegisterDemand spilled_registers)
997 {
998 std::vector<std::map<Temp, uint32_t>> local_next_use_distance;
999 std::vector<aco_ptr<Instruction>> instructions;
1000 unsigned idx = 0;
1001
1002 /* phis are handled separately */
1003 while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
1004 block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
1005 aco_ptr<Instruction>& instr = block->instructions[idx];
1006 for (const Operand& op : instr->operands) {
1007 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
1008 if (op.isTemp() && ctx.remat.count(op.getTemp()))
1009 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1010 }
1011 instructions.emplace_back(std::move(instr));
1012 idx++;
1013 }
1014
1015 if (block->register_demand.exceeds(ctx.target_pressure))
1016 local_next_use_distance = local_next_uses(ctx, block);
1017
1018 while (idx < block->instructions.size()) {
1019 aco_ptr<Instruction>& instr = block->instructions[idx];
1020
1021 std::map<Temp, std::pair<Temp, uint32_t>> reloads;
1022 std::map<Temp, uint32_t> spills;
1023 /* rename and reload operands */
1024 for (Operand& op : instr->operands) {
1025 if (!op.isTemp())
1026 continue;
1027 if (current_spills.find(op.getTemp()) == current_spills.end()) {
1028 /* the Operand is in register: check if it was renamed */
1029 if (ctx.renames[block_idx].find(op.getTemp()) != ctx.renames[block_idx].end())
1030 op.setTemp(ctx.renames[block_idx][op.getTemp()]);
1031 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
1032 if (ctx.remat.count(op.getTemp()))
1033 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1034 continue;
1035 }
1036 /* the Operand is spilled: add it to reloads */
1037 Temp new_tmp = {ctx.program->allocateId(), op.regClass()};
1038 ctx.renames[block_idx][op.getTemp()] = new_tmp;
1039 reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
1040 current_spills.erase(op.getTemp());
1041 op.setTemp(new_tmp);
1042 spilled_registers -= new_tmp;
1043 }
1044
1045 /* check if register demand is low enough before and after the current instruction */
1046 if (block->register_demand.exceeds(ctx.target_pressure)) {
1047
1048 RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
1049 if (idx == 0) {
1050 RegisterDemand demand_before = new_demand;
1051 for (const Definition& def : instr->definitions)
1052 demand_before -= def.getTemp();
1053 for (const Operand& op : instr->operands) {
1054 if (op.isFirstKill())
1055 demand_before += op.getTemp();
1056 }
1057 new_demand.update(demand_before);
1058 } else {
1059 new_demand.update(ctx.register_demand[block_idx][idx - 1]);
1060 }
1061
1062 assert(!local_next_use_distance.empty());
1063
1064 /* if reg pressure is too high, spill variable with furthest next use */
1065 while (RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
1066 unsigned distance = 0;
1067 Temp to_spill;
1068 bool do_rematerialize = false;
1069 if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
1070 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1071 bool can_rematerialize = ctx.remat.count(pair.first);
1072 if (pair.first.type() == RegType::vgpr &&
1073 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1074 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1075 current_spills.find(pair.first) == current_spills.end() &&
1076 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1077 to_spill = pair.first;
1078 distance = pair.second;
1079 do_rematerialize = can_rematerialize;
1080 }
1081 }
1082 } else {
1083 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1084 bool can_rematerialize = ctx.remat.count(pair.first);
1085 if (pair.first.type() == RegType::sgpr &&
1086 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1087 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1088 current_spills.find(pair.first) == current_spills.end() &&
1089 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1090 to_spill = pair.first;
1091 distance = pair.second;
1092 do_rematerialize = can_rematerialize;
1093 }
1094 }
1095 }
1096
1097 assert(distance != 0 && distance > idx);
1098 uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());
1099
1100 /* add interferences with currently spilled variables */
1101 for (std::pair<Temp, uint32_t> pair : current_spills) {
1102 ctx.interferences[spill_id].second.emplace(pair.second);
1103 ctx.interferences[pair.second].second.emplace(spill_id);
1104 }
1105 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1106 ctx.interferences[spill_id].second.emplace(pair.second.second);
1107 ctx.interferences[pair.second.second].second.emplace(spill_id);
1108 }
1109
1110 current_spills[to_spill] = spill_id;
1111 spilled_registers += to_spill;
1112
1113 /* rename if necessary */
1114 if (ctx.renames[block_idx].find(to_spill) != ctx.renames[block_idx].end()) {
1115 to_spill = ctx.renames[block_idx][to_spill];
1116 }
1117
1118 /* add spill to new instructions */
1119 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
1120 spill->operands[0] = Operand(to_spill);
1121 spill->operands[1] = Operand(spill_id);
1122 instructions.emplace_back(std::move(spill));
1123 }
1124 }
1125
1126 /* add reloads and instruction to new instructions */
1127 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1128 aco_ptr<Instruction> reload = do_reload(ctx, pair.second.first, pair.first, pair.second.second);
1129 instructions.emplace_back(std::move(reload));
1130 }
1131 instructions.emplace_back(std::move(instr));
1132 idx++;
1133 }
1134
1135 block->instructions = std::move(instructions);
1136 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1137 }
1138
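/* Spills one block: determines the entry spill set, adds coupling code on the
 * incoming edges (deferred for loop headers until the whole loop is processed),
 * processes the block itself if necessary and, when the next block leaves a
 * loop, emits the loop header coupling code and propagates the resulting
 * renames through the loop to repair SSA. */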
1139 void spill_block(spill_ctx& ctx, unsigned block_idx)
1140 {
1141 Block* block = &ctx.program->blocks[block_idx];
1142 ctx.processed[block_idx] = true;
1143
1144 /* determine set of variables which are spilled at the beginning of the block */
1145 RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);
1146
1147 /* add interferences for spilled variables */
1148 for (std::pair<Temp, uint32_t> x : ctx.spills_entry[block_idx]) {
1149 for (std::pair<Temp, uint32_t> y : ctx.spills_entry[block_idx])
1150 if (x.second != y.second)
1151 ctx.interferences[x.second].second.emplace(y.second);
1152 }
1153
1154 bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
1155 if (!is_loop_header) {
1156 /* add spill/reload code on incoming control flow edges */
1157 add_coupling_code(ctx, block, block_idx);
1158 }
1159
1160 std::map<Temp, uint32_t> current_spills = ctx.spills_entry[block_idx];
1161
1162 /* check conditions to process this block */
1163 bool process = RegisterDemand(block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
1164 !ctx.renames[block_idx].empty() ||
1165 ctx.remat_used.size();
1166
1167 std::map<Temp, uint32_t>::iterator it = current_spills.begin();
1168 while (!process && it != current_spills.end()) {
1169 if (ctx.next_use_distances_start[block_idx][it->first].first == block_idx)
1170 process = true;
1171 ++it;
1172 }
1173
1174 if (process)
1175 process_block(ctx, block_idx, block, current_spills, spilled_registers);
1176 else
1177 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1178
1179 /* check if the next block leaves the current loop */
1180 if (block->loop_nest_depth == 0 || ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
1181 return;
1182
1183 Block* loop_header = ctx.loop_header.top();
1184
1185 /* preserve original renames at end of loop header block */
1186 std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);
1187
1188 /* add coupling code to all loop header predecessors */
1189 add_coupling_code(ctx, loop_header, loop_header->index);
1190
1191 /* update remat_used for phis added in add_coupling_code() */
1192 for (aco_ptr<Instruction>& instr : loop_header->instructions) {
1193 if (!is_phi(instr))
1194 break;
1195 for (const Operand& op : instr->operands) {
1196 if (op.isTemp() && ctx.remat.count(op.getTemp()))
1197 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1198 }
1199 }
1200
1201 /* propagate new renames through loop: i.e. repair the SSA */
1202 renames.swap(ctx.renames[loop_header->index]);
1203 for (std::pair<Temp, Temp> rename : renames) {
1204 for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
1205 Block& current = ctx.program->blocks[idx];
1206 std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();
1207
1208 /* first rename phis */
1209 while (instr_it != current.instructions.end()) {
1210 aco_ptr<Instruction>& phi = *instr_it;
1211 if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
1212 break;
1213 /* no need to rename the loop header phis once again. this happened in add_coupling_code() */
1214 if (idx == loop_header->index) {
1215 instr_it++;
1216 continue;
1217 }
1218
1219 for (Operand& op : phi->operands) {
1220 if (!op.isTemp())
1221 continue;
1222 if (op.getTemp() == rename.first)
1223 op.setTemp(rename.second);
1224 }
1225 instr_it++;
1226 }
1227
1228 std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = ctx.next_use_distances_start[idx].find(rename.first);
1229
1230 /* variable is not live at beginning of this block */
1231 if (it == ctx.next_use_distances_start[idx].end())
1232 continue;
1233
1234 /* if the variable is live at the block's exit, add rename */
1235 if (ctx.next_use_distances_end[idx].find(rename.first) != ctx.next_use_distances_end[idx].end())
1236 ctx.renames[idx].insert(rename);
1237
1238 /* rename all uses in this block */
1239 bool renamed = false;
1240 while (!renamed && instr_it != current.instructions.end()) {
1241 aco_ptr<Instruction>& instr = *instr_it;
1242 for (Operand& op : instr->operands) {
1243 if (!op.isTemp())
1244 continue;
1245 if (op.getTemp() == rename.first) {
1246 op.setTemp(rename.second);
1247 /* we can stop with this block as soon as the variable is spilled */
1248 if (instr->opcode == aco_opcode::p_spill)
1249 renamed = true;
1250 }
1251 }
1252 instr_it++;
1253 }
1254 }
1255 }
1256
1257 /* remove loop header info from stack */
1258 ctx.loop_header.pop();
1259 }
1260
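/* Builds the buffer resource descriptor (and, if needed, an adjusted scratch
 * offset) used by the scratch buffer instructions that implement VGPR spilling.
 * If is_top_level is false, the code is inserted before the block's
 * p_logical_end instead of being appended. */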
1261 Temp load_scratch_resource(spill_ctx& ctx, Temp& scratch_offset,
1262 std::vector<aco_ptr<Instruction>>& instructions,
1263 unsigned offset, bool is_top_level)
1264 {
1265 Builder bld(ctx.program);
1266 if (is_top_level) {
1267 bld.reset(&instructions);
1268 } else {
1269 /* find p_logical_end */
1270 unsigned idx = instructions.size() - 1;
1271 while (instructions[idx]->opcode != aco_opcode::p_logical_end)
1272 idx--;
1273 bld.reset(&instructions, std::next(instructions.begin(), idx));
1274 }
1275
1276 Temp private_segment_buffer = ctx.program->private_segment_buffer;
1277 if (ctx.program->stage != compute_cs)
1278 private_segment_buffer = bld.smem(aco_opcode::s_load_dwordx2, bld.def(s2), private_segment_buffer, Operand(0u));
1279
1280 if (offset)
1281 scratch_offset = bld.sop2(aco_opcode::s_add_u32, bld.def(s1), bld.def(s1, scc), scratch_offset, Operand(offset));
1282
1283 uint32_t rsrc_conf = S_008F0C_ADD_TID_ENABLE(1) |
1284 S_008F0C_INDEX_STRIDE(ctx.program->wave_size == 64 ? 3 : 2);
1285
1286 if (ctx.program->chip_class >= GFX10) {
1287 rsrc_conf |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
1288 S_008F0C_OOB_SELECT(3) |
1289 S_008F0C_RESOURCE_LEVEL(1);
1290 } else if (ctx.program->chip_class <= GFX7) { /* dfmt modifies stride on GFX8/GFX9 when ADD_TID_EN=1 */
1291 rsrc_conf |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1292 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1293 }
1294 /* older generations need an element size of 16 bytes; the element size field was removed in GFX9 */
1295 if (ctx.program->chip_class <= GFX8)
1296 rsrc_conf |= S_008F0C_ELEMENT_SIZE(3);
1297
1298 return bld.pseudo(aco_opcode::p_create_vector, bld.def(s4),
1299 private_segment_buffer, Operand(-1u),
1300 Operand(rsrc_conf));
1301 }
1302
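/* Assigns spill ids to spill slots: ids connected by an affinity share a slot,
 * interfering ids must not overlap. SGPR spills are packed into lanes of linear
 * VGPRs (at most spills_to_vgpr of them), VGPR spills go to scratch memory.
 * Afterwards, the p_spill/p_reload pseudo-instructions are replaced with actual
 * hardware instructions. */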
1303 void assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr) {
1304 std::map<uint32_t, uint32_t> sgpr_slot;
1305 std::map<uint32_t, uint32_t> vgpr_slot;
1306 std::vector<bool> is_assigned(ctx.interferences.size());
1307
1308 /* first, handle affinities: just merge all interferences into both spill ids */
1309 for (std::vector<uint32_t>& vec : ctx.affinities) {
1310 for (unsigned i = 0; i < vec.size(); i++) {
1311 for (unsigned j = i + 1; j < vec.size(); j++) {
1312 assert(vec[i] != vec[j]);
1313 for (uint32_t id : ctx.interferences[vec[i]].second)
1314 ctx.interferences[id].second.insert(vec[j]);
1315 for (uint32_t id : ctx.interferences[vec[j]].second)
1316 ctx.interferences[id].second.insert(vec[i]);
1317 ctx.interferences[vec[i]].second.insert(ctx.interferences[vec[j]].second.begin(), ctx.interferences[vec[j]].second.end());
1318 ctx.interferences[vec[j]].second.insert(ctx.interferences[vec[i]].second.begin(), ctx.interferences[vec[i]].second.end());
1319
1320 bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
1321 ctx.is_reloaded[vec[i]] = reloaded;
1322 ctx.is_reloaded[vec[j]] = reloaded;
1323 }
1324 }
1325 }
1326 for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
1327 for (ASSERTED uint32_t id : ctx.interferences[i].second)
1328 assert(i != id);
1329
1330 /* for each spill slot, assign as many spill ids as possible */
1331 std::vector<std::set<uint32_t>> spill_slot_interferences;
1332 unsigned slot_idx = 0;
1333 bool done = false;
1334
1335 /* assign sgpr spill slots */
1336 while (!done) {
1337 done = true;
1338 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1339 if (is_assigned[id] || !ctx.is_reloaded[id])
1340 continue;
1341 if (ctx.interferences[id].first.type() != RegType::sgpr)
1342 continue;
1343
1344 /* check interferences */
1345 bool interferes = false;
1346 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1347 if (i == spill_slot_interferences.size())
1348 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1349 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
1350 interferes = true;
1351 break;
1352 }
1353 }
1354 if (interferes) {
1355 done = false;
1356 continue;
1357 }
1358
1359 /* we found a spill id which can be assigned to current spill slot */
1360 sgpr_slot[id] = slot_idx;
1361 is_assigned[id] = true;
1362 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1363 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1364
1365 /* add all affinities: there are no additional interferences */
1366 for (std::vector<uint32_t>& vec : ctx.affinities) {
1367 bool found_affinity = false;
1368 for (uint32_t entry : vec) {
1369 if (entry == id) {
1370 found_affinity = true;
1371 break;
1372 }
1373 }
1374 if (!found_affinity)
1375 continue;
1376 for (uint32_t entry : vec) {
1377 sgpr_slot[entry] = slot_idx;
1378 is_assigned[entry] = true;
1379 }
1380 }
1381 }
1382 slot_idx++;
1383 }
1384
1385 unsigned sgpr_spill_slots = spill_slot_interferences.size();
1386 spill_slot_interferences.clear();
1387 slot_idx = 0;
1388 done = false;
1389
1390 /* assign vgpr spill slots */
1391 while (!done) {
1392 done = true;
1393 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1394 if (is_assigned[id] || !ctx.is_reloaded[id])
1395 continue;
1396 if (ctx.interferences[id].first.type() != RegType::vgpr)
1397 continue;
1398
1399 /* check interferences */
1400 bool interferes = false;
1401 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1402 if (i == spill_slot_interferences.size())
1403 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1404 /* check for interference and ensure that vector regs are stored next to each other */
1405 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end()) {
1406 interferes = true;
1407 break;
1408 }
1409 }
1410 if (interferes) {
1411 done = false;
1412 continue;
1413 }
1414
1415 /* we found a spill id which can be assigned to current spill slot */
1416 vgpr_slot[id] = slot_idx;
1417 is_assigned[id] = true;
1418 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1419 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1420
1421 /* add all affinities: there are no additional interferences */
1422 for (std::vector<uint32_t>& vec : ctx.affinities) {
1423 bool found_affinity = false;
1424 for (uint32_t entry : vec) {
1425 if (entry == id) {
1426 found_affinity = true;
1427 break;
1428 }
1429 }
1430 if (!found_affinity)
1431 continue;
1432 for (uint32_t entry : vec) {
1433 vgpr_slot[entry] = slot_idx;
1434 is_assigned[entry] = true;
1435 }
1436 }
1437 }
1438 slot_idx++;
1439 }
1440
1441 unsigned vgpr_spill_slots = spill_slot_interferences.size();
1442
1443 for (unsigned id = 0; id < is_assigned.size(); id++)
1444 assert(is_assigned[id] || !ctx.is_reloaded[id]);
1445
1446 for (std::vector<uint32_t>& vec : ctx.affinities) {
1447 for (unsigned i = 0; i < vec.size(); i++) {
1448 for (unsigned j = i + 1; j < vec.size(); j++) {
1449 assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
1450 if (!is_assigned[vec[i]])
1451 continue;
1452 assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
1453 assert(ctx.interferences[vec[i]].first.type() == ctx.interferences[vec[j]].first.type());
1454 if (ctx.interferences[vec[i]].first.type() == RegType::sgpr)
1455 assert(sgpr_slot[vec[i]] == sgpr_slot[vec[j]]);
1456 else
1457 assert(vgpr_slot[vec[i]] == vgpr_slot[vec[j]]);
1458 }
1459 }
1460 }
1461
1462 /* sanity check: the linear VGPRs needed for the SGPR spill slots must fit into the budget reserved for spilling */
1463 std::vector<Temp> vgpr_spill_temps((sgpr_spill_slots + 63) / 64);
1464 assert(vgpr_spill_temps.size() <= spills_to_vgpr);
1465
1466 /* replace pseudo instructions with actual hardware instructions */
1467 Temp scratch_offset = ctx.program->scratch_offset, scratch_rsrc = Temp();
1468 unsigned last_top_level_block_idx = 0;
1469 std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
1470 for (Block& block : ctx.program->blocks) {
1471
1472 /* after a loop, insert a p_end_linear_vgpr use if a linear vgpr was reloaded inside the loop, so it stays live across the whole loop */
1473 if (block.loop_nest_depth == 0) {
1474 int end_vgprs = 0;
1475 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1476 if (reload_in_loop[i])
1477 end_vgprs++;
1478 }
1479
1480 if (end_vgprs > 0) {
1481 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
1482 int k = 0;
1483 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1484 if (reload_in_loop[i])
1485 destr->operands[k++] = Operand(vgpr_spill_temps[i]);
1486 reload_in_loop[i] = false;
1487 }
1488 /* find insertion point */
1489 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1490 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1491 ++it;
1492 block.instructions.insert(it, std::move(destr));
1493 }
1494 }
1495
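/* at each top-level block, drop linear VGPR temps whose SGPR spill slots are no
 * longer live-in; a later spill/reload will then start a fresh linear VGPR */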
1496 if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
1497 last_top_level_block_idx = block.index;
1498
1499 /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
1500 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1501 if (vgpr_spill_temps[i] == Temp())
1502 continue;
1503
1504 bool can_destroy = true;
1505 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[block.linear_preds[0]]) {
1506
1507 if (sgpr_slot.find(pair.second) != sgpr_slot.end() &&
1508 sgpr_slot[pair.second] / 64 == i) {
1509 can_destroy = false;
1510 break;
1511 }
1512 }
1513 if (can_destroy)
1514 vgpr_spill_temps[i] = Temp();
1515 }
1516 }
1517
1518 std::vector<aco_ptr<Instruction>>::iterator it;
1519 std::vector<aco_ptr<Instruction>> instructions;
1520 instructions.reserve(block.instructions.size());
1521 Builder bld(ctx.program, &instructions);
1522 for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {
1523
1524 if ((*it)->opcode == aco_opcode::p_spill) {
1525 uint32_t spill_id = (*it)->operands[1].constantValue();
1526
1527 if (!ctx.is_reloaded[spill_id]) {
1528 /* never reloaded, so don't spill */
1529 } else if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1530 /* spill vgpr */
1531 ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
1532 uint32_t spill_slot = vgpr_slot[spill_id];
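/* the MUBUF immediate offset field is limited to 12 bits, so if the per-wave
 * scratch base plus the spill area does not fit, the base is folded into the
 * scratch resource (see load_scratch_resource) instead of the instruction offset */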
1533 bool add_offset = ctx.program->config->scratch_bytes_per_wave + vgpr_spill_slots * 4 > 4096;
1534 unsigned base_offset = add_offset ? 0 : ctx.program->config->scratch_bytes_per_wave;
1535
1536 /* check if the scratch resource descriptor already exists */
1537 if (scratch_rsrc == Temp()) {
1538 unsigned offset = ctx.program->config->scratch_bytes_per_wave - base_offset;
1539 scratch_rsrc = load_scratch_resource(ctx, scratch_offset,
1540 last_top_level_block_idx == block.index ?
1541 instructions : ctx.program->blocks[last_top_level_block_idx].instructions,
1542 offset,
1543 last_top_level_block_idx == block.index);
1544 }
1545
1546 unsigned offset = base_offset + spill_slot * 4;
1547 aco_opcode opcode;
1548 assert((*it)->operands[0].isTemp());
1549 Temp temp = (*it)->operands[0].getTemp();
1550 assert(temp.type() == RegType::vgpr && !temp.is_linear());
1551 switch (temp.size()) {
1552 case 1: opcode = aco_opcode::buffer_store_dword; break;
1553 case 2: opcode = aco_opcode::buffer_store_dwordx2; break;
1554 case 6: temp = bld.tmp(v3); /* fallthrough */
1555 case 3: opcode = aco_opcode::buffer_store_dwordx3; break;
1556 case 8: temp = bld.tmp(v4); /* fallthrough */
1557 case 4: opcode = aco_opcode::buffer_store_dwordx4; break;
1558 default: {
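/* sizes without a matching buffer_store_dwordxN: split the value into
 * single dwords and store each of them separately */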
1559 Instruction* split{create_instruction<Pseudo_instruction>(aco_opcode::p_split_vector, Format::PSEUDO, 1, temp.size())};
1560 split->operands[0] = Operand(temp);
1561 for (unsigned i = 0; i < temp.size(); i++)
1562 split->definitions[i] = bld.def(v1);
1563 bld.insert(split);
1564 opcode = aco_opcode::buffer_store_dword;
1565 for (unsigned i = 0; i < temp.size(); i++)
1566 bld.mubuf(opcode, Operand(), scratch_rsrc, scratch_offset, split->definitions[i].getTemp(), offset + i * 4, false);
1567 continue;
1568 }
1569 }
1570
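/* 6- and 8-dword spills are split into two halves: the lower half (temp2) is
 * stored here, the upper half (temp) by the common store below */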
1571 if ((*it)->operands[0].size() > 4) {
1572 Temp temp2 = bld.pseudo(aco_opcode::p_split_vector, bld.def(temp.regClass()), Definition(temp), (*it)->operands[0]);
1573 bld.mubuf(opcode, Operand(), scratch_rsrc, scratch_offset, temp2, offset, false);
1574 offset += temp.size() * 4;
1575 }
1576 bld.mubuf(opcode, Operand(), scratch_rsrc, scratch_offset, temp, offset, false);
1577
1578 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1579 ctx.program->config->spilled_sgprs += (*it)->operands[0].size();
1580
1581 uint32_t spill_slot = sgpr_slot[spill_id];
1582
1583 /* check if the linear vgpr already exists */
1584 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1585 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1586 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1587 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1588 create->definitions[0] = Definition(linear_vgpr);
1589 /* find the right place to insert this definition */
1590 if (last_top_level_block_idx == block.index) {
1591 /* insert right before the current instruction */
1592 instructions.emplace_back(std::move(create));
1593 } else {
1594 assert(last_top_level_block_idx < block.index);
1595 /* insert before the branch of the last top-level block */
1596 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1597 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1598 }
1599 }
1600
1601 /* spill sgpr: just add the vgpr temp to operands */
1602 Pseudo_instruction* spill = create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
1603 spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1604 spill->operands[1] = Operand(spill_slot % 64);
1605 spill->operands[2] = (*it)->operands[0];
1606 instructions.emplace_back(aco_ptr<Instruction>(spill));
1607 } else {
1608 unreachable("No spill slot assigned for spill id");
1609 }
1610
1611 } else if ((*it)->opcode == aco_opcode::p_reload) {
1612 uint32_t spill_id = (*it)->operands[0].constantValue();
1613 assert(ctx.is_reloaded[spill_id]);
1614
1615 if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1616 /* reload vgpr */
1617 uint32_t spill_slot = vgpr_slot[spill_id];
1618 bool add_offset = ctx.program->config->scratch_bytes_per_wave + vgpr_spill_slots * 4 > 4096;
1619 unsigned base_offset = add_offset ? 0 : ctx.program->config->scratch_bytes_per_wave;
1620
1621 /* check if the scratch resource descriptor already exists */
1622 if (scratch_rsrc == Temp()) {
1623 unsigned offset = ctx.program->config->scratch_bytes_per_wave - base_offset;
1624 scratch_rsrc = load_scratch_resource(ctx, scratch_offset,
1625 last_top_level_block_idx == block.index ?
1626 instructions : ctx.program->blocks[last_top_level_block_idx].instructions,
1627 offset,
1628 last_top_level_block_idx == block.index);
1629 }
1630
1631 unsigned offset = base_offset + spill_slot * 4;
1632 aco_opcode opcode;
1633 Definition def = (*it)->definitions[0];
1634 switch (def.size()) {
1635 case 1: opcode = aco_opcode::buffer_load_dword; break;
1636 case 2: opcode = aco_opcode::buffer_load_dwordx2; break;
1637 case 6: def = bld.def(v3); /* fallthrough */
1638 case 3: opcode = aco_opcode::buffer_load_dwordx3; break;
1639 case 8: def = bld.def(v4); /* fallthrough */
1640 case 4: opcode = aco_opcode::buffer_load_dwordx4; break;
1641 default: {
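/* sizes without a matching buffer_load_dwordxN: load single dwords and
 * recombine them with p_create_vector */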
1642 Instruction* vec{create_instruction<Pseudo_instruction>(aco_opcode::p_create_vector, Format::PSEUDO, def.size(), 1)};
1643 vec->definitions[0] = def;
1644 opcode = aco_opcode::buffer_load_dword;
1645 for (unsigned i = 0; i < def.size(); i++) {
1646 Temp tmp = bld.tmp(v1);
1647 vec->operands[i] = Operand(tmp);
1648 bld.mubuf(opcode, Definition(tmp), Operand(), scratch_rsrc, scratch_offset, offset + i * 4, false);
1649 }
1650 bld.insert(vec);
1651 continue;
1652 }
1653 }
1654
1655 bld.mubuf(opcode, def, Operand(), scratch_rsrc, scratch_offset, offset, false);
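/* 6- and 8-dword reloads need a second load for the upper half; both halves
 * are combined into the original definition */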
1656 if ((*it)->definitions[0].size() > 4) {
1657 Temp temp2 = bld.mubuf(opcode, bld.def(def.regClass()), Operand(), scratch_rsrc, scratch_offset, offset + def.size() * 4, false);
1658 bld.pseudo(aco_opcode::p_create_vector, (*it)->definitions[0], def.getTemp(), temp2);
1659 }
1660
1661 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1662 uint32_t spill_slot = sgpr_slot[spill_id];
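/* remember reloads inside loops so that a p_end_linear_vgpr use is added
 * after the loop (see the handling at the start of each top-level block) */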
1663 reload_in_loop[spill_slot / 64] = block.loop_nest_depth > 0;
1664
1665 /* check if the linear vgpr already exists */
1666 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1667 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1668 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1669 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1670 create->definitions[0] = Definition(linear_vgpr);
1671 /* find the right place to insert this definition */
1672 if (last_top_level_block_idx == block.index) {
1673 /* insert right before the current instruction */
1674 instructions.emplace_back(std::move(create));
1675 } else {
1676 assert(last_top_level_block_idx < block.index);
1677 /* insert before the branch of the last top-level block */
1678 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1679 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1680 }
1681 }
1682
1683 /* reload sgpr: just add the vgpr temp to operands */
1684 Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 2, 1);
1685 reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1686 reload->operands[1] = Operand(spill_slot % 64);
1687 reload->definitions[0] = (*it)->definitions[0];
1688 instructions.emplace_back(aco_ptr<Instruction>(reload));
1689 } else {
1690 unreachable("No spill slot assigned for spill id");
1691 }
1692 } else if (!ctx.remat_used.count(it->get()) || ctx.remat_used[it->get()]) {
1693 instructions.emplace_back(std::move(*it));
1694 }
1695
1696 }
1697 block.instructions = std::move(instructions);
1698 }
1699
1700 /* update required scratch memory */
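/* each VGPR spill slot occupies 4 bytes per lane, i.e. 4 * wave_size bytes per
 * wave; the total is aligned to 1 KiB */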
1701 ctx.program->config->scratch_bytes_per_wave += align(vgpr_spill_slots * 4 * ctx.program->wave_size, 1024);
1702
1703 /* SSA elimination inserts copies for logical phis right before p_logical_end,
1704 * so if a linear vgpr is used between that p_logical_end and the branch,
1705 * we need to ensure logical phis don't choose a definition which aliases
1706 * the linear vgpr.
1707 * TODO: Moving the spills and reloads to before p_logical_end might produce
1708 * slightly better code. */
1709 for (Block& block : ctx.program->blocks) {
1710 /* loop exits are already handled */
1711 if (block.logical_preds.size() <= 1)
1712 continue;
1713
1714 bool has_logical_phis = false;
1715 for (aco_ptr<Instruction>& instr : block.instructions) {
1716 if (instr->opcode == aco_opcode::p_phi) {
1717 has_logical_phis = true;
1718 break;
1719 } else if (instr->opcode != aco_opcode::p_linear_phi) {
1720 break;
1721 }
1722 }
1723 if (!has_logical_phis)
1724 continue;
1725
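/* only the SGPR spill/reload pseudo instructions remain at this point; their
 * first operand is the linear VGPR holding the spill slots */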
1726 std::set<Temp> vgprs;
1727 for (unsigned pred_idx : block.logical_preds) {
1728 Block& pred = ctx.program->blocks[pred_idx];
1729 for (int i = pred.instructions.size() - 1; i >= 0; i--) {
1730 aco_ptr<Instruction>& pred_instr = pred.instructions[i];
1731 if (pred_instr->opcode == aco_opcode::p_logical_end) {
1732 break;
1733 } else if (pred_instr->opcode == aco_opcode::p_spill ||
1734 pred_instr->opcode == aco_opcode::p_reload) {
1735 vgprs.insert(pred_instr->operands[0].getTemp());
1736 }
1737 }
1738 }
1739 if (!vgprs.size())
1740 continue;
1741
1742 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
1743 int k = 0;
1744 for (Temp tmp : vgprs) {
1745 destr->operands[k++] = Operand(tmp);
1746 }
1747 /* find insertion point */
1748 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1749 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1750 ++it;
1751 block.instructions.insert(it, std::move(destr));
1752 }
1753 }
1754
1755 } /* end namespace */
1756
1757
1758 void spill(Program* program, live& live_vars, const struct radv_nir_compiler_options *options)
1759 {
1760 program->config->spilled_vgprs = 0;
1761 program->config->spilled_sgprs = 0;
1762
1763 /* no spilling when register pressure is low enough */
1764 if (program->num_waves > 0)
1765 return;
1766
1767 /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
1768 lower_to_cssa(program, live_vars, options);
1769
1770 /* calculate target register demand */
1771 RegisterDemand register_target = program->max_reg_demand;
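/* SGPR demand above the limit is spilled into linear VGPRs: each VGPR holds
 * 64 spill slots (one per lane), rounded up with some extra slack */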
1772 if (register_target.sgpr > program->sgpr_limit)
1773 register_target.vgpr += (register_target.sgpr - program->sgpr_limit + 63 + 32) / 64;
1774 register_target.sgpr = program->sgpr_limit;
1775
1776 if (register_target.vgpr > program->vgpr_limit)
1777 register_target.sgpr = program->sgpr_limit - 5;
1778 register_target.vgpr = program->vgpr_limit - (register_target.vgpr - program->max_reg_demand.vgpr);
1779
1780 int spills_to_vgpr = (program->max_reg_demand.sgpr - register_target.sgpr + 63 + 32) / 64;
1781
1782 /* initialize ctx */
1783 spill_ctx ctx(register_target, program, live_vars.register_demand);
1784 compute_global_next_uses(ctx, live_vars.live_out);
1785 get_rematerialize_info(ctx);
1786
1787 /* create spills and reloads */
1788 for (unsigned i = 0; i < program->blocks.size(); i++)
1789 spill_block(ctx, i);
1790
1791 /* assign spill slots and DCE rematerialized code */
1792 assign_spill_slots(ctx, spills_to_vgpr);
1793
1794 /* update live variable information */
1795 live_vars = live_var_analysis(program, options);
1796
1797 assert(program->num_waves > 0);
1798 }
1799
1800 }
1801