aco: fix transitive affinities of spilled variables
[mesa.git] / src / amd / compiler / aco_spill.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26 #include "aco_ir.h"
27 #include <map>
28 #include <stack>
29 #include "vulkan/radv_shader.h"
30
31
32 /*
33 * Implements the spilling algorithm on SSA-form from
34 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
35 * by Matthias Braun and Sebastian Hack.
36 */
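/* The main building blocks implemented in this file:
 *  - compute_global_next_uses(): backward worklist dataflow computing, per block,
 *    the next-use block and distance of every live variable.
 *  - get_rematerialize_info(): collects cheap instructions whose results can be
 *    re-created instead of being reloaded from a spill slot.
 *  - spill_block(): per-block driver using init_live_in_vars(), add_coupling_code()
 *    and process_block() to decide what gets spilled where.
 *  - assign_spill_slots(): maps spill ids to slots and lowers the p_spill/p_reload
 *    pseudo instructions.
 */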
37
38 namespace aco {
39
40 namespace {
41
42 struct remat_info {
43 Instruction *instr;
44 };
45
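/* Central state of the spiller:
 * - spills_entry/spills_exit: per block, the Temps that are spilled (and their
 *   spill ids) at block entry resp. exit.
 * - renames: per block, the new SSA name a reloaded Temp received.
 * - next_use_distances_start/end: per block, for each live Temp the block of its
 *   next use and the distance to it in instructions.
 * - interferences: per spill id, its register class and the set of spill ids it
 *   must not share a spill slot with.
 * - affinities: disjoint groups of spill ids that should end up in the same spill
 *   slot (e.g. a spilled phi and its spilled operands).
 * - is_reloaded: spill ids that are never reloaded don't need to be spilled at all.
 */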
46 struct spill_ctx {
47 RegisterDemand target_pressure;
48 Program* program;
49 std::vector<std::vector<RegisterDemand>> register_demand;
50 std::vector<std::map<Temp, Temp>> renames;
51 std::vector<std::map<Temp, uint32_t>> spills_entry;
52 std::vector<std::map<Temp, uint32_t>> spills_exit;
53 std::vector<bool> processed;
54 std::stack<Block*> loop_header;
55 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
56 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
57 std::vector<std::pair<RegClass, std::set<uint32_t>>> interferences;
58 std::vector<std::vector<uint32_t>> affinities;
59 std::vector<bool> is_reloaded;
60 std::map<Temp, remat_info> remat;
61 std::map<Instruction *, bool> remat_used;
62
63 spill_ctx(const RegisterDemand target_pressure, Program* program,
64 std::vector<std::vector<RegisterDemand>> register_demand)
65 : target_pressure(target_pressure), program(program),
66 register_demand(register_demand), renames(program->blocks.size()),
67 spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
68 processed(program->blocks.size(), false) {}
69
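/* Records that two spill ids belong to the same affinity group. Groups are kept
 * disjoint: if both ids already belong to different groups, the groups are merged,
 * which makes affinities transitive across multiple add_affinity() calls. */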
70 void add_affinity(uint32_t first, uint32_t second)
71 {
72 unsigned found_first = affinities.size();
73 unsigned found_second = affinities.size();
74 for (unsigned i = 0; i < affinities.size(); i++) {
75 std::vector<uint32_t>& vec = affinities[i];
76 for (uint32_t entry : vec) {
77 if (entry == first)
78 found_first = i;
79 else if (entry == second)
80 found_second = i;
81 }
82 }
83 if (found_first == affinities.size() && found_second == affinities.size()) {
84 affinities.emplace_back(std::vector<uint32_t>({first, second}));
85 } else if (found_first < affinities.size() && found_second == affinities.size()) {
86 affinities[found_first].push_back(second);
87 } else if (found_second < affinities.size() && found_first == affinities.size()) {
88 affinities[found_second].push_back(first);
89 } else if (found_first != found_second) {
90 /* merge second into first */
91 affinities[found_first].insert(affinities[found_first].end(), affinities[found_second].begin(), affinities[found_second].end());
92 affinities.erase(std::next(affinities.begin(), found_second));
93 } else {
94 assert(found_first == found_second);
95 }
96 }
97
98 uint32_t allocate_spill_id(RegClass rc)
99 {
100 interferences.emplace_back(rc, std::set<uint32_t>());
101 is_reloaded.push_back(false);
102 return next_spill_id++;
103 }
104
105 uint32_t next_spill_id = 0;
106 };
107
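/* Returns the common dominator of two block indices by walking up the linear or
 * logical immediate dominator chain; an index of -1 means "no block yet". */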
108 int32_t get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
109 {
110
111 if (idx_a == -1)
112 return idx_b;
113 if (idx_b == -1)
114 return idx_a;
115 if (is_linear) {
116 while (idx_a != idx_b) {
117 if (idx_a > idx_b)
118 idx_a = program->blocks[idx_a].linear_idom;
119 else
120 idx_b = program->blocks[idx_b].linear_idom;
121 }
122 } else {
123 while (idx_a != idx_b) {
124 if (idx_a > idx_b)
125 idx_a = program->blocks[idx_a].logical_idom;
126 else
127 idx_b = program->blocks[idx_b].logical_idom;
128 }
129 }
130 assert(idx_a != -1);
131 return idx_a;
132 }
133
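/* Backward pass over a single block: recomputes next_use_distances_start[block_idx]
 * and updates next_use_distances_end of the predecessors. Distances are counted in
 * instructions and the stored block index is the common dominator of all next uses.
 * Predecessors whose information changed are put back on the worklist. */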
134 void next_uses_per_block(spill_ctx& ctx, unsigned block_idx, std::set<uint32_t>& worklist)
135 {
136 Block* block = &ctx.program->blocks[block_idx];
137 std::map<Temp, std::pair<uint32_t, uint32_t>> next_uses = ctx.next_use_distances_end[block_idx];
138
139 /* to compute the next use distance at the beginning of the block, we have to add the block's size */
140 for (std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = next_uses.begin(); it != next_uses.end();) {
141 it->second.second = it->second.second + block->instructions.size();
142
143 /* remove the live out exec mask as we really don't want to spill it */
144 if (it->first == block->live_out_exec)
145 it = next_uses.erase(it);
146 else
147 ++it;
148 }
149
150 int idx = block->instructions.size() - 1;
151 while (idx >= 0) {
152 aco_ptr<Instruction>& instr = block->instructions[idx];
153
154 if (instr->opcode == aco_opcode::p_linear_phi ||
155 instr->opcode == aco_opcode::p_phi)
156 break;
157
158 for (const Definition& def : instr->definitions) {
159 if (def.isTemp())
160 next_uses.erase(def.getTemp());
161 }
162
163 for (const Operand& op : instr->operands) {
164 /* omit exec mask */
165 if (op.isFixed() && op.physReg() == exec)
166 continue;
167 if (op.isTemp())
168 next_uses[op.getTemp()] = {block_idx, idx};
169 }
170 idx--;
171 }
172
173 assert(block_idx != 0 || next_uses.empty());
174 ctx.next_use_distances_start[block_idx] = next_uses;
175 while (idx >= 0) {
176 aco_ptr<Instruction>& instr = block->instructions[idx];
177 assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);
178
179 for (unsigned i = 0; i < instr->operands.size(); i++) {
180 unsigned pred_idx = instr->opcode == aco_opcode::p_phi ?
181 block->logical_preds[i] :
182 block->linear_preds[i];
183 if (instr->operands[i].isTemp()) {
184 if (ctx.next_use_distances_end[pred_idx].find(instr->operands[i].getTemp()) == ctx.next_use_distances_end[pred_idx].end() ||
185 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] != std::pair<uint32_t, uint32_t>{block_idx, 0})
186 worklist.insert(pred_idx);
187 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] = {block_idx, 0};
188 }
189 }
190 next_uses.erase(instr->definitions[0].getTemp());
191 idx--;
192 }
193
194 /* all remaining live vars must be live-out at the predecessors */
195 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : next_uses) {
196 Temp temp = pair.first;
197 uint32_t distance = pair.second.second;
198 uint32_t dom = pair.second.first;
199 std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
200 for (unsigned pred_idx : preds) {
201 if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
202 distance += 0xFFFF;
203 if (ctx.next_use_distances_end[pred_idx].find(temp) != ctx.next_use_distances_end[pred_idx].end()) {
204 dom = get_dominator(dom, ctx.next_use_distances_end[pred_idx][temp].first, ctx.program, temp.is_linear());
205 distance = std::min(ctx.next_use_distances_end[pred_idx][temp].second, distance);
206 }
207 if (ctx.next_use_distances_end[pred_idx][temp] != std::pair<uint32_t, uint32_t>{dom, distance})
208 worklist.insert(pred_idx);
209 ctx.next_use_distances_end[pred_idx][temp] = {dom, distance};
210 }
211 }
212
213 }
214
215 void compute_global_next_uses(spill_ctx& ctx, std::vector<std::set<Temp>>& live_out)
216 {
217 ctx.next_use_distances_start.resize(ctx.program->blocks.size());
218 ctx.next_use_distances_end.resize(ctx.program->blocks.size());
219 std::set<uint32_t> worklist;
220 for (Block& block : ctx.program->blocks)
221 worklist.insert(block.index);
222
223 while (!worklist.empty()) {
224 std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
225 unsigned block_idx = *b_it;
226 worklist.erase(block_idx);
227 next_uses_per_block(ctx, block_idx, worklist);
228 }
229 }
230
231 bool should_rematerialize(aco_ptr<Instruction>& instr)
232 {
233 /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
234 if (instr->format != Format::VOP1 && instr->format != Format::SOP1 && instr->format != Format::PSEUDO)
235 return false;
236 /* TODO: pseudo-instruction rematerialization is only supported for p_create_vector */
237 if (instr->format == Format::PSEUDO && instr->opcode != aco_opcode::p_create_vector)
238 return false;
239
240 for (const Operand& op : instr->operands) {
241 /* TODO: rematerialization using temporaries isn't yet supported */
242 if (op.isTemp())
243 return false;
244 }
245
246 /* TODO: rematerialization with multiple definitions isn't yet supported */
247 if (instr->definitions.size() > 1)
248 return false;
249
250 return true;
251 }
252
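/* Creates the instruction that makes a spilled value available again as new_name:
 * either a copy of its rematerializable defining instruction, or a p_reload pseudo
 * instruction referencing spill_id (which is then marked as actually reloaded). */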
253 aco_ptr<Instruction> do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
254 {
255 std::map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
256 if (remat != ctx.remat.end()) {
257 Instruction *instr = remat->second.instr;
258 assert((instr->format == Format::VOP1 || instr->format == Format::SOP1 || instr->format == Format::PSEUDO) && "unsupported");
259 assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector) && "unsupported");
260 assert(instr->definitions.size() == 1 && "unsupported");
261
262 aco_ptr<Instruction> res;
263 if (instr->format == Format::VOP1) {
264 res.reset(create_instruction<VOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
265 } else if (instr->format == Format::SOP1) {
266 res.reset(create_instruction<SOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
267 } else if (instr->format == Format::PSEUDO) {
268 res.reset(create_instruction<Instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
269 }
270 for (unsigned i = 0; i < instr->operands.size(); i++) {
271 res->operands[i] = instr->operands[i];
272 if (instr->operands[i].isTemp()) {
273 assert(false && "unsupported");
274 if (ctx.remat.count(instr->operands[i].getTemp()))
275 ctx.remat_used[ctx.remat[instr->operands[i].getTemp()].instr] = true;
276 }
277 }
278 res->definitions[0] = Definition(new_name);
279 return res;
280 } else {
281 aco_ptr<Pseudo_instruction> reload{create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
282 reload->operands[0] = Operand(spill_id);
283 reload->definitions[0] = Definition(new_name);
284 ctx.is_reloaded[spill_id] = true;
285 return reload;
286 }
287 }
288
289 void get_rematerialize_info(spill_ctx& ctx)
290 {
291 for (Block& block : ctx.program->blocks) {
292 bool logical = false;
293 for (aco_ptr<Instruction>& instr : block.instructions) {
294 if (instr->opcode == aco_opcode::p_logical_start)
295 logical = true;
296 else if (instr->opcode == aco_opcode::p_logical_end)
297 logical = false;
298 if (logical && should_rematerialize(instr)) {
299 for (const Definition& def : instr->definitions) {
300 if (def.isTemp()) {
301 ctx.remat[def.getTemp()] = (remat_info){instr.get()};
302 ctx.remat_used[instr.get()] = false;
303 }
304 }
305 }
306 }
307 }
308 }
309
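/* Computes, for each instruction index of the block, the next-use distance of every
 * Temp live at that point, seeded with the distances at the block's end. */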
310 std::vector<std::map<Temp, uint32_t>> local_next_uses(spill_ctx& ctx, Block* block)
311 {
312 std::vector<std::map<Temp, uint32_t>> local_next_uses(block->instructions.size());
313
314 std::map<Temp, uint32_t> next_uses;
315 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block->index]) {
316 /* omit live out exec mask */
317 if (pair.first == block->live_out_exec)
318 continue;
319
320 next_uses[pair.first] = pair.second.second + block->instructions.size();
321 }
322
323 for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
324 aco_ptr<Instruction>& instr = block->instructions[idx];
325 if (!instr)
326 break;
327 if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
328 break;
329
330 for (const Operand& op : instr->operands) {
331 if (op.isFixed() && op.physReg() == exec)
332 continue;
333 if (op.isTemp())
334 next_uses[op.getTemp()] = idx;
335 }
336 for (const Definition& def : instr->definitions) {
337 if (def.isTemp())
338 next_uses.erase(def.getTemp());
339 }
340 local_next_uses[idx] = next_uses;
341 }
342 return local_next_uses;
343 }
344
345
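/* Decides which live-in variables enter the block already spilled (spills_entry) and
 * returns the register demand this frees. Loop headers spill live-through variables
 * with the furthest next use, single-predecessor blocks keep the predecessor's spills
 * where that helps, and merge blocks keep variables spilled on all (or, if they can
 * be rematerialized, on any) incoming paths. */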
346 RegisterDemand init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
347 {
348 RegisterDemand spilled_registers;
349
350 /* first block, nothing was spilled before */
351 if (block_idx == 0)
352 return {0, 0};
353
354 /* loop header block */
355 if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
356 assert(block->linear_preds[0] == block_idx - 1);
357 assert(block->logical_preds[0] == block_idx - 1);
358
359 /* create new loop_info */
360 ctx.loop_header.emplace(block);
361
362 /* check how many live-through variables should be spilled */
363 RegisterDemand new_demand;
364 unsigned i = block_idx;
365 while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
366 assert(ctx.program->blocks.size() > i);
367 new_demand.update(ctx.program->blocks[i].register_demand);
368 i++;
369 }
370 unsigned loop_end = i;
371
372 /* select live-through vgpr variables */
373 while (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
374 unsigned distance = 0;
375 Temp to_spill;
376 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
377 if (pair.first.type() == RegType::vgpr &&
378 pair.second.first >= loop_end &&
379 pair.second.second > distance &&
380 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
381 to_spill = pair.first;
382 distance = pair.second.second;
383 }
384 }
385 if (distance == 0)
386 break;
387
388 uint32_t spill_id;
389 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
390 spill_id = ctx.allocate_spill_id(to_spill.regClass());
391 } else {
392 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
393 }
394
395 ctx.spills_entry[block_idx][to_spill] = spill_id;
396 spilled_registers.vgpr += to_spill.size();
397 }
398
399 /* select live-through sgpr variables */
400 while (new_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
401 unsigned distance = 0;
402 Temp to_spill;
403 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
404 if (pair.first.type() == RegType::sgpr &&
405 pair.second.first >= loop_end &&
406 pair.second.second > distance &&
407 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
408 to_spill = pair.first;
409 distance = pair.second.second;
410 }
411 }
412 if (distance == 0)
413 break;
414
415 uint32_t spill_id;
416 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
417 spill_id = ctx.allocate_spill_id(to_spill.regClass());
418 } else {
419 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
420 }
421
422 ctx.spills_entry[block_idx][to_spill] = spill_id;
423 spilled_registers.sgpr += to_spill.size();
424 }
425
426
427
428 /* shortcut */
429 if (!RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure))
430 return spilled_registers;
431
432 /* if reg pressure is too high at beginning of loop, add variables with furthest use */
433 unsigned idx = 0;
434 while (block->instructions[idx]->opcode == aco_opcode::p_phi || block->instructions[idx]->opcode == aco_opcode::p_linear_phi)
435 idx++;
436
437 assert(idx != 0 && "loop without phis: TODO");
438 idx--;
439 RegisterDemand reg_pressure = ctx.register_demand[block_idx][idx] - spilled_registers;
440 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
441 unsigned distance = 0;
442 Temp to_spill;
443 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
444 if (pair.first.type() == RegType::sgpr &&
445 pair.second.second > distance &&
446 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
447 to_spill = pair.first;
448 distance = pair.second.second;
449 }
450 }
451 assert(distance != 0);
452
453 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
454 spilled_registers.sgpr += to_spill.size();
455 reg_pressure.sgpr -= to_spill.size();
456 }
457 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
458 unsigned distance = 0;
459 Temp to_spill;
460 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
461 if (pair.first.type() == RegType::vgpr &&
462 pair.second.second > distance &&
463 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
464 to_spill = pair.first;
465 distance = pair.second.second;
466 }
467 }
468 assert(distance != 0);
469 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
470 spilled_registers.vgpr += to_spill.size();
471 reg_pressure.vgpr -= to_spill.size();
472 }
473
474 return spilled_registers;
475 }
476
477 /* branch block */
478 if (block->linear_preds.size() == 1) {
479 /* keep variables spilled if they are alive and not used in the current block */
480 unsigned pred_idx = block->linear_preds[0];
481 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
482 if (pair.first.type() == RegType::sgpr &&
483 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
484 ctx.next_use_distances_start[block_idx][pair.first].second > block_idx) {
485 ctx.spills_entry[block_idx].insert(pair);
486 spilled_registers.sgpr += pair.first.size();
487 }
488 }
489 if (block->logical_preds.size() == 1) {
490 pred_idx = block->logical_preds[0];
491 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
492 if (pair.first.type() == RegType::vgpr &&
493 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
494 ctx.next_use_distances_end[pred_idx][pair.first].second > block_idx) {
495 ctx.spills_entry[block_idx].insert(pair);
496 spilled_registers.vgpr += pair.first.size();
497 }
498 }
499 }
500
501 /* if register demand is still too high, we just keep all spilled live vars and process the block */
502 if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
503 pred_idx = block->linear_preds[0];
504 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
505 if (pair.first.type() == RegType::sgpr &&
506 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
507 ctx.spills_entry[block_idx].insert(pair).second) {
508 spilled_registers.sgpr += pair.first.size();
509 }
510 }
511 }
512 if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr && block->logical_preds.size() == 1) {
513 pred_idx = block->logical_preds[0];
514 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
515 if (pair.first.type() == RegType::vgpr &&
516 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
517 ctx.spills_entry[block_idx].insert(pair).second) {
518 spilled_registers.vgpr += pair.first.size();
519 }
520 }
521 }
522
523 return spilled_registers;
524 }
525
526 /* else: merge block */
527 std::set<Temp> partial_spills;
528
529 /* keep variables spilled on all incoming paths */
530 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
531 std::vector<unsigned>& preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
532 /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload it.
533 * Otherwise, if any predecessor reloads it, ensure it's reloaded on all other predecessors.
534 * The idea is that it's better in practice to rematerialize redundantly than to create lots of phis. */
535 /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db doesn't seem to exercise this path much) */
536 bool remat = ctx.remat.count(pair.first);
537 bool spill = !remat;
538 uint32_t spill_id = 0;
539 for (unsigned pred_idx : preds) {
540 /* variable is not even live at the predecessor: probably from a phi */
541 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end()) {
542 spill = false;
543 break;
544 }
545 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end()) {
546 if (!remat)
547 spill = false;
548 } else {
549 partial_spills.insert(pair.first);
550 /* it might be that on one incoming path, the variable has a different spill_id, but add_coupling_code() will take care of that. */
551 spill_id = ctx.spills_exit[pred_idx][pair.first];
552 if (remat)
553 spill = true;
554 }
555 }
556 if (spill) {
557 ctx.spills_entry[block_idx][pair.first] = spill_id;
558 partial_spills.erase(pair.first);
559 spilled_registers += pair.first;
560 }
561 }
562
563 /* same for phis */
564 unsigned idx = 0;
565 while (block->instructions[idx]->opcode == aco_opcode::p_linear_phi ||
566 block->instructions[idx]->opcode == aco_opcode::p_phi) {
567 aco_ptr<Instruction>& phi = block->instructions[idx];
568 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
569 bool spill = true;
570
571 for (unsigned i = 0; i < phi->operands.size(); i++) {
572 if (phi->operands[i].isUndefined())
573 continue;
574 assert(phi->operands[i].isTemp());
575 if (ctx.spills_exit[preds[i]].find(phi->operands[i].getTemp()) == ctx.spills_exit[preds[i]].end())
576 spill = false;
577 else
578 partial_spills.insert(phi->definitions[0].getTemp());
579 }
580 if (spill) {
581 ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] = ctx.allocate_spill_id(phi->definitions[0].regClass());
582 partial_spills.erase(phi->definitions[0].getTemp());
583 spilled_registers += phi->definitions[0].getTemp();
584 }
585
586 idx++;
587 }
588
589 /* if reg pressure at first instruction is still too high, add partially spilled variables */
590 RegisterDemand reg_pressure;
591 if (idx == 0) {
592 for (const Definition& def : block->instructions[idx]->definitions) {
593 if (def.isTemp()) {
594 reg_pressure -= def.getTemp();
595 }
596 }
597 for (const Operand& op : block->instructions[idx]->operands) {
598 if (op.isTemp() && op.isFirstKill()) {
599 reg_pressure += op.getTemp();
600 }
601 }
602 } else {
603 idx--;
604 }
605 reg_pressure += ctx.register_demand[block_idx][idx] - spilled_registers;
606
607 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
608 assert(!partial_spills.empty());
609
610 std::set<Temp>::iterator it = partial_spills.begin();
611 Temp to_spill = *it;
612 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
613 while (it != partial_spills.end()) {
614 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
615
616 if (it->type() == RegType::sgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
617 distance = ctx.next_use_distances_start[block_idx][*it].second;
618 to_spill = *it;
619 }
620 ++it;
621 }
622 assert(distance != 0);
623
624 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
625 partial_spills.erase(to_spill);
626 spilled_registers.sgpr += to_spill.size();
627 reg_pressure.sgpr -= to_spill.size();
628 }
629
630 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
631 assert(!partial_spills.empty());
632
633 std::set<Temp>::iterator it = partial_spills.begin();
634 Temp to_spill = *it;
635 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
636 while (it != partial_spills.end()) {
637 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
638
639 if (it->type() == RegType::vgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
640 distance = ctx.next_use_distances_start[block_idx][*it].second;
641 to_spill = *it;
642 }
643 ++it;
644 }
645 assert(distance != 0);
646
647 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
648 partial_spills.erase(to_spill);
649 spilled_registers.vgpr += to_spill.size();
650 reg_pressure.vgpr -= to_spill.size();
651 }
652
653 return spilled_registers;
654 }
655
656
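/* Inserts spill/reload code on the incoming control flow edges so that each
 * predecessor's exit state matches this block's entry state: spilled phis become
 * p_spill of their operands in the predecessors, live-in variables are spilled or
 * reloaded at the predecessors' ends as needed, and new phis are created when a
 * variable was renamed differently on different incoming paths. */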
657 void add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
658 {
659 /* no coupling code necessary */
660 if (block->linear_preds.size() == 0)
661 return;
662
663 std::vector<aco_ptr<Instruction>> instructions;
664 /* branch block: TODO take other branch into consideration */
665 if (block->linear_preds.size() == 1) {
666 assert(ctx.processed[block->linear_preds[0]]);
667
668 if (block->logical_preds.size() == 1) {
669 unsigned pred_idx = block->logical_preds[0];
670 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
671 if (live.first.type() == RegType::sgpr)
672 continue;
673 /* still spilled */
674 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
675 continue;
676
677 /* in register at end of predecessor */
678 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
679 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
680 if (it != ctx.renames[pred_idx].end())
681 ctx.renames[block_idx].insert(*it);
682 continue;
683 }
684
685 /* variable is spilled at predecessor and live at current block: create reload instruction */
686 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
687 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
688 instructions.emplace_back(std::move(reload));
689 ctx.renames[block_idx][live.first] = new_name;
690 }
691 }
692
693 unsigned pred_idx = block->linear_preds[0];
694 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
695 if (live.first.type() == RegType::vgpr)
696 continue;
697 /* still spilled */
698 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
699 continue;
700
701 /* in register at end of predecessor */
702 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
703 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
704 if (it != ctx.renames[pred_idx].end())
705 ctx.renames[block_idx].insert(*it);
706 continue;
707 }
708
709 /* variable is spilled at predecessor and live at current block: create reload instruction */
710 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
711 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
712 instructions.emplace_back(std::move(reload));
713 ctx.renames[block_idx][live.first] = new_name;
714 }
715
716 /* combine new reload instructions with original block */
717 if (!instructions.empty()) {
718 unsigned insert_idx = 0;
719 while (block->instructions[insert_idx]->opcode == aco_opcode::p_phi ||
720 block->instructions[insert_idx]->opcode == aco_opcode::p_linear_phi) {
721 insert_idx++;
722 }
723 ctx.register_demand[block->index].insert(std::next(ctx.register_demand[block->index].begin(), insert_idx),
724 instructions.size(), RegisterDemand());
725 block->instructions.insert(std::next(block->instructions.begin(), insert_idx),
726 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(instructions.begin()),
727 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(instructions.end()));
728 }
729 return;
730 }
731
732 /* loop header and merge blocks: check if all (linear) predecessors have been processed */
733 for (ASSERTED unsigned pred : block->linear_preds)
734 assert(ctx.processed[pred]);
735
736 /* for phis whose definition is spilled at this block, spill their operands at the predecessors */
737 for (aco_ptr<Instruction>& phi : block->instructions) {
738 if (phi->opcode != aco_opcode::p_phi &&
739 phi->opcode != aco_opcode::p_linear_phi)
740 break;
741
742 /* if the phi is not spilled, add to instructions */
743 if (ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end()) {
744 instructions.emplace_back(std::move(phi));
745 continue;
746 }
747
748 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
749 uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];
750
751 for (unsigned i = 0; i < phi->operands.size(); i++) {
752 if (phi->operands[i].isUndefined())
753 continue;
754
755 unsigned pred_idx = preds[i];
756 assert(phi->operands[i].isTemp() && phi->operands[i].isKill());
757 Temp var = phi->operands[i].getTemp();
758
759 /* build interferences between the phi def and all spilled variables at the predecessor blocks */
760 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
761 if (var == pair.first)
762 continue;
763 ctx.interferences[def_spill_id].second.emplace(pair.second);
764 ctx.interferences[pair.second].second.emplace(def_spill_id);
765 }
766
767 /* check if variable is already spilled at predecessor */
768 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(var);
769 if (spilled != ctx.spills_exit[pred_idx].end()) {
770 if (spilled->second != def_spill_id)
771 ctx.add_affinity(def_spill_id, spilled->second);
772 continue;
773 }
774
775 /* rename if necessary */
776 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
777 if (rename_it != ctx.renames[pred_idx].end()) {
778 var = rename_it->second;
779 ctx.renames[pred_idx].erase(rename_it);
780 }
781
782 uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());
783 ctx.add_affinity(def_spill_id, spill_id);
784 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
785 spill->operands[0] = Operand(var);
786 spill->operands[1] = Operand(spill_id);
787 Block& pred = ctx.program->blocks[pred_idx];
788 unsigned idx = pred.instructions.size();
789 do {
790 assert(idx != 0);
791 idx--;
792 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
793 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
794 pred.instructions.insert(it, std::move(spill));
795 ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
796 }
797
798 /* remove phi from instructions */
799 phi.reset();
800 }
801
802 /* for all other variables spilled at this block's entry, make sure they are also spilled at each predecessor's exit */
803 // TODO: would be better to have them sorted: first vgprs and first with longest distance
804 for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
805 std::vector<unsigned> preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
806
807 for (unsigned pred_idx : preds) {
808 /* add interferences between spilled variable and predecessors exit spills */
809 for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
810 if (exit_spill.first == pair.first)
811 continue;
812 ctx.interferences[exit_spill.second].second.emplace(pair.second);
813 ctx.interferences[pair.second].second.emplace(exit_spill.second);
814 }
815
816 /* variable is already spilled at predecessor */
817 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(pair.first);
818 if (spilled != ctx.spills_exit[pred_idx].end()) {
819 if (spilled->second != pair.second)
820 ctx.add_affinity(pair.second, spilled->second);
821 continue;
822 }
823
824 /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
825 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
826 continue;
827
828 /* variable is in register at predecessor and has to be spilled */
829 /* rename if necessary */
830 Temp var = pair.first;
831 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
832 if (rename_it != ctx.renames[pred_idx].end()) {
833 var = rename_it->second;
834 ctx.renames[pred_idx].erase(rename_it);
835 }
836
837 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
838 spill->operands[0] = Operand(var);
839 spill->operands[1] = Operand(pair.second);
840 Block& pred = ctx.program->blocks[pred_idx];
841 unsigned idx = pred.instructions.size();
842 do {
843 assert(idx != 0);
844 idx--;
845 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
846 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
847 pred.instructions.insert(it, std::move(spill));
848 ctx.spills_exit[pred.index][pair.first] = pair.second;
849 }
850 }
851
852 /* for phis kept in registers, reload spilled operands at the end of the predecessor blocks */
853 for (aco_ptr<Instruction>& phi : instructions) {
854 assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
855 assert(ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end());
856
857 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
858 for (unsigned i = 0; i < phi->operands.size(); i++) {
859 if (!phi->operands[i].isTemp())
860 continue;
861 unsigned pred_idx = preds[i];
862
863 /* rename operand */
864 if (ctx.spills_exit[pred_idx].find(phi->operands[i].getTemp()) == ctx.spills_exit[pred_idx].end()) {
865 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(phi->operands[i].getTemp());
866 if (it != ctx.renames[pred_idx].end())
867 phi->operands[i].setTemp(it->second);
868 continue;
869 }
870
871 Temp tmp = phi->operands[i].getTemp();
872
873 /* reload phi operand at end of predecessor block */
874 Temp new_name = {ctx.program->allocateId(), tmp.regClass()};
875 Block& pred = ctx.program->blocks[pred_idx];
876 unsigned idx = pred.instructions.size();
877 do {
878 assert(idx != 0);
879 idx--;
880 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
881 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
882
883 aco_ptr<Instruction> reload = do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);
884 pred.instructions.insert(it, std::move(reload));
885
886 ctx.spills_exit[pred_idx].erase(tmp);
887 ctx.renames[pred_idx][tmp] = new_name;
888 phi->operands[i].setTemp(new_name);
889 }
890 }
891
892 /* for live-in variables kept in registers, reload them at predecessors where they are still spilled */
893 // TODO: reload at current block if variable is spilled on all predecessors
894 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
895 /* skip spilled variables */
896 if (ctx.spills_entry[block_idx].find(pair.first) != ctx.spills_entry[block_idx].end())
897 continue;
898 std::vector<unsigned> preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
899
900 /* variable is dead at predecessor, it must be from a phi */
901 bool is_dead = false;
902 for (unsigned pred_idx : preds) {
903 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
904 is_dead = true;
905 }
906 if (is_dead)
907 continue;
908 for (unsigned pred_idx : preds) {
909 /* the variable is not spilled at the predecessor */
910 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end())
911 continue;
912
913 /* variable is spilled at predecessor and has to be reloaded */
914 Temp new_name = {ctx.program->allocateId(), pair.first.regClass()};
915 Block& pred = ctx.program->blocks[pred_idx];
916 unsigned idx = pred.instructions.size();
917 do {
918 assert(idx != 0);
919 idx--;
920 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
921 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
922
923 aco_ptr<Instruction> reload = do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
924 pred.instructions.insert(it, std::move(reload));
925
926 ctx.spills_exit[pred.index].erase(pair.first);
927 ctx.renames[pred.index][pair.first] = new_name;
928 }
929
930 /* check if we have to create a new phi for this variable */
931 Temp rename = Temp();
932 bool is_same = true;
933 for (unsigned pred_idx : preds) {
934 if (ctx.renames[pred_idx].find(pair.first) == ctx.renames[pred_idx].end()) {
935 if (rename == Temp())
936 rename = pair.first;
937 else
938 is_same = rename == pair.first;
939 } else {
940 if (rename == Temp())
941 rename = ctx.renames[pred_idx][pair.first];
942 else
943 is_same = rename == ctx.renames[pred_idx][pair.first];
944 }
945
946 if (!is_same)
947 break;
948 }
949
950 if (!is_same) {
951 /* the variable was renamed differently in the predecessors: we have to create a phi */
952 aco_opcode opcode = pair.first.type() == RegType::vgpr ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
953 aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
954 rename = {ctx.program->allocateId(), pair.first.regClass()};
955 for (unsigned i = 0; i < phi->operands.size(); i++) {
956 Temp tmp;
957 if (ctx.renames[preds[i]].find(pair.first) != ctx.renames[preds[i]].end())
958 tmp = ctx.renames[preds[i]][pair.first];
959 else if (preds[i] >= block_idx)
960 tmp = rename;
961 else
962 tmp = pair.first;
963 phi->operands[i] = Operand(tmp);
964 }
965 phi->definitions[0] = Definition(rename);
966 instructions.emplace_back(std::move(phi));
967 }
968
969 /* the variable was renamed: add new name to renames */
970 if (!(rename == Temp() || rename == pair.first))
971 ctx.renames[block_idx][pair.first] = rename;
972 }
973
974 /* combine phis with instructions */
975 unsigned idx = 0;
976 while (!block->instructions[idx]) {
977 idx++;
978 }
979
980 ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(), ctx.register_demand[block->index].begin() + idx);
981 ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(), instructions.size(), RegisterDemand());
982
983 std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
984 instructions.insert(instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
985 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
986 block->instructions = std::move(instructions);
987 }
988
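/* Walks the block's instructions: spilled operands are reloaded right before their
 * use and, while the register demand around an instruction still exceeds the target,
 * the live variable with the furthest next use (preferring rematerializable ones) is
 * spilled. Interferences between simultaneously spilled ids are recorded on the way. */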
989 void process_block(spill_ctx& ctx, unsigned block_idx, Block* block,
990 std::map<Temp, uint32_t> &current_spills, RegisterDemand spilled_registers)
991 {
992 std::vector<std::map<Temp, uint32_t>> local_next_use_distance;
993 std::vector<aco_ptr<Instruction>> instructions;
994 unsigned idx = 0;
995
996 /* phis are handled separately */
997 while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
998 block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
999 aco_ptr<Instruction>& instr = block->instructions[idx];
1000 for (const Operand& op : instr->operands) {
1001 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
1002 if (op.isTemp() && ctx.remat.count(op.getTemp()))
1003 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1004 }
1005 instructions.emplace_back(std::move(instr));
1006 idx++;
1007 }
1008
1009 if (block->register_demand.exceeds(ctx.target_pressure))
1010 local_next_use_distance = local_next_uses(ctx, block);
1011
1012 while (idx < block->instructions.size()) {
1013 aco_ptr<Instruction>& instr = block->instructions[idx];
1014
1015 std::map<Temp, std::pair<Temp, uint32_t>> reloads;
1016 std::map<Temp, uint32_t> spills;
1017 /* rename and reload operands */
1018 for (Operand& op : instr->operands) {
1019 if (!op.isTemp())
1020 continue;
1021 if (current_spills.find(op.getTemp()) == current_spills.end()) {
1022 /* the Operand is in register: check if it was renamed */
1023 if (ctx.renames[block_idx].find(op.getTemp()) != ctx.renames[block_idx].end())
1024 op.setTemp(ctx.renames[block_idx][op.getTemp()]);
1025 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
1026 if (ctx.remat.count(op.getTemp()))
1027 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1028 continue;
1029 }
1030 /* the Operand is spilled: add it to reloads */
1031 Temp new_tmp = {ctx.program->allocateId(), op.regClass()};
1032 ctx.renames[block_idx][op.getTemp()] = new_tmp;
1033 reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
1034 current_spills.erase(op.getTemp());
1035 op.setTemp(new_tmp);
1036 spilled_registers -= new_tmp;
1037 }
1038
1039 /* check if register demand is low enough before and after the current instruction */
1040 if (block->register_demand.exceeds(ctx.target_pressure)) {
1041
1042 RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
1043 if (idx == 0) {
1044 for (const Definition& def : instr->definitions) {
1045 if (!def.isTemp())
1046 continue;
1047 new_demand += def.getTemp();
1048 }
1049 } else {
1050 new_demand.update(ctx.register_demand[block_idx][idx - 1]);
1051 }
1052
1053 assert(!local_next_use_distance.empty());
1054
1055 /* if reg pressure is too high, spill variable with furthest next use */
1056 while (RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
1057 unsigned distance = 0;
1058 Temp to_spill;
1059 bool do_rematerialize = false;
1060 if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
1061 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1062 bool can_rematerialize = ctx.remat.count(pair.first);
1063 if (pair.first.type() == RegType::vgpr &&
1064 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1065 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1066 current_spills.find(pair.first) == current_spills.end() &&
1067 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1068 to_spill = pair.first;
1069 distance = pair.second;
1070 do_rematerialize = can_rematerialize;
1071 }
1072 }
1073 } else {
1074 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1075 bool can_rematerialize = ctx.remat.count(pair.first);
1076 if (pair.first.type() == RegType::sgpr &&
1077 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1078 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1079 current_spills.find(pair.first) == current_spills.end() &&
1080 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1081 to_spill = pair.first;
1082 distance = pair.second;
1083 do_rematerialize = can_rematerialize;
1084 }
1085 }
1086 }
1087
1088 assert(distance != 0 && distance > idx);
1089 uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());
1090
1091 /* add interferences with currently spilled variables */
1092 for (std::pair<Temp, uint32_t> pair : current_spills) {
1093 ctx.interferences[spill_id].second.emplace(pair.second);
1094 ctx.interferences[pair.second].second.emplace(spill_id);
1095 }
1096 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1097 ctx.interferences[spill_id].second.emplace(pair.second.second);
1098 ctx.interferences[pair.second.second].second.emplace(spill_id);
1099 }
1100
1101 current_spills[to_spill] = spill_id;
1102 spilled_registers += to_spill;
1103
1104 /* rename if necessary */
1105 if (ctx.renames[block_idx].find(to_spill) != ctx.renames[block_idx].end()) {
1106 to_spill = ctx.renames[block_idx][to_spill];
1107 }
1108
1109 /* add spill to new instructions */
1110 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
1111 spill->operands[0] = Operand(to_spill);
1112 spill->operands[1] = Operand(spill_id);
1113 instructions.emplace_back(std::move(spill));
1114 }
1115 }
1116
1117 /* add reloads and instruction to new instructions */
1118 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1119 aco_ptr<Instruction> reload = do_reload(ctx, pair.second.first, pair.first, pair.second.second);
1120 instructions.emplace_back(std::move(reload));
1121 }
1122 instructions.emplace_back(std::move(instr));
1123 idx++;
1124 }
1125
1126 block->instructions = std::move(instructions);
1127 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1128 }
1129
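/* Per-block driver: computes the entry spill set, adds coupling code on incoming
 * edges (deferred for loop headers), processes the block if necessary and, when the
 * next block leaves a loop, replays the coupling code for the loop header and
 * propagates the resulting renames through the loop body to repair SSA form. */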
1130 void spill_block(spill_ctx& ctx, unsigned block_idx)
1131 {
1132 Block* block = &ctx.program->blocks[block_idx];
1133 ctx.processed[block_idx] = true;
1134
1135 /* determine set of variables which are spilled at the beginning of the block */
1136 RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);
1137
1138 /* add interferences for spilled variables */
1139 for (std::pair<Temp, uint32_t> x : ctx.spills_entry[block_idx]) {
1140 for (std::pair<Temp, uint32_t> y : ctx.spills_entry[block_idx])
1141 if (x.second != y.second)
1142 ctx.interferences[x.second].second.emplace(y.second);
1143 }
1144
1145 bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
1146 if (!is_loop_header) {
1147 /* add spill/reload code on incoming control flow edges */
1148 add_coupling_code(ctx, block, block_idx);
1149 }
1150
1151 std::map<Temp, uint32_t> current_spills = ctx.spills_entry[block_idx];
1152
1153 /* check conditions to process this block */
1154 bool process = RegisterDemand(block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
1155 !ctx.renames[block_idx].empty() ||
1156 ctx.remat_used.size();
1157
1158 std::map<Temp, uint32_t>::iterator it = current_spills.begin();
1159 while (!process && it != current_spills.end()) {
1160 if (ctx.next_use_distances_start[block_idx][it->first].first == block_idx)
1161 process = true;
1162 ++it;
1163 }
1164
1165 if (process)
1166 process_block(ctx, block_idx, block, current_spills, spilled_registers);
1167 else
1168 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1169
1170 /* check if the next block leaves the current loop */
1171 if (block->loop_nest_depth == 0 || ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
1172 return;
1173
1174 Block* loop_header = ctx.loop_header.top();
1175
1176 /* preserve original renames at end of loop header block */
1177 std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);
1178
1179 /* add coupling code to all loop header predecessors */
1180 add_coupling_code(ctx, loop_header, loop_header->index);
1181
1182 /* update remat_used for phis added in add_coupling_code() */
1183 for (aco_ptr<Instruction>& instr : loop_header->instructions) {
1184 if (!is_phi(instr))
1185 break;
1186 for (const Operand& op : instr->operands) {
1187 if (op.isTemp() && ctx.remat.count(op.getTemp()))
1188 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1189 }
1190 }
1191
1192 /* propagate new renames through loop: i.e. repair the SSA */
1193 renames.swap(ctx.renames[loop_header->index]);
1194 for (std::pair<Temp, Temp> rename : renames) {
1195 for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
1196 Block& current = ctx.program->blocks[idx];
1197 std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();
1198
1199 /* first rename phis */
1200 while (instr_it != current.instructions.end()) {
1201 aco_ptr<Instruction>& phi = *instr_it;
1202 if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
1203 break;
1204 /* no need to rename the loop header phis once again. this happened in add_coupling_code() */
1205 if (idx == loop_header->index) {
1206 instr_it++;
1207 continue;
1208 }
1209
1210 for (Operand& op : phi->operands) {
1211 if (!op.isTemp())
1212 continue;
1213 if (op.getTemp() == rename.first)
1214 op.setTemp(rename.second);
1215 }
1216 instr_it++;
1217 }
1218
1219 std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = ctx.next_use_distances_start[idx].find(rename.first);
1220
1221 /* variable is not live at beginning of this block */
1222 if (it == ctx.next_use_distances_start[idx].end())
1223 continue;
1224
1225 /* if the variable is live at the block's exit, add rename */
1226 if (ctx.next_use_distances_end[idx].find(rename.first) != ctx.next_use_distances_end[idx].end())
1227 ctx.renames[idx].insert(rename);
1228
1229 /* rename all uses in this block */
1230 bool renamed = false;
1231 while (!renamed && instr_it != current.instructions.end()) {
1232 aco_ptr<Instruction>& instr = *instr_it;
1233 for (Operand& op : instr->operands) {
1234 if (!op.isTemp())
1235 continue;
1236 if (op.getTemp() == rename.first) {
1237 op.setTemp(rename.second);
1238 /* we can stop with this block as soon as the variable is spilled */
1239 if (instr->opcode == aco_opcode::p_spill)
1240 renamed = true;
1241 }
1242 }
1243 instr_it++;
1244 }
1245 }
1246 }
1247
1248 /* remove loop header info from stack */
1249 ctx.loop_header.pop();
1250 }
1251
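/* Maps spill ids to spill slots and lowers the pseudo instructions. Interference sets
 * of spill ids in the same affinity group are merged first so the whole group can
 * share one slot. SGPR spill ids are packed into slots backed by lanes of linear
 * VGPRs (64 slots per VGPR); actual VGPR spilling is not implemented yet. Finally,
 * p_spill/p_reload are rewritten to reference the linear VGPR and the lane index
 * (slot % 64). */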
1252 void assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr) {
1253 std::map<uint32_t, uint32_t> sgpr_slot;
1254 std::map<uint32_t, uint32_t> vgpr_slot;
1255 std::vector<bool> is_assigned(ctx.interferences.size());
1256
1257 /* first, handle affinities: just merge all interferences into both spill ids */
1258 for (std::vector<uint32_t>& vec : ctx.affinities) {
1259 for (unsigned i = 0; i < vec.size(); i++) {
1260 for (unsigned j = i + 1; j < vec.size(); j++) {
1261 assert(vec[i] != vec[j]);
1262 for (uint32_t id : ctx.interferences[vec[i]].second)
1263 ctx.interferences[id].second.insert(vec[j]);
1264 for (uint32_t id : ctx.interferences[vec[j]].second)
1265 ctx.interferences[id].second.insert(vec[i]);
1266 ctx.interferences[vec[i]].second.insert(ctx.interferences[vec[j]].second.begin(), ctx.interferences[vec[j]].second.end());
1267 ctx.interferences[vec[j]].second.insert(ctx.interferences[vec[i]].second.begin(), ctx.interferences[vec[i]].second.end());
1268
1269 bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
1270 ctx.is_reloaded[vec[i]] = reloaded;
1271 ctx.is_reloaded[vec[j]] = reloaded;
1272 }
1273 }
1274 }
1275 for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
1276 for (ASSERTED uint32_t id : ctx.interferences[i].second)
1277 assert(i != id);
1278
1279 /* for each spill slot, assign as many spill ids as possible */
1280 std::vector<std::set<uint32_t>> spill_slot_interferences;
1281 unsigned slot_idx = 0;
1282 bool done = false;
1283
1284 /* assign sgpr spill slots */
1285 while (!done) {
1286 done = true;
1287 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1288 if (is_assigned[id] || !ctx.is_reloaded[id])
1289 continue;
1290 if (ctx.interferences[id].first.type() != RegType::sgpr)
1291 continue;
1292
1293 /* check interferences */
1294 bool interferes = false;
1295 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1296 if (i == spill_slot_interferences.size())
1297 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1298 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
1299 interferes = true;
1300 break;
1301 }
1302 }
1303 if (interferes) {
1304 done = false;
1305 continue;
1306 }
1307
1308 /* we found a spill id which can be assigned to current spill slot */
1309 sgpr_slot[id] = slot_idx;
1310 is_assigned[id] = true;
1311 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1312 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1313
1314 /* also assign the same slot to all spill ids in this id's affinity group: their interference sets were already merged, so no additional checks are needed */
1315 for (std::vector<uint32_t>& vec : ctx.affinities) {
1316 bool found_affinity = false;
1317 for (uint32_t entry : vec) {
1318 if (entry == id) {
1319 found_affinity = true;
1320 break;
1321 }
1322 }
1323 if (!found_affinity)
1324 continue;
1325 for (uint32_t entry : vec) {
1326 sgpr_slot[entry] = slot_idx;
1327 is_assigned[entry] = true;
1328 }
1329 }
1330 }
1331 slot_idx++;
1332 }
1333
1334 slot_idx = 0;
1335 done = false;
1336
1337 /* assign vgpr spill slots */
1338 while (!done) {
1339 done = true;
1340 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1341 if (is_assigned[id] || !ctx.is_reloaded[id])
1342 continue;
1343 if (ctx.interferences[id].first.type() != RegType::vgpr)
1344 continue;
1345
1346 /* check interferences */
1347 bool interferes = false;
1348 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1349 if (i == spill_slot_interferences.size())
1350 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1351 /* check for interference and ensure that vector regs are stored next to each other */
1352 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
1353 interferes = true;
1354 break;
1355 }
1356 }
1357 if (interferes) {
1358 done = false;
1359 continue;
1360 }
1361
1362 /* we found a spill id which can be assigned to current spill slot */
1363 vgpr_slot[id] = slot_idx;
1364 is_assigned[id] = true;
1365 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1366 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1367 }
1368 slot_idx++;
1369 }
1370
1371 for (unsigned id = 0; id < is_assigned.size(); id++)
1372 assert(is_assigned[id] || !ctx.is_reloaded[id]);
1373
1374 for (std::vector<uint32_t>& vec : ctx.affinities) {
1375 for (unsigned i = 0; i < vec.size(); i++) {
1376 for (unsigned j = i + 1; j < vec.size(); j++) {
1377 assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
1378 if (!is_assigned[vec[i]])
1379 continue;
1380 assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
1381 assert(ctx.interferences[vec[i]].first.type() == ctx.interferences[vec[j]].first.type());
1382 if (ctx.interferences[vec[i]].first.type() == RegType::sgpr)
1383 assert(sgpr_slot[vec[i]] == sgpr_slot[vec[j]]);
1384 else
1385 assert(vgpr_slot[vec[i]] == vgpr_slot[vec[j]]);
1386 }
1387 }
1388 }
1389
1390 /* hopefully we didn't mess up */
1391 std::vector<Temp> vgpr_spill_temps((spill_slot_interferences.size() + 63) / 64);
1392 assert(vgpr_spill_temps.size() <= spills_to_vgpr);
1393
1394 /* replace pseudo instructions with actual hardware instructions */
1395 unsigned last_top_level_block_idx = 0;
1396 std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
1397 for (Block& block : ctx.program->blocks) {
1398
1399 /* after loops, we insert a user if there was a reload inside the loop */
1400 if (block.loop_nest_depth == 0) {
1401 int end_vgprs = 0;
1402 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1403 if (reload_in_loop[i])
1404 end_vgprs++;
1405 }
1406
1407 if (end_vgprs > 0) {
1408 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
1409 int k = 0;
1410 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1411 if (reload_in_loop[i])
1412 destr->operands[k++] = Operand(vgpr_spill_temps[i]);
1413 reload_in_loop[i] = false;
1414 }
1415 /* find insertion point */
1416 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1417 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1418 ++it;
1419 block.instructions.insert(it, std::move(destr));
1420 }
1421 }
1422
1423 if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
1424 last_top_level_block_idx = block.index;
1425
1426 /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
1427 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1428 if (vgpr_spill_temps[i] == Temp())
1429 continue;
1430
1431 bool can_destroy = true;
1432 for (const std::pair<Temp, uint32_t>& pair : ctx.spills_exit[block.linear_preds[0]]) {
1433
1434 if (sgpr_slot.find(pair.second) != sgpr_slot.end() &&
1435 sgpr_slot[pair.second] / 64 == i) {
1436 can_destroy = false;
1437 break;
1438 }
1439 }
1440 if (can_destroy)
1441 vgpr_spill_temps[i] = Temp();
1442 }
1443 }
1444
1445 std::vector<aco_ptr<Instruction>>::iterator it;
1446 std::vector<aco_ptr<Instruction>> instructions;
1447 instructions.reserve(block.instructions.size());
1448 for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {
1449
1450 if ((*it)->opcode == aco_opcode::p_spill) {
1451 uint32_t spill_id = (*it)->operands[1].constantValue();
1452
1453 if (!ctx.is_reloaded[spill_id]) {
1454 /* never reloaded, so don't spill */
1455 } else if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1456 /* spill vgpr */
1457 ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
1458
1459 assert(false && "vgpr spilling not yet implemented.");
1460 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1461 ctx.program->config->spilled_sgprs += (*it)->operands[0].size();
1462
1463 uint32_t spill_slot = sgpr_slot[spill_id];
1464
1465 /* check if the linear vgpr already exists */
1466 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1467 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1468 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1469 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1470 create->definitions[0] = Definition(linear_vgpr);
1471 /* find the right place to insert this definition */
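                  /* the new linear vgpr has to be defined in a top-level block, outside any
                   * loop or divergent control flow, so that it dominates all of its uses */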
1472 if (last_top_level_block_idx == block.index) {
1473 /* insert right before the current instruction */
1474 instructions.emplace_back(std::move(create));
1475 } else {
1476 assert(last_top_level_block_idx < block.index);
1477 /* insert before the branch at last top level block */
1478 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1479 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1480 }
1481 }
1482
1483 /* spill sgpr: just add the vgpr temp to operands */
1484 Pseudo_instruction* spill = create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
1485 spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1486 spill->operands[1] = Operand(spill_slot % 64);
1487 spill->operands[2] = (*it)->operands[0];
1488 instructions.emplace_back(aco_ptr<Instruction>(spill));
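                  /* rewritten form (sketch): p_spill %linear_vgpr, <lane>, %sgpr
                   * a later lowering pass is expected to turn this into v_writelane_b32 */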
1489 } else {
1490 unreachable("No spill slot assigned for spill id");
1491 }
1492
1493 } else if ((*it)->opcode == aco_opcode::p_reload) {
1494 uint32_t spill_id = (*it)->operands[0].constantValue();
1495 assert(ctx.is_reloaded[spill_id]);
1496
1497 if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1498 /* reload vgpr */
1499 assert(false && "vgpr spilling not yet implemented.");
1500
1501 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1502 uint32_t spill_slot = sgpr_slot[spill_id];
1503 reload_in_loop[spill_slot / 64] = block.loop_nest_depth > 0;
1504
1505 /* check if the linear vgpr already exists */
1506 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1507 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1508 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1509 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1510 create->definitions[0] = Definition(linear_vgpr);
1511 /* find the right place to insert this definition */
1512 if (last_top_level_block_idx == block.index) {
1513 /* insert right before the current instruction */
1514 instructions.emplace_back(std::move(create));
1515 } else {
1516 assert(last_top_level_block_idx < block.index);
1517 /* insert before the branch at last top level block */
1518 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1519 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1520 }
1521 }
1522
1523 /* reload sgpr: just add the vgpr temp to operands */
1524 Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 2, 1);
1525 reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1526 reload->operands[1] = Operand(spill_slot % 64);
1527 reload->definitions[0] = (*it)->definitions[0];
1528 instructions.emplace_back(aco_ptr<Instruction>(reload));
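                  /* rewritten form (sketch): %sgpr = p_reload %linear_vgpr, <lane>
                   * presumably lowered to v_readlane_b32 later on */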
1529 } else {
1530 unreachable("No spill slot assigned for spill id");
1531 }
1532 } else if (!ctx.remat_used.count(it->get()) || ctx.remat_used[it->get()]) {
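               /* keep the instruction unless it is only a rematerialization source whose
                * result is never used anymore - those are DCE'd by not copying them over */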
1533 instructions.emplace_back(std::move(*it));
1534 }
1535
1536 }
1537 block.instructions = std::move(instructions);
1538 }
1539
1540 /* SSA elimination inserts copies for logical phis right before p_logical_end
1541 * So if a linear vgpr is used between that p_logical_end and the branch,
1542 * we need to ensure logical phis don't choose a definition which aliases
1543 * the linear vgpr.
1544 * TODO: Moving the spills and reloads to before p_logical_end might produce
1545 * slightly better code. */
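   /* (the p_end_linear_vgpr inserted below acts as a use after the phis, keeping the
    * linear vgprs live past the predecessors' phi copies so RA can't alias them) */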
1546 for (Block& block : ctx.program->blocks) {
1547 /* loop exits are already handled */
1548 if (block.logical_preds.size() <= 1)
1549 continue;
1550
1551 bool has_logical_phis = false;
1552 for (aco_ptr<Instruction>& instr : block.instructions) {
1553 if (instr->opcode == aco_opcode::p_phi) {
1554 has_logical_phis = true;
1555 break;
1556 } else if (instr->opcode != aco_opcode::p_linear_phi) {
1557 break;
1558 }
1559 }
1560 if (!has_logical_phis)
1561 continue;
1562
1563 std::set<Temp> vgprs;
1564 for (unsigned pred_idx : block.logical_preds) {
1565 Block& pred = ctx.program->blocks[pred_idx];
1566 for (int i = pred.instructions.size() - 1; i >= 0; i--) {
1567 aco_ptr<Instruction>& pred_instr = pred.instructions[i];
1568 if (pred_instr->opcode == aco_opcode::p_logical_end) {
1569 break;
1570 } else if (pred_instr->opcode == aco_opcode::p_spill ||
1571 pred_instr->opcode == aco_opcode::p_reload) {
1572 vgprs.insert(pred_instr->operands[0].getTemp());
1573 }
1574 }
1575 }
1576 if (vgprs.empty())
1577 continue;
1578
1579 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
1580 int k = 0;
1581 for (Temp tmp : vgprs) {
1582 destr->operands[k++] = Operand(tmp);
1583 }
1584 /* find insertion point */
1585 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1586 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1587 ++it;
1588 block.instructions.insert(it, std::move(destr));
1589 }
1590 }
1591
1592 } /* end namespace */
1593
1594
1595 void spill(Program* program, live& live_vars, const struct radv_nir_compiler_options *options)
1596 {
1597 program->config->spilled_vgprs = 0;
1598 program->config->spilled_sgprs = 0;
1599
1600 /* no spilling when wave count is already high */
1601 if (program->num_waves >= 6)
1602 return;
1603
1604 /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
1605 lower_to_cssa(program, live_vars, options);
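   /* (in CSSA form, phi operands and definitions don't interfere, which is what allows
    * related spill ids to share a slot via the affinity groups) */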
1606
1607 /* otherwise, check whether a limited amount of sgpr spilling lets us increase occupancy */
1608
1609 /* calculate target register demand */
1610 RegisterDemand max_reg_demand;
1611 for (Block& block : program->blocks) {
1612 max_reg_demand.update(block.register_demand);
1613 }
1614
1615 RegisterDemand target_pressure = {256, int16_t(program->sgpr_limit)};
1616 unsigned num_waves = 1;
1617 int spills_to_vgpr = (max_reg_demand.sgpr - program->sgpr_limit + 63) / 64;
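   /* one linear vgpr provides 64 spill slots (one sgpr per lane), hence the round-up by 63 */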
1618
1619 /* test if it is possible to increase occupancy with little spilling */
1620 for (unsigned num_waves_next = 2; num_waves_next <= program->max_waves; num_waves_next++) {
1621 RegisterDemand target_pressure_next = {int16_t((256 / num_waves_next) & ~3),
1622 int16_t(get_addr_sgpr_from_waves(program, num_waves_next))};
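      /* e.g. num_waves_next == 3 gives a vgpr target of (256 / 3) & ~3 = 84, i.e. rounded
       * down to a multiple of 4 to match the vgpr allocation granularity */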
1623
1624 /* Currently, vgpr spilling is not supported.
1625 * Spill as many sgprs as necessary so that sgpr demand doesn't limit occupancy. */
1626 if (max_reg_demand.vgpr > target_pressure_next.vgpr)
1627 break;
1628 /* check that we have enough free vgprs to spill sgprs to */
1629 if (max_reg_demand.sgpr > target_pressure_next.sgpr) {
1630 /* add some buffer in case graph coloring is not perfect ... */
1631 const int spills_to_vgpr_next = (max_reg_demand.sgpr - target_pressure_next.sgpr + 63 + 32) / 64;
1632 if (spills_to_vgpr_next + max_reg_demand.vgpr > target_pressure_next.vgpr)
1633 break;
1634 spills_to_vgpr = spills_to_vgpr_next;
1635 }
1636
1637 target_pressure = target_pressure_next;
1638 num_waves = num_waves_next;
1639 }
1640
1641 assert(max_reg_demand.vgpr <= target_pressure.vgpr && "VGPR spilling not yet supported.");
1642 /* nothing to do */
1643 if (num_waves == program->num_waves)
1644 return;
1645
1646 /* initialize ctx */
1647 spill_ctx ctx(target_pressure, program, live_vars.register_demand);
1648 compute_global_next_uses(ctx, live_vars.live_out);
1649 get_rematerialize_info(ctx);
1650
1651 /* create spills and reloads */
1652 for (unsigned i = 0; i < program->blocks.size(); i++)
1653 spill_block(ctx, i);
1654
1655 /* assign spill slots and DCE rematerialized code */
1656 assign_spill_slots(ctx, spills_to_vgpr);
1657
1658 /* update live variable information */
1659 live_vars = live_var_analysis(program, options);
1660
1661 assert(program->num_waves >= num_waves);
1662 }
1663
1664 }
1665