aco: don't insert the exec mask into set of live-out variables when spilling
[mesa.git] src/amd/compiler/aco_spill.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
26 #include "aco_ir.h"
27 #include <map>
28 #include <stack>
29 #include "vulkan/radv_shader.h"
30
31
32 /*
33 * Implements the spilling algorithm for SSA-form programs from
34 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
35 * by Matthias Braun and Sebastian Hack.
36 */
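/*
 * Rough structure of the pass, as implemented below:
 *  - compute_global_next_uses(): fixed-point computation of next-use distances
 *    at the start and end of every block.
 *  - get_rematerialize_info(): collect cheap instructions whose results can be
 *    re-created instead of reloaded.
 *  - spill_block() for each block: init_live_in_vars() decides which variables
 *    enter the block spilled, add_coupling_code() inserts spills/reloads/phis
 *    on the incoming edges, and process_block() spills the variable with the
 *    furthest next use whenever the register demand exceeds the target.
 *  - assign_spill_slots(): merge affinities, pack spill ids into slots and
 *    lower the p_spill/p_reload pseudo-instructions. SGPR spills are stored in
 *    lanes of linear VGPRs; spilling VGPRs to memory is not implemented yet.
 */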
37
38 namespace aco {
39
40 namespace {
41
42 struct remat_info {
43 Instruction *instr;
44 };
45
46 struct spill_ctx {
47 RegisterDemand target_pressure;
48 Program* program;
49 std::vector<std::vector<RegisterDemand>> register_demand;
50 std::vector<std::map<Temp, Temp>> renames;
51 std::vector<std::map<Temp, uint32_t>> spills_entry;
52 std::vector<std::map<Temp, uint32_t>> spills_exit;
53 std::vector<bool> processed;
54 std::stack<Block*> loop_header;
55 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_start;
56 std::vector<std::map<Temp, std::pair<uint32_t, uint32_t>>> next_use_distances_end;
57 std::vector<std::pair<RegClass, std::set<uint32_t>>> interferences;
58 std::vector<std::vector<uint32_t>> affinities;
59 std::vector<bool> is_reloaded;
60 std::map<Temp, remat_info> remat;
61 std::map<Instruction *, bool> remat_used;
62
63 spill_ctx(const RegisterDemand target_pressure, Program* program,
64 std::vector<std::vector<RegisterDemand>> register_demand)
65 : target_pressure(target_pressure), program(program),
66 register_demand(register_demand), renames(program->blocks.size()),
67 spills_entry(program->blocks.size()), spills_exit(program->blocks.size()),
68 processed(program->blocks.size(), false) {}
69
70 void add_affinity(uint32_t first, uint32_t second)
71 {
72 unsigned found_first = affinities.size();
73 unsigned found_second = affinities.size();
74 for (unsigned i = 0; i < affinities.size(); i++) {
75 std::vector<uint32_t>& vec = affinities[i];
76 for (uint32_t entry : vec) {
77 if (entry == first)
78 found_first = i;
79 else if (entry == second)
80 found_second = i;
81 }
82 }
83 if (found_first == affinities.size() && found_second == affinities.size()) {
84 affinities.emplace_back(std::vector<uint32_t>({first, second}));
85 } else if (found_first < affinities.size() && found_second == affinities.size()) {
86 affinities[found_first].push_back(second);
87 } else if (found_second < affinities.size() && found_first == affinities.size()) {
88 affinities[found_second].push_back(first);
89 } else if (found_first != found_second) {
90 /* merge second into first */
91 affinities[found_first].insert(affinities[found_first].end(), affinities[found_second].begin(), affinities[found_second].end());
92 affinities.erase(std::next(affinities.begin(), found_second));
93 } else {
94 assert(found_first == found_second);
95 }
96 }
97
98 uint32_t allocate_spill_id(RegClass rc)
99 {
100 interferences.emplace_back(rc, std::set<uint32_t>());
101 is_reloaded.push_back(false);
102 return next_spill_id++;
103 }
104
105 uint32_t next_spill_id = 0;
106 };
107
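/* Returns the index of the closest common dominator of the two blocks, walking
 * either the linear or the logical dominator tree. An index of -1 means
 * "no block" and acts as the neutral element. */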
108 int32_t get_dominator(int idx_a, int idx_b, Program* program, bool is_linear)
109 {
110
111 if (idx_a == -1)
112 return idx_b;
113 if (idx_b == -1)
114 return idx_a;
115 if (is_linear) {
116 while (idx_a != idx_b) {
117 if (idx_a > idx_b)
118 idx_a = program->blocks[idx_a].linear_idom;
119 else
120 idx_b = program->blocks[idx_b].linear_idom;
121 }
122 } else {
123 while (idx_a != idx_b) {
124 if (idx_a > idx_b)
125 idx_a = program->blocks[idx_a].logical_idom;
126 else
127 idx_b = program->blocks[idx_b].logical_idom;
128 }
129 }
130 assert(idx_a != -1);
131 return idx_a;
132 }
133
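/* Computes the next-use distances of one block by walking its instructions
 * backwards, starting from the distances known at the block's end. Each live
 * temporary is mapped to a pair of (block containing the next use, number of
 * instructions until that use); the exec mask is ignored. Remaining live-in
 * variables and phi operands are propagated to the predecessors' end-of-block
 * maps, and any predecessor whose map changed is put back on the worklist. */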
134 void next_uses_per_block(spill_ctx& ctx, unsigned block_idx, std::set<uint32_t>& worklist)
135 {
136 Block* block = &ctx.program->blocks[block_idx];
137 std::map<Temp, std::pair<uint32_t, uint32_t>> next_uses = ctx.next_use_distances_end[block_idx];
138
139 /* to compute the next use distance at the beginning of the block, we have to add the block's size */
140 for (std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = next_uses.begin(); it != next_uses.end(); ++it)
141 it->second.second = it->second.second + block->instructions.size();
142
143 int idx = block->instructions.size() - 1;
144 while (idx >= 0) {
145 aco_ptr<Instruction>& instr = block->instructions[idx];
146
147 if (instr->opcode == aco_opcode::p_linear_phi ||
148 instr->opcode == aco_opcode::p_phi)
149 break;
150
151 for (const Definition& def : instr->definitions) {
152 if (def.isTemp())
153 next_uses.erase(def.getTemp());
154 }
155
156 for (const Operand& op : instr->operands) {
157 /* omit exec mask */
158 if (op.isFixed() && op.physReg() == exec)
159 continue;
160 if (op.isTemp())
161 next_uses[op.getTemp()] = {block_idx, idx};
162 }
163 idx--;
164 }
165
166 assert(block_idx != 0 || next_uses.empty());
167 ctx.next_use_distances_start[block_idx] = next_uses;
168 while (idx >= 0) {
169 aco_ptr<Instruction>& instr = block->instructions[idx];
170 assert(instr->opcode == aco_opcode::p_linear_phi || instr->opcode == aco_opcode::p_phi);
171
172 for (unsigned i = 0; i < instr->operands.size(); i++) {
173 unsigned pred_idx = instr->opcode == aco_opcode::p_phi ?
174 block->logical_preds[i] :
175 block->linear_preds[i];
176 if (instr->operands[i].isTemp()) {
177 if (instr->operands[i].getTemp() == ctx.program->blocks[pred_idx].live_out_exec)
178 continue;
179 if (ctx.next_use_distances_end[pred_idx].find(instr->operands[i].getTemp()) == ctx.next_use_distances_end[pred_idx].end() ||
180 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] != std::pair<uint32_t, uint32_t>{block_idx, 0})
181 worklist.insert(pred_idx);
182 ctx.next_use_distances_end[pred_idx][instr->operands[i].getTemp()] = {block_idx, 0};
183 }
184 }
185 next_uses.erase(instr->definitions[0].getTemp());
186 idx--;
187 }
188
189 /* all remaining live vars must be live-out at the predecessors */
190 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : next_uses) {
191 Temp temp = pair.first;
192 uint32_t distance = pair.second.second;
193 uint32_t dom = pair.second.first;
194 std::vector<unsigned>& preds = temp.is_linear() ? block->linear_preds : block->logical_preds;
195 for (unsigned pred_idx : preds) {
196 if (temp == ctx.program->blocks[pred_idx].live_out_exec)
197 continue;
198 if (ctx.program->blocks[pred_idx].loop_nest_depth > block->loop_nest_depth)
199 distance += 0xFFFF;
200 if (ctx.next_use_distances_end[pred_idx].find(temp) != ctx.next_use_distances_end[pred_idx].end()) {
201 dom = get_dominator(dom, ctx.next_use_distances_end[pred_idx][temp].first, ctx.program, temp.is_linear());
202 distance = std::min(ctx.next_use_distances_end[pred_idx][temp].second, distance);
203 }
204 if (ctx.next_use_distances_end[pred_idx][temp] != std::pair<uint32_t, uint32_t>{dom, distance})
205 worklist.insert(pred_idx);
206 ctx.next_use_distances_end[pred_idx][temp] = {dom, distance};
207 }
208 }
209
210 }
211
212 void compute_global_next_uses(spill_ctx& ctx, std::vector<std::set<Temp>>& live_out)
213 {
214 ctx.next_use_distances_start.resize(ctx.program->blocks.size());
215 ctx.next_use_distances_end.resize(ctx.program->blocks.size());
216 std::set<uint32_t> worklist;
217 for (Block& block : ctx.program->blocks)
218 worklist.insert(block.index);
219
220 while (!worklist.empty()) {
221 std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
222 unsigned block_idx = *b_it;
223 worklist.erase(block_idx);
224 next_uses_per_block(ctx, block_idx, worklist);
225 }
226 }
227
228 bool should_rematerialize(aco_ptr<Instruction>& instr)
229 {
230 /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
231 if (instr->format != Format::VOP1 && instr->format != Format::SOP1 && instr->format != Format::PSEUDO)
232 return false;
233 /* TODO: pseudo-instruction rematerialization is only supported for p_create_vector */
234 if (instr->format == Format::PSEUDO && instr->opcode != aco_opcode::p_create_vector)
235 return false;
236
237 for (const Operand& op : instr->operands) {
238 /* TODO: rematerialization using temporaries isn't yet supported */
239 if (op.isTemp())
240 return false;
241 }
242
243 /* TODO: rematerialization with multiple definitions isn't yet supported */
244 if (instr->definitions.size() > 1)
245 return false;
246
247 return true;
248 }
249
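/* Creates the instruction that makes new_name hold the spilled value again:
 * if the value can be rematerialized, its defining instruction is re-created
 * with new_name as the definition; otherwise a p_reload pseudo-instruction for
 * spill_id is emitted and the id is marked as actually reloaded. */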
250 aco_ptr<Instruction> do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t spill_id)
251 {
252 std::map<Temp, remat_info>::iterator remat = ctx.remat.find(tmp);
253 if (remat != ctx.remat.end()) {
254 Instruction *instr = remat->second.instr;
255 assert((instr->format == Format::VOP1 || instr->format == Format::SOP1 || instr->format == Format::PSEUDO) && "unsupported");
256 assert((instr->format != Format::PSEUDO || instr->opcode == aco_opcode::p_create_vector) && "unsupported");
257 assert(instr->definitions.size() == 1 && "unsupported");
258
259 aco_ptr<Instruction> res;
260 if (instr->format == Format::VOP1) {
261 res.reset(create_instruction<VOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
262 } else if (instr->format == Format::SOP1) {
263 res.reset(create_instruction<SOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
264 } else if (instr->format == Format::PSEUDO) {
265 res.reset(create_instruction<Instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
266 }
267 for (unsigned i = 0; i < instr->operands.size(); i++) {
268 res->operands[i] = instr->operands[i];
269 if (instr->operands[i].isTemp()) {
270 assert(false && "unsupported");
271 if (ctx.remat.count(instr->operands[i].getTemp()))
272 ctx.remat_used[ctx.remat[instr->operands[i].getTemp()].instr] = true;
273 }
274 }
275 res->definitions[0] = Definition(new_name);
276 return res;
277 } else {
278 aco_ptr<Pseudo_instruction> reload{create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 1, 1)};
279 reload->operands[0] = Operand(spill_id);
280 reload->definitions[0] = Definition(new_name);
281 ctx.is_reloaded[spill_id] = true;
282 return reload;
283 }
284 }
285
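/* Collects all instructions in the logical part of the program whose results
 * can be rematerialized. ctx.remat maps each such definition to its defining
 * instruction, and ctx.remat_used later records whether the original
 * instruction is still needed once spilling is done. */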
286 void get_rematerialize_info(spill_ctx& ctx)
287 {
288 for (Block& block : ctx.program->blocks) {
289 bool logical = false;
290 for (aco_ptr<Instruction>& instr : block.instructions) {
291 if (instr->opcode == aco_opcode::p_logical_start)
292 logical = true;
293 else if (instr->opcode == aco_opcode::p_logical_end)
294 logical = false;
295 if (logical && should_rematerialize(instr)) {
296 for (const Definition& def : instr->definitions) {
297 if (def.isTemp()) {
298 ctx.remat[def.getTemp()] = (remat_info){instr.get()};
299 ctx.remat_used[instr.get()] = false;
300 }
301 }
302 }
303 }
304 }
305 }
306
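/* Computes, for every instruction index of the block, the map from live
 * temporaries to the index of their next use within the block. The map is
 * seeded with the distances at the block's end (offset past the last
 * instruction) so that values only used in later blocks still get an entry;
 * the exec mask is ignored. */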
307 std::vector<std::map<Temp, uint32_t>> local_next_uses(spill_ctx& ctx, Block* block)
308 {
309 std::vector<std::map<Temp, uint32_t>> local_next_uses(block->instructions.size());
310
311 std::map<Temp, uint32_t> next_uses;
312 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block->index])
313 next_uses[pair.first] = pair.second.second + block->instructions.size();
314
315 for (int idx = block->instructions.size() - 1; idx >= 0; idx--) {
316 aco_ptr<Instruction>& instr = block->instructions[idx];
317 if (!instr)
318 break;
319 if (instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi)
320 break;
321
322 for (const Operand& op : instr->operands) {
323 if (op.isFixed() && op.physReg() == exec)
324 continue;
325 if (op.isTemp())
326 next_uses[op.getTemp()] = idx;
327 }
328 for (const Definition& def : instr->definitions) {
329 if (def.isTemp())
330 next_uses.erase(def.getTemp());
331 }
332 local_next_uses[idx] = next_uses;
333 }
334 return local_next_uses;
335 }
336
337
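/* Decides which variables enter the block already spilled and returns the
 * register demand they free up. Three cases are handled:
 *  - loop headers: spill live-through variables with the furthest next use
 *    until the loop body fits the target pressure,
 *  - blocks with a single predecessor: keep variables spilled that were
 *    spilled at the predecessor's exit and are not needed soon,
 *  - merge blocks: keep variables (and phi definitions) spilled on all
 *    incoming paths, and spill partially spilled ones if the pressure at the
 *    first instruction is still too high. */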
338 RegisterDemand init_live_in_vars(spill_ctx& ctx, Block* block, unsigned block_idx)
339 {
340 RegisterDemand spilled_registers;
341
342 /* first block, nothing was spilled before */
343 if (block_idx == 0)
344 return {0, 0};
345
346 /* loop header block */
347 if (block->loop_nest_depth > ctx.program->blocks[block_idx - 1].loop_nest_depth) {
348 assert(block->linear_preds[0] == block_idx - 1);
349 assert(block->logical_preds[0] == block_idx - 1);
350
351 /* create new loop_info */
352 ctx.loop_header.emplace(block);
353
354 /* check how many live-through variables should be spilled */
355 RegisterDemand new_demand;
356 unsigned i = block_idx;
357 while (ctx.program->blocks[i].loop_nest_depth >= block->loop_nest_depth) {
358 assert(ctx.program->blocks.size() > i);
359 new_demand.update(ctx.program->blocks[i].register_demand);
360 i++;
361 }
362 unsigned loop_end = i;
363
364 /* select live-through vgpr variables */
365 while (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
366 unsigned distance = 0;
367 Temp to_spill;
368 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
369 if (pair.first.type() == RegType::vgpr &&
370 pair.second.first >= loop_end &&
371 pair.second.second > distance &&
372 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
373 to_spill = pair.first;
374 distance = pair.second.second;
375 }
376 }
377 if (distance == 0)
378 break;
379
380 uint32_t spill_id;
381 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
382 spill_id = ctx.allocate_spill_id(to_spill.regClass());
383 } else {
384 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
385 }
386
387 ctx.spills_entry[block_idx][to_spill] = spill_id;
388 spilled_registers.vgpr += to_spill.size();
389 }
390
391 /* select live-through sgpr variables */
392 while (new_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
393 unsigned distance = 0;
394 Temp to_spill;
395 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_end[block_idx - 1]) {
396 if (pair.first.type() == RegType::sgpr &&
397 pair.second.first >= loop_end &&
398 pair.second.second > distance &&
399 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
400 to_spill = pair.first;
401 distance = pair.second.second;
402 }
403 }
404 if (distance == 0)
405 break;
406
407 uint32_t spill_id;
408 if (ctx.spills_exit[block_idx - 1].find(to_spill) == ctx.spills_exit[block_idx - 1].end()) {
409 spill_id = ctx.allocate_spill_id(to_spill.regClass());
410 } else {
411 spill_id = ctx.spills_exit[block_idx - 1][to_spill];
412 }
413
414 ctx.spills_entry[block_idx][to_spill] = spill_id;
415 spilled_registers.sgpr += to_spill.size();
416 }
417
418
419
420 /* shortcut */
421 if (!RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure))
422 return spilled_registers;
423
424 /* if reg pressure is too high at the beginning of the loop, spill additional variables with the furthest next use */
425 unsigned idx = 0;
426 while (block->instructions[idx]->opcode == aco_opcode::p_phi || block->instructions[idx]->opcode == aco_opcode::p_linear_phi)
427 idx++;
428
429 assert(idx != 0 && "loop without phis: TODO");
430 idx--;
431 RegisterDemand reg_pressure = ctx.register_demand[block_idx][idx] - spilled_registers;
432 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
433 unsigned distance = 0;
434 Temp to_spill;
435 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
436 if (pair.first.type() == RegType::sgpr &&
437 pair.second.second > distance &&
438 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
439 to_spill = pair.first;
440 distance = pair.second.second;
441 }
442 }
443 assert(distance != 0);
444
445 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
446 spilled_registers.sgpr += to_spill.size();
447 reg_pressure.sgpr -= to_spill.size();
448 }
449 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
450 unsigned distance = 0;
451 Temp to_spill;
452 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
453 if (pair.first.type() == RegType::vgpr &&
454 pair.second.second > distance &&
455 ctx.spills_entry[block_idx].find(pair.first) == ctx.spills_entry[block_idx].end()) {
456 to_spill = pair.first;
457 distance = pair.second.second;
458 }
459 }
460 assert(distance != 0);
461 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
462 spilled_registers.vgpr += to_spill.size();
463 reg_pressure.vgpr -= to_spill.size();
464 }
465
466 return spilled_registers;
467 }
468
469 /* branch block */
470 if (block->linear_preds.size() == 1) {
471 /* keep variables spilled if they are alive and not used in the current block */
472 unsigned pred_idx = block->linear_preds[0];
473 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
474 if (pair.first.type() == RegType::sgpr &&
475 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
476 ctx.next_use_distances_start[block_idx][pair.first].second > block_idx) {
477 ctx.spills_entry[block_idx].insert(pair);
478 spilled_registers.sgpr += pair.first.size();
479 }
480 }
481 if (block->logical_preds.size() == 1) {
482 pred_idx = block->logical_preds[0];
483 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
484 if (pair.first.type() == RegType::vgpr &&
485 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
486 ctx.next_use_distances_end[pred_idx][pair.first].second > block_idx) {
487 ctx.spills_entry[block_idx].insert(pair);
488 spilled_registers.vgpr += pair.first.size();
489 }
490 }
491 }
492
493 /* if register demand is still too high, we just keep all spilled live vars and process the block */
494 if (block->register_demand.sgpr - spilled_registers.sgpr > ctx.target_pressure.sgpr) {
495 pred_idx = block->linear_preds[0];
496 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
497 if (pair.first.type() == RegType::sgpr &&
498 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
499 ctx.spills_entry[block_idx].insert(pair).second) {
500 spilled_registers.sgpr += pair.first.size();
501 }
502 }
503 }
504 if (block->register_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr && block->logical_preds.size() == 1) {
505 pred_idx = block->logical_preds[0];
506 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
507 if (pair.first.type() == RegType::vgpr &&
508 ctx.next_use_distances_start[block_idx].find(pair.first) != ctx.next_use_distances_start[block_idx].end() &&
509 ctx.spills_entry[block_idx].insert(pair).second) {
510 spilled_registers.vgpr += pair.first.size();
511 }
512 }
513 }
514
515 return spilled_registers;
516 }
517
518 /* else: merge block */
519 std::set<Temp> partial_spills;
520
521 /* keep variables spilled on all incoming paths */
522 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
523 std::vector<unsigned>& preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
524 /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload it.
525 * Otherwise, if any predecessor reloads it, ensure it's reloaded on all other predecessors.
526 * The idea is that it's better in practice to rematerialize redundantly than to create lots of phis. */
527 /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db doesn't seem to exercise this path much) */
528 bool remat = ctx.remat.count(pair.first);
529 bool spill = !remat;
530 uint32_t spill_id = 0;
531 for (unsigned pred_idx : preds) {
532 /* variable is not even live at the predecessor: probably from a phi */
533 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end()) {
534 spill = false;
535 break;
536 }
537 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end()) {
538 if (!remat)
539 spill = false;
540 } else {
541 partial_spills.insert(pair.first);
542 /* it might be that on one incoming path, the variable has a different spill_id, but add_coupling_code() will take care of that. */
543 spill_id = ctx.spills_exit[pred_idx][pair.first];
544 if (remat)
545 spill = true;
546 }
547 }
548 if (spill) {
549 ctx.spills_entry[block_idx][pair.first] = spill_id;
550 partial_spills.erase(pair.first);
551 spilled_registers += pair.first;
552 }
553 }
554
555 /* same for phis */
556 unsigned idx = 0;
557 while (block->instructions[idx]->opcode == aco_opcode::p_linear_phi ||
558 block->instructions[idx]->opcode == aco_opcode::p_phi) {
559 aco_ptr<Instruction>& phi = block->instructions[idx];
560 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
561 bool spill = true;
562
563 for (unsigned i = 0; i < phi->operands.size(); i++) {
564 if (phi->operands[i].isUndefined())
565 continue;
566 assert(phi->operands[i].isTemp());
567 if (ctx.spills_exit[preds[i]].find(phi->operands[i].getTemp()) == ctx.spills_exit[preds[i]].end())
568 spill = false;
569 else
570 partial_spills.insert(phi->definitions[0].getTemp());
571 }
572 if (spill) {
573 ctx.spills_entry[block_idx][phi->definitions[0].getTemp()] = ctx.allocate_spill_id(phi->definitions[0].regClass());
574 partial_spills.erase(phi->definitions[0].getTemp());
575 spilled_registers += phi->definitions[0].getTemp();
576 }
577
578 idx++;
579 }
580
581 /* if reg pressure at first instruction is still too high, add partially spilled variables */
582 RegisterDemand reg_pressure;
583 if (idx == 0) {
584 for (const Definition& def : block->instructions[idx]->definitions) {
585 if (def.isTemp()) {
586 reg_pressure -= def.getTemp();
587 }
588 }
589 for (const Operand& op : block->instructions[idx]->operands) {
590 if (op.isTemp() && op.isFirstKill()) {
591 reg_pressure += op.getTemp();
592 }
593 }
594 } else {
595 idx--;
596 }
597 reg_pressure += ctx.register_demand[block_idx][idx] - spilled_registers;
598
599 while (reg_pressure.sgpr > ctx.target_pressure.sgpr) {
600 assert(!partial_spills.empty());
601
602 std::set<Temp>::iterator it = partial_spills.begin();
603 Temp to_spill = *it;
604 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
605 while (it != partial_spills.end()) {
606 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
607
608 if (it->type() == RegType::sgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
609 distance = ctx.next_use_distances_start[block_idx][*it].second;
610 to_spill = *it;
611 }
612 ++it;
613 }
614 assert(distance != 0);
615
616 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
617 partial_spills.erase(to_spill);
618 spilled_registers.sgpr += to_spill.size();
619 reg_pressure.sgpr -= to_spill.size();
620 }
621
622 while (reg_pressure.vgpr > ctx.target_pressure.vgpr) {
623 assert(!partial_spills.empty());
624
625 std::set<Temp>::iterator it = partial_spills.begin();
626 Temp to_spill = *it;
627 unsigned distance = ctx.next_use_distances_start[block_idx][*it].second;
628 while (it != partial_spills.end()) {
629 assert(ctx.spills_entry[block_idx].find(*it) == ctx.spills_entry[block_idx].end());
630
631 if (it->type() == RegType::vgpr && ctx.next_use_distances_start[block_idx][*it].second > distance) {
632 distance = ctx.next_use_distances_start[block_idx][*it].second;
633 to_spill = *it;
634 }
635 ++it;
636 }
637 assert(distance != 0);
638
639 ctx.spills_entry[block_idx][to_spill] = ctx.allocate_spill_id(to_spill.regClass());
640 partial_spills.erase(to_spill);
641 spilled_registers.vgpr += to_spill.size();
642 reg_pressure.vgpr -= to_spill.size();
643 }
644
645 return spilled_registers;
646 }
647
648
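/* Makes the spill state at the block entry consistent with its predecessors:
 * variables that are live-in in a register but spilled at a predecessor's exit
 * are reloaded on that edge, variables and phi definitions that enter the
 * block spilled get spill instructions at the predecessors, interferences and
 * affinities between the involved spill ids are recorded, and new phis are
 * created when a variable was renamed differently on the incoming paths. */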
649 void add_coupling_code(spill_ctx& ctx, Block* block, unsigned block_idx)
650 {
651 /* no coupling code necessary */
652 if (block->linear_preds.size() == 0)
653 return;
654
655 std::vector<aco_ptr<Instruction>> instructions;
656 /* branch block: TODO take other branch into consideration */
657 if (block->linear_preds.size() == 1) {
658 assert(ctx.processed[block->linear_preds[0]]);
659
660 if (block->logical_preds.size() == 1) {
661 unsigned pred_idx = block->logical_preds[0];
662 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
663 if (live.first.type() == RegType::sgpr)
664 continue;
665 /* still spilled */
666 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
667 continue;
668
669 /* in register at end of predecessor */
670 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
671 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
672 if (it != ctx.renames[pred_idx].end())
673 ctx.renames[block_idx].insert(*it);
674 continue;
675 }
676
677 /* variable is spilled at predecessor and live at current block: create reload instruction */
678 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
679 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
680 instructions.emplace_back(std::move(reload));
681 ctx.renames[block_idx][live.first] = new_name;
682 }
683 }
684
685 unsigned pred_idx = block->linear_preds[0];
686 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> live : ctx.next_use_distances_start[block_idx]) {
687 if (live.first.type() == RegType::vgpr)
688 continue;
689 /* still spilled */
690 if (ctx.spills_entry[block_idx].find(live.first) != ctx.spills_entry[block_idx].end())
691 continue;
692
693 /* in register at end of predecessor */
694 if (ctx.spills_exit[pred_idx].find(live.first) == ctx.spills_exit[pred_idx].end()) {
695 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(live.first);
696 if (it != ctx.renames[pred_idx].end())
697 ctx.renames[block_idx].insert(*it);
698 continue;
699 }
700
701 /* variable is spilled at predecessor and live at current block: create reload instruction */
702 Temp new_name = {ctx.program->allocateId(), live.first.regClass()};
703 aco_ptr<Instruction> reload = do_reload(ctx, live.first, new_name, ctx.spills_exit[pred_idx][live.first]);
704 instructions.emplace_back(std::move(reload));
705 ctx.renames[block_idx][live.first] = new_name;
706 }
707
708 /* combine new reload instructions with original block */
709 if (!instructions.empty()) {
710 unsigned insert_idx = 0;
711 while (block->instructions[insert_idx]->opcode == aco_opcode::p_phi ||
712 block->instructions[insert_idx]->opcode == aco_opcode::p_linear_phi) {
713 insert_idx++;
714 }
715 ctx.register_demand[block->index].insert(std::next(ctx.register_demand[block->index].begin(), insert_idx),
716 instructions.size(), RegisterDemand());
717 block->instructions.insert(std::next(block->instructions.begin(), insert_idx),
718 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(instructions.begin()),
719 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(instructions.end()));
720 }
721 return;
722 }
723
724 /* loop header and merge blocks: check if all (linear) predecessors have been processed */
725 for (ASSERTED unsigned pred : block->linear_preds)
726 assert(ctx.processed[pred]);
727
728 /* iterate the phi nodes and, for those whose definition is spilled, spill their operands at the predecessors */
729 for (aco_ptr<Instruction>& phi : block->instructions) {
730 if (phi->opcode != aco_opcode::p_phi &&
731 phi->opcode != aco_opcode::p_linear_phi)
732 break;
733
734 /* if the phi is not spilled, add to instructions */
735 if (ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end()) {
736 instructions.emplace_back(std::move(phi));
737 continue;
738 }
739
740 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
741 uint32_t def_spill_id = ctx.spills_entry[block_idx][phi->definitions[0].getTemp()];
742
743 for (unsigned i = 0; i < phi->operands.size(); i++) {
744 if (phi->operands[i].isUndefined())
745 continue;
746
747 unsigned pred_idx = preds[i];
748 assert(phi->operands[i].isTemp() && phi->operands[i].isKill());
749 Temp var = phi->operands[i].getTemp();
750
751 /* build interferences between the phi def and all spilled variables at the predecessor blocks */
752 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[pred_idx]) {
753 if (var == pair.first)
754 continue;
755 ctx.interferences[def_spill_id].second.emplace(pair.second);
756 ctx.interferences[pair.second].second.emplace(def_spill_id);
757 }
758
759 /* check if variable is already spilled at predecessor */
760 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(var);
761 if (spilled != ctx.spills_exit[pred_idx].end()) {
762 if (spilled->second != def_spill_id)
763 ctx.add_affinity(def_spill_id, spilled->second);
764 continue;
765 }
766
767 /* rename if necessary */
768 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
769 if (rename_it != ctx.renames[pred_idx].end()) {
770 var = rename_it->second;
771 ctx.renames[pred_idx].erase(rename_it);
772 }
773
774 uint32_t spill_id = ctx.allocate_spill_id(phi->definitions[0].regClass());
775 ctx.add_affinity(def_spill_id, spill_id);
776 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
777 spill->operands[0] = Operand(var);
778 spill->operands[1] = Operand(spill_id);
779 Block& pred = ctx.program->blocks[pred_idx];
780 unsigned idx = pred.instructions.size();
781 do {
782 assert(idx != 0);
783 idx--;
784 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
785 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
786 pred.instructions.insert(it, std::move(spill));
787 ctx.spills_exit[pred_idx][phi->operands[i].getTemp()] = spill_id;
788 }
789
790 /* remove phi from instructions */
791 phi.reset();
792 }
793
794 /* iterate all (other) variables that enter the block spilled and spill them at the predecessors where they are still in a register */
795 // TODO: it would be better to process them sorted: vgprs first, and those with the longest next-use distance first
796 for (std::pair<Temp, uint32_t> pair : ctx.spills_entry[block_idx]) {
797 std::vector<unsigned> preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
798
799 for (unsigned pred_idx : preds) {
800 /* add interferences between spilled variable and predecessors exit spills */
801 for (std::pair<Temp, uint32_t> exit_spill : ctx.spills_exit[pred_idx]) {
802 if (exit_spill.first == pair.first)
803 continue;
804 ctx.interferences[exit_spill.second].second.emplace(pair.second);
805 ctx.interferences[pair.second].second.emplace(exit_spill.second);
806 }
807
808 /* variable is already spilled at predecessor */
809 std::map<Temp, uint32_t>::iterator spilled = ctx.spills_exit[pred_idx].find(pair.first);
810 if (spilled != ctx.spills_exit[pred_idx].end()) {
811 if (spilled->second != pair.second)
812 ctx.add_affinity(pair.second, spilled->second);
813 continue;
814 }
815
816 /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
817 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
818 continue;
819
820 /* variable is in register at predecessor and has to be spilled */
821 /* rename if necessary */
822 Temp var = pair.first;
823 std::map<Temp, Temp>::iterator rename_it = ctx.renames[pred_idx].find(var);
824 if (rename_it != ctx.renames[pred_idx].end()) {
825 var = rename_it->second;
826 ctx.renames[pred_idx].erase(rename_it);
827 }
828
829 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
830 spill->operands[0] = Operand(var);
831 spill->operands[1] = Operand(pair.second);
832 Block& pred = ctx.program->blocks[pred_idx];
833 unsigned idx = pred.instructions.size();
834 do {
835 assert(idx != 0);
836 idx--;
837 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
838 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
839 pred.instructions.insert(it, std::move(spill));
840 ctx.spills_exit[pred.index][pair.first] = pair.second;
841 }
842 }
843
844 /* iterate the remaining (unspilled) phis and reload their spilled operands at the end of the predecessors */
845 for (aco_ptr<Instruction>& phi : instructions) {
846 assert(phi->opcode == aco_opcode::p_phi || phi->opcode == aco_opcode::p_linear_phi);
847 assert(ctx.spills_entry[block_idx].find(phi->definitions[0].getTemp()) == ctx.spills_entry[block_idx].end());
848
849 std::vector<unsigned>& preds = phi->opcode == aco_opcode::p_phi ? block->logical_preds : block->linear_preds;
850 for (unsigned i = 0; i < phi->operands.size(); i++) {
851 if (!phi->operands[i].isTemp())
852 continue;
853 unsigned pred_idx = preds[i];
854
855 /* rename operand */
856 if (ctx.spills_exit[pred_idx].find(phi->operands[i].getTemp()) == ctx.spills_exit[pred_idx].end()) {
857 std::map<Temp, Temp>::iterator it = ctx.renames[pred_idx].find(phi->operands[i].getTemp());
858 if (it != ctx.renames[pred_idx].end())
859 phi->operands[i].setTemp(it->second);
860 continue;
861 }
862
863 Temp tmp = phi->operands[i].getTemp();
864
865 /* reload phi operand at end of predecessor block */
866 Temp new_name = {ctx.program->allocateId(), tmp.regClass()};
867 Block& pred = ctx.program->blocks[pred_idx];
868 unsigned idx = pred.instructions.size();
869 do {
870 assert(idx != 0);
871 idx--;
872 } while (phi->opcode == aco_opcode::p_phi && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
873 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
874
875 aco_ptr<Instruction> reload = do_reload(ctx, tmp, new_name, ctx.spills_exit[pred_idx][tmp]);
876 pred.instructions.insert(it, std::move(reload));
877
878 ctx.spills_exit[pred_idx].erase(tmp);
879 ctx.renames[pred_idx][tmp] = new_name;
880 phi->operands[i].setTemp(new_name);
881 }
882 }
883
884 /* iterate the variables that are live-in in registers and reload them at the predecessors where they are spilled */
885 // TODO: reload at current block if variable is spilled on all predecessors
886 for (std::pair<Temp, std::pair<uint32_t, uint32_t>> pair : ctx.next_use_distances_start[block_idx]) {
887 /* skip spilled variables */
888 if (ctx.spills_entry[block_idx].find(pair.first) != ctx.spills_entry[block_idx].end())
889 continue;
890 std::vector<unsigned> preds = pair.first.type() == RegType::vgpr ? block->logical_preds : block->linear_preds;
891
892 /* variable is dead at predecessor, it must be from a phi */
893 bool is_dead = false;
894 for (unsigned pred_idx : preds) {
895 if (ctx.next_use_distances_end[pred_idx].find(pair.first) == ctx.next_use_distances_end[pred_idx].end())
896 is_dead = true;
897 }
898 if (is_dead)
899 continue;
900 for (unsigned pred_idx : preds) {
901 /* the variable is not spilled at the predecessor */
902 if (ctx.spills_exit[pred_idx].find(pair.first) == ctx.spills_exit[pred_idx].end())
903 continue;
904
905 /* variable is spilled at predecessor and has to be reloaded */
906 Temp new_name = {ctx.program->allocateId(), pair.first.regClass()};
907 Block& pred = ctx.program->blocks[pred_idx];
908 unsigned idx = pred.instructions.size();
909 do {
910 assert(idx != 0);
911 idx--;
912 } while (pair.first.type() == RegType::vgpr && pred.instructions[idx]->opcode != aco_opcode::p_logical_end);
913 std::vector<aco_ptr<Instruction>>::iterator it = std::next(pred.instructions.begin(), idx);
914
915 aco_ptr<Instruction> reload = do_reload(ctx, pair.first, new_name, ctx.spills_exit[pred.index][pair.first]);
916 pred.instructions.insert(it, std::move(reload));
917
918 ctx.spills_exit[pred.index].erase(pair.first);
919 ctx.renames[pred.index][pair.first] = new_name;
920 }
921
922 /* check if we have to create a new phi for this variable */
923 Temp rename = Temp();
924 bool is_same = true;
925 for (unsigned pred_idx : preds) {
926 if (ctx.renames[pred_idx].find(pair.first) == ctx.renames[pred_idx].end()) {
927 if (rename == Temp())
928 rename = pair.first;
929 else
930 is_same = rename == pair.first;
931 } else {
932 if (rename == Temp())
933 rename = ctx.renames[pred_idx][pair.first];
934 else
935 is_same = rename == ctx.renames[pred_idx][pair.first];
936 }
937
938 if (!is_same)
939 break;
940 }
941
942 if (!is_same) {
943 /* the variable was renamed differently in the predecessors: we have to create a phi */
944 aco_opcode opcode = pair.first.type() == RegType::vgpr ? aco_opcode::p_phi : aco_opcode::p_linear_phi;
945 aco_ptr<Pseudo_instruction> phi{create_instruction<Pseudo_instruction>(opcode, Format::PSEUDO, preds.size(), 1)};
946 rename = {ctx.program->allocateId(), pair.first.regClass()};
947 for (unsigned i = 0; i < phi->operands.size(); i++) {
948 Temp tmp;
949 if (ctx.renames[preds[i]].find(pair.first) != ctx.renames[preds[i]].end())
950 tmp = ctx.renames[preds[i]][pair.first];
951 else if (preds[i] >= block_idx)
952 tmp = rename;
953 else
954 tmp = pair.first;
955 phi->operands[i] = Operand(tmp);
956 }
957 phi->definitions[0] = Definition(rename);
958 instructions.emplace_back(std::move(phi));
959 }
960
961 /* the variable was renamed: add new name to renames */
962 if (!(rename == Temp() || rename == pair.first))
963 ctx.renames[block_idx][pair.first] = rename;
964 }
965
966 /* combine phis with instructions */
967 unsigned idx = 0;
968 while (!block->instructions[idx]) {
969 idx++;
970 }
971
972 ctx.register_demand[block->index].erase(ctx.register_demand[block->index].begin(), ctx.register_demand[block->index].begin() + idx);
973 ctx.register_demand[block->index].insert(ctx.register_demand[block->index].begin(), instructions.size(), RegisterDemand());
974
975 std::vector<aco_ptr<Instruction>>::iterator start = std::next(block->instructions.begin(), idx);
976 instructions.insert(instructions.end(), std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(start),
977 std::move_iterator<std::vector<aco_ptr<Instruction>>::iterator>(block->instructions.end()));
978 block->instructions = std::move(instructions);
979 }
980
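/* Rewrites the instructions of one block: spilled operands are reloaded (and
 * renamed) right before their use, and as long as the register demand around
 * an instruction exceeds the target pressure, the variable with the furthest
 * next use inside the block is spilled, preferring rematerializable ones. */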
981 void process_block(spill_ctx& ctx, unsigned block_idx, Block* block,
982 std::map<Temp, uint32_t> &current_spills, RegisterDemand spilled_registers)
983 {
984 std::vector<std::map<Temp, uint32_t>> local_next_use_distance;
985 std::vector<aco_ptr<Instruction>> instructions;
986 unsigned idx = 0;
987
988 /* phis are handled separately */
989 while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
990 block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
991 aco_ptr<Instruction>& instr = block->instructions[idx];
992 for (const Operand& op : instr->operands) {
993 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
994 if (op.isTemp() && ctx.remat.count(op.getTemp()))
995 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
996 }
997 instructions.emplace_back(std::move(instr));
998 idx++;
999 }
1000
1001 if (block->register_demand.exceeds(ctx.target_pressure))
1002 local_next_use_distance = local_next_uses(ctx, block);
1003
1004 while (idx < block->instructions.size()) {
1005 aco_ptr<Instruction>& instr = block->instructions[idx];
1006
1007 std::map<Temp, std::pair<Temp, uint32_t>> reloads;
1008 std::map<Temp, uint32_t> spills;
1009 /* rename and reload operands */
1010 for (Operand& op : instr->operands) {
1011 if (!op.isTemp())
1012 continue;
1013 if (current_spills.find(op.getTemp()) == current_spills.end()) {
1014 /* the Operand is in register: check if it was renamed */
1015 if (ctx.renames[block_idx].find(op.getTemp()) != ctx.renames[block_idx].end())
1016 op.setTemp(ctx.renames[block_idx][op.getTemp()]);
1017 /* prevent its defining instruction from being DCE'd if it could be rematerialized */
1018 if (ctx.remat.count(op.getTemp()))
1019 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1020 continue;
1021 }
1022 /* the Operand is spilled: add it to reloads */
1023 Temp new_tmp = {ctx.program->allocateId(), op.regClass()};
1024 ctx.renames[block_idx][op.getTemp()] = new_tmp;
1025 reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
1026 current_spills.erase(op.getTemp());
1027 op.setTemp(new_tmp);
1028 spilled_registers -= new_tmp;
1029 }
1030
1031 /* check if register demand is low enough before and after the current instruction */
1032 if (block->register_demand.exceeds(ctx.target_pressure)) {
1033
1034 RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
1035 if (idx == 0) {
1036 for (const Definition& def : instr->definitions) {
1037 if (!def.isTemp())
1038 continue;
1039 new_demand += def.getTemp();
1040 }
1041 } else {
1042 new_demand.update(ctx.register_demand[block_idx][idx - 1]);
1043 }
1044
1045 assert(!local_next_use_distance.empty());
1046
1047 /* if reg pressure is too high, spill variable with furthest next use */
1048 while (RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
1049 unsigned distance = 0;
1050 Temp to_spill;
1051 bool do_rematerialize = false;
1052 if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
1053 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1054 bool can_rematerialize = ctx.remat.count(pair.first);
1055 if (pair.first.type() == RegType::vgpr &&
1056 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1057 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1058 current_spills.find(pair.first) == current_spills.end() &&
1059 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1060 to_spill = pair.first;
1061 distance = pair.second;
1062 do_rematerialize = can_rematerialize;
1063 }
1064 }
1065 } else {
1066 for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
1067 bool can_rematerialize = ctx.remat.count(pair.first);
1068 if (pair.first.type() == RegType::sgpr &&
1069 ((pair.second > distance && can_rematerialize == do_rematerialize) ||
1070 (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
1071 current_spills.find(pair.first) == current_spills.end() &&
1072 ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
1073 to_spill = pair.first;
1074 distance = pair.second;
1075 do_rematerialize = can_rematerialize;
1076 }
1077 }
1078 }
1079
1080 assert(distance != 0 && distance > idx);
1081 uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());
1082
1083 /* add interferences with currently spilled variables */
1084 for (std::pair<Temp, uint32_t> pair : current_spills) {
1085 ctx.interferences[spill_id].second.emplace(pair.second);
1086 ctx.interferences[pair.second].second.emplace(spill_id);
1087 }
1088 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1089 ctx.interferences[spill_id].second.emplace(pair.second.second);
1090 ctx.interferences[pair.second.second].second.emplace(spill_id);
1091 }
1092
1093 current_spills[to_spill] = spill_id;
1094 spilled_registers += to_spill;
1095
1096 /* rename if necessary */
1097 if (ctx.renames[block_idx].find(to_spill) != ctx.renames[block_idx].end()) {
1098 to_spill = ctx.renames[block_idx][to_spill];
1099 }
1100
1101 /* add spill to new instructions */
1102 aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
1103 spill->operands[0] = Operand(to_spill);
1104 spill->operands[1] = Operand(spill_id);
1105 instructions.emplace_back(std::move(spill));
1106 }
1107 }
1108
1109 /* add reloads and instruction to new instructions */
1110 for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
1111 aco_ptr<Instruction> reload = do_reload(ctx, pair.second.first, pair.first, pair.second.second);
1112 instructions.emplace_back(std::move(reload));
1113 }
1114 instructions.emplace_back(std::move(instr));
1115 idx++;
1116 }
1117
1118 block->instructions = std::move(instructions);
1119 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1120 }
1121
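/* Per-block driver: computes the entry spill set, adds interferences between
 * the entry spills, inserts coupling code on the incoming edges (deferred for
 * loop headers), and processes the block's instructions if needed. When the
 * last block of a loop has been handled, the coupling code for the loop header
 * is added and the resulting renames are propagated through the loop body to
 * repair SSA form. */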
1122 void spill_block(spill_ctx& ctx, unsigned block_idx)
1123 {
1124 Block* block = &ctx.program->blocks[block_idx];
1125 ctx.processed[block_idx] = true;
1126
1127 /* determine set of variables which are spilled at the beginning of the block */
1128 RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);
1129
1130 /* add interferences for spilled variables */
1131 for (std::pair<Temp, uint32_t> x : ctx.spills_entry[block_idx]) {
1132 for (std::pair<Temp, uint32_t> y : ctx.spills_entry[block_idx])
1133 if (x.second != y.second)
1134 ctx.interferences[x.second].second.emplace(y.second);
1135 }
1136
1137 bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
1138 if (!is_loop_header) {
1139 /* add spill/reload code on incoming control flow edges */
1140 add_coupling_code(ctx, block, block_idx);
1141 }
1142
1143 std::map<Temp, uint32_t> current_spills = ctx.spills_entry[block_idx];
1144
1145 /* check conditions to process this block */
1146 bool process = RegisterDemand(block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
1147 !ctx.renames[block_idx].empty() ||
1148 ctx.remat_used.size();
1149
1150 std::map<Temp, uint32_t>::iterator it = current_spills.begin();
1151 while (!process && it != current_spills.end()) {
1152 if (ctx.next_use_distances_start[block_idx][it->first].first == block_idx)
1153 process = true;
1154 ++it;
1155 }
1156
1157 if (process)
1158 process_block(ctx, block_idx, block, current_spills, spilled_registers);
1159 else
1160 ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
1161
1162 /* check if the next block leaves the current loop */
1163 if (block->loop_nest_depth == 0 || ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
1164 return;
1165
1166 Block* loop_header = ctx.loop_header.top();
1167
1168 /* preserve original renames at end of loop header block */
1169 std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);
1170
1171 /* add coupling code to all loop header predecessors */
1172 add_coupling_code(ctx, loop_header, loop_header->index);
1173
1174 /* update remat_used for phis added in add_coupling_code() */
1175 for (aco_ptr<Instruction>& instr : loop_header->instructions) {
1176 if (!is_phi(instr))
1177 break;
1178 for (const Operand& op : instr->operands) {
1179 if (op.isTemp() && ctx.remat.count(op.getTemp()))
1180 ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
1181 }
1182 }
1183
1184 /* propagate new renames through loop: i.e. repair the SSA */
1185 renames.swap(ctx.renames[loop_header->index]);
1186 for (std::pair<Temp, Temp> rename : renames) {
1187 for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
1188 Block& current = ctx.program->blocks[idx];
1189 std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();
1190
1191 /* first rename phis */
1192 while (instr_it != current.instructions.end()) {
1193 aco_ptr<Instruction>& phi = *instr_it;
1194 if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
1195 break;
1196 /* no need to rename the loop header phis once again. this happened in add_coupling_code() */
1197 if (idx == loop_header->index) {
1198 instr_it++;
1199 continue;
1200 }
1201
1202 for (Operand& op : phi->operands) {
1203 if (!op.isTemp())
1204 continue;
1205 if (op.getTemp() == rename.first)
1206 op.setTemp(rename.second);
1207 }
1208 instr_it++;
1209 }
1210
1211 std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = ctx.next_use_distances_start[idx].find(rename.first);
1212
1213 /* variable is not live at beginning of this block */
1214 if (it == ctx.next_use_distances_start[idx].end())
1215 continue;
1216
1217 /* if the variable is live at the block's exit, add rename */
1218 if (ctx.next_use_distances_end[idx].find(rename.first) != ctx.next_use_distances_end[idx].end())
1219 ctx.renames[idx].insert(rename);
1220
1221 /* rename all uses in this block */
1222 bool renamed = false;
1223 while (!renamed && instr_it != current.instructions.end()) {
1224 aco_ptr<Instruction>& instr = *instr_it;
1225 for (Operand& op : instr->operands) {
1226 if (!op.isTemp())
1227 continue;
1228 if (op.getTemp() == rename.first) {
1229 op.setTemp(rename.second);
1230 /* we can stop with this block as soon as the variable is spilled */
1231 if (instr->opcode == aco_opcode::p_spill)
1232 renamed = true;
1233 }
1234 }
1235 instr_it++;
1236 }
1237 }
1238 }
1239
1240 /* remove loop header info from stack */
1241 ctx.loop_header.pop();
1242 }
1243
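/* Turns spill ids into spill slots: affinities are merged so that related ids
 * end up in the same slot, then SGPR and VGPR ids are greedily packed into
 * non-interfering slots. The p_spill/p_reload pseudo-instructions are then
 * rewritten: SGPR values are kept in lanes of linear VGPRs (slot / 64 selects
 * the VGPR, slot % 64 the lane), which are created on demand in the last
 * top-level block; spilling VGPRs to memory is not implemented here yet. */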
1244 void assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr) {
1245 std::map<uint32_t, uint32_t> sgpr_slot;
1246 std::map<uint32_t, uint32_t> vgpr_slot;
1247 std::vector<bool> is_assigned(ctx.interferences.size());
1248
1249 /* first, handle affinities: just merge all interferences into both spill ids */
1250 for (std::vector<uint32_t>& vec : ctx.affinities) {
1251 for (unsigned i = 0; i < vec.size(); i++) {
1252 for (unsigned j = i + 1; j < vec.size(); j++) {
1253 assert(vec[i] != vec[j]);
1254 for (uint32_t id : ctx.interferences[vec[i]].second)
1255 ctx.interferences[id].second.insert(vec[j]);
1256 for (uint32_t id : ctx.interferences[vec[j]].second)
1257 ctx.interferences[id].second.insert(vec[i]);
1258 ctx.interferences[vec[i]].second.insert(ctx.interferences[vec[j]].second.begin(), ctx.interferences[vec[j]].second.end());
1259 ctx.interferences[vec[j]].second.insert(ctx.interferences[vec[i]].second.begin(), ctx.interferences[vec[i]].second.end());
1260
1261 bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
1262 ctx.is_reloaded[vec[i]] = reloaded;
1263 ctx.is_reloaded[vec[j]] = reloaded;
1264 }
1265 }
1266 }
1267 for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
1268 for (ASSERTED uint32_t id : ctx.interferences[i].second)
1269 assert(i != id);
1270
1271 /* for each spill slot, assign as many spill ids as possible */
1272 std::vector<std::set<uint32_t>> spill_slot_interferences;
1273 unsigned slot_idx = 0;
1274 bool done = false;
1275
1276 /* assign sgpr spill slots */
1277 while (!done) {
1278 done = true;
1279 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1280 if (is_assigned[id] || !ctx.is_reloaded[id])
1281 continue;
1282 if (ctx.interferences[id].first.type() != RegType::sgpr)
1283 continue;
1284
1285 /* check interferences */
1286 bool interferes = false;
1287 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1288 if (i == spill_slot_interferences.size())
1289 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1290 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
1291 interferes = true;
1292 break;
1293 }
1294 }
1295 if (interferes) {
1296 done = false;
1297 continue;
1298 }
1299
1300 /* we found a spill id which can be assigned to the current spill slot */
1301 sgpr_slot[id] = slot_idx;
1302 is_assigned[id] = true;
1303 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1304 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1305
1306 /* add all affinities: there are no additional interferences */
1307 for (std::vector<uint32_t>& vec : ctx.affinities) {
1308 bool found_affinity = false;
1309 for (uint32_t entry : vec) {
1310 if (entry == id) {
1311 found_affinity = true;
1312 break;
1313 }
1314 }
1315 if (!found_affinity)
1316 continue;
1317 for (uint32_t entry : vec) {
1318 sgpr_slot[entry] = slot_idx;
1319 is_assigned[entry] = true;
1320 }
1321 }
1322 }
1323 slot_idx++;
1324 }
1325
1326 slot_idx = 0;
1327 done = false;
1328
1329 /* assign vgpr spill slots */
1330 while (!done) {
1331 done = true;
1332 for (unsigned id = 0; id < ctx.interferences.size(); id++) {
1333 if (is_assigned[id] || !ctx.is_reloaded[id])
1334 continue;
1335 if (ctx.interferences[id].first.type() != RegType::vgpr)
1336 continue;
1337
1338 /* check interferences */
1339 bool interferes = false;
1340 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
1341 if (i == spill_slot_interferences.size())
1342 spill_slot_interferences.emplace_back(std::set<uint32_t>());
1343 /* check for interference and ensure that vector regs are stored next to each other */
1344 if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
1345 interferes = true;
1346 break;
1347 }
1348 }
1349 if (interferes) {
1350 done = false;
1351 continue;
1352 }
1353
1354 /* we found a spill id which can be assigned to the current spill slot */
1355 vgpr_slot[id] = slot_idx;
1356 is_assigned[id] = true;
1357 for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
1358 spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
1359 }
1360 slot_idx++;
1361 }
1362
1363 for (unsigned id = 0; id < is_assigned.size(); id++)
1364 assert(is_assigned[id] || !ctx.is_reloaded[id]);
1365
1366 for (std::vector<uint32_t>& vec : ctx.affinities) {
1367 for (unsigned i = 0; i < vec.size(); i++) {
1368 for (unsigned j = i + 1; j < vec.size(); j++) {
1369 assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
1370 if (!is_assigned[vec[i]])
1371 continue;
1372 assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
1373 assert(ctx.interferences[vec[i]].first.type() == ctx.interferences[vec[j]].first.type());
1374 if (ctx.interferences[vec[i]].first.type() == RegType::sgpr)
1375 assert(sgpr_slot[vec[i]] == sgpr_slot[vec[j]]);
1376 else
1377 assert(vgpr_slot[vec[i]] == vgpr_slot[vec[j]]);
1378 }
1379 }
1380 }
1381
1382 /* hope, we didn't mess up */
1383 std::vector<Temp> vgpr_spill_temps((spill_slot_interferences.size() + 63) / 64);
1384 assert(vgpr_spill_temps.size() <= spills_to_vgpr);
1385
1386 /* replace pseudo instructions with actual hardware instructions */
1387 unsigned last_top_level_block_idx = 0;
1388 std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
1389 for (Block& block : ctx.program->blocks) {
1390
1391 /* after loops, we insert a use of each linear vgpr that was reloaded from inside the loop, so that its live range spans the whole loop */
1392 if (block.loop_nest_depth == 0) {
1393 int end_vgprs = 0;
1394 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1395 if (reload_in_loop[i])
1396 end_vgprs++;
1397 }
1398
1399 if (end_vgprs > 0) {
1400 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
1401 int k = 0;
1402 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1403 if (reload_in_loop[i])
1404 destr->operands[k++] = Operand(vgpr_spill_temps[i]);
1405 reload_in_loop[i] = false;
1406 }
1407 /* find insertion point */
1408 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1409 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1410 ++it;
1411 block.instructions.insert(it, std::move(destr));
1412 }
1413 }
1414
1415 if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
1416 last_top_level_block_idx = block.index;
1417
1418 /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
1419 for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
1420 if (vgpr_spill_temps[i] == Temp())
1421 continue;
1422
1423 bool can_destroy = true;
1424 for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[block.linear_preds[0]]) {
1425
1426 if (sgpr_slot.find(pair.second) != sgpr_slot.end() &&
1427 sgpr_slot[pair.second] / 64 == i) {
1428 can_destroy = false;
1429 break;
1430 }
1431 }
1432 if (can_destroy)
1433 vgpr_spill_temps[i] = Temp();
1434 }
1435 }
1436
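/* rewrite the block's instructions: p_spill/p_reload pseudos referencing a spill id are rewritten to address a lane of the backing linear vgpr, and rematerialized instructions that are no longer needed are dropped */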
1437 std::vector<aco_ptr<Instruction>>::iterator it;
1438 std::vector<aco_ptr<Instruction>> instructions;
1439 instructions.reserve(block.instructions.size());
1440 for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {
1441
1442 if ((*it)->opcode == aco_opcode::p_spill) {
1443 uint32_t spill_id = (*it)->operands[1].constantValue();
1444
1445 if (!ctx.is_reloaded[spill_id]) {
1446 /* never reloaded, so don't spill */
1447 } else if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1448 /* spill vgpr */
1449 ctx.program->config->spilled_vgprs += (*it)->operands[0].size();
1450
1451 assert(false && "vgpr spilling not yet implemented.");
1452 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1453 ctx.program->config->spilled_sgprs += (*it)->operands[0].size();
1454
1455 uint32_t spill_slot = sgpr_slot[spill_id];
1456
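/* sgpr spill slot N lives in lane (N % 64) of linear vgpr vgpr_spill_temps[N / 64], e.g. slot 70 maps to lane 6 of the second linear vgpr */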
1457 /* check if the linear vgpr already exists */
1458 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1459 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1460 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1461 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1462 create->definitions[0] = Definition(linear_vgpr);
1463 /* find the right place to insert this definition */
1464 if (last_top_level_block_idx == block.index) {
1465 /* insert right before the current instruction */
1466 instructions.emplace_back(std::move(create));
1467 } else {
1468 assert(last_top_level_block_idx < block.index);
1469 /* insert before the branch at last top level block */
1470 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1471 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1472 }
1473 }
1474
1475 /* spill sgpr: rewrite the p_spill to take the linear vgpr and the lane (slot % 64) as operands */
1476 Pseudo_instruction* spill = create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
1477 spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1478 spill->operands[1] = Operand(spill_slot % 64);
1479 spill->operands[2] = (*it)->operands[0];
1480 instructions.emplace_back(aco_ptr<Instruction>(spill));
1481 } else {
1482 unreachable("No spill slot assigned for spill id");
1483 }
1484
1485 } else if ((*it)->opcode == aco_opcode::p_reload) {
1486 uint32_t spill_id = (*it)->operands[0].constantValue();
1487 assert(ctx.is_reloaded[spill_id]);
1488
1489 if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
1490 /* reload vgpr */
1491 assert(false && "vgpr spilling not yet implemented.");
1492
1493 } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
1494 uint32_t spill_slot = sgpr_slot[spill_id];
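/* remember reloads inside loops, so that a p_end_linear_vgpr user is inserted after the loop and the linear vgpr stays alive for the whole loop */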
1495 reload_in_loop[spill_slot / 64] = block.loop_nest_depth > 0;
1496
1497 /* check if the linear vgpr already exists */
1498 if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
1499 Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
1500 vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
1501 aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
1502 create->definitions[0] = Definition(linear_vgpr);
1503 /* find the right place to insert this definition */
1504 if (last_top_level_block_idx == block.index) {
1505 /* insert right before the current instruction */
1506 instructions.emplace_back(std::move(create));
1507 } else {
1508 assert(last_top_level_block_idx < block.index);
1509 /* insert before the branch at last top level block */
1510 std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
1511 instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
1512 }
1513 }
1514
1515 /* reload sgpr: rewrite the p_reload to take the linear vgpr and the lane (slot % 64) as operands */
1516 Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 2, 1);
1517 reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
1518 reload->operands[1] = Operand(spill_slot % 64);
1519 reload->definitions[0] = (*it)->definitions[0];
1520 instructions.emplace_back(aco_ptr<Instruction>(reload));
1521 } else {
1522 unreachable("No spill slot assigned for spill id");
1523 }
1524 } else if (!ctx.remat_used.count(it->get()) || ctx.remat_used[it->get()]) {
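/* keep the instruction unless it is a rematerialization source whose result is no longer used (the value is recomputed at its uses instead) */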
1525 instructions.emplace_back(std::move(*it));
1526 }
1527
1528 }
1529 block.instructions = std::move(instructions);
1530 }
1531
1532 /* SSA elimination inserts copies for logical phis right before p_logical_end
1533 * So if a linear vgpr is used between that p_logical_end and the branch,
1534 * we need to ensure logical phis don't choose a definition which aliases
1535 * the linear vgpr.
1536 * TODO: Moving the spills and reloads to before p_logical_end might produce
1537 * slightly better code. */
1538 for (Block& block : ctx.program->blocks) {
1539 /* loop exits are already handled */
1540 if (block.logical_preds.size() <= 1)
1541 continue;
1542
1543 bool has_logical_phis = false;
1544 for (aco_ptr<Instruction>& instr : block.instructions) {
1545 if (instr->opcode == aco_opcode::p_phi) {
1546 has_logical_phis = true;
1547 break;
1548 } else if (instr->opcode != aco_opcode::p_linear_phi) {
1549 break;
1550 }
1551 }
1552 if (!has_logical_phis)
1553 continue;
1554
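/* collect the linear vgprs that spills/reloads use between p_logical_end and the branch in any logical predecessor */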
1555 std::set<Temp> vgprs;
1556 for (unsigned pred_idx : block.logical_preds) {
1557 Block& pred = ctx.program->blocks[pred_idx];
1558 for (int i = pred.instructions.size() - 1; i >= 0; i--) {
1559 aco_ptr<Instruction>& pred_instr = pred.instructions[i];
1560 if (pred_instr->opcode == aco_opcode::p_logical_end) {
1561 break;
1562 } else if (pred_instr->opcode == aco_opcode::p_spill ||
1563 pred_instr->opcode == aco_opcode::p_reload) {
1564 vgprs.insert(pred_instr->operands[0].getTemp());
1565 }
1566 }
1567 }
1568 if (vgprs.empty())
1569 continue;
1570
1571 aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
1572 int k = 0;
1573 for (Temp tmp : vgprs) {
1574 destr->operands[k++] = Operand(tmp);
1575 }
1576 /* find insertion point */
1577 std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
1578 while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
1579 ++it;
1580 block.instructions.insert(it, std::move(destr));
1581 }
1582 }
1583
1584 } /* end namespace */
1585
1586
1587 void spill(Program* program, live& live_vars, const struct radv_nir_compiler_options *options)
1588 {
1589 program->config->spilled_vgprs = 0;
1590 program->config->spilled_sgprs = 0;
1591
1592 /* no spilling when wave count is already high */
1593 if (program->num_waves >= 6)
1594 return;
1595
1596 /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
1597 lower_to_cssa(program, live_vars, options);
1598
1599 /* otherwise, check whether we can reduce register demand enough to improve occupancy */
1600
1601 /* calculate the maximum register demand over the whole program */
1602 RegisterDemand max_reg_demand;
1603 for (Block& block : program->blocks) {
1604 max_reg_demand.update(block.register_demand);
1605 }
1606
1607 RegisterDemand target_pressure = {256, int16_t(program->sgpr_limit)};
1608 unsigned num_waves = 1;
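/* one linear vgpr provides 64 sgpr spill slots (one per lane), so round the sgpr overshoot up to whole vgprs */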
1609 int spills_to_vgpr = (max_reg_demand.sgpr - program->sgpr_limit + 63) / 64;
1610
1611 /* test if it is possible to increase occupancy with little spilling */
1612 for (unsigned num_waves_next = 2; num_waves_next <= program->max_waves; num_waves_next++) {
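/* candidate budget: split the 256 physical vgprs between the waves, rounded down to a multiple of 4, and take the sgpr limit for that wave count */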
1613 RegisterDemand target_pressure_next = {int16_t((256 / num_waves_next) & ~3),
1614 int16_t(get_addr_sgpr_from_waves(program, num_waves_next))};
1615
1616 /* Currently, vgpr spilling is not supported.
1617 * Spill as many sgprs as necessary so that sgpr pressure does not hinder occupancy */
1618 if (max_reg_demand.vgpr > target_pressure_next.vgpr)
1619 break;
1620 /* check that we have enough free vgprs to spill sgprs to */
1621 if (max_reg_demand.sgpr > target_pressure_next.sgpr) {
1622 /* round up to whole 64-slot vgprs and add some buffer in case graph coloring is not perfect */
1623 const int spills_to_vgpr_next = (max_reg_demand.sgpr - target_pressure_next.sgpr + 63 + 32) / 64;
1624 if (spills_to_vgpr_next + max_reg_demand.vgpr > target_pressure_next.vgpr)
1625 break;
1626 spills_to_vgpr = spills_to_vgpr_next;
1627 }
1628
1629 target_pressure = target_pressure_next;
1630 num_waves = num_waves_next;
1631 }
1632
1633 assert(max_reg_demand.vgpr <= target_pressure.vgpr && "VGPR spilling not yet supported.");
1634 /* nothing to do */
1635 if (num_waves == program->num_waves)
1636 return;
1637
1638 /* initialize ctx */
1639 spill_ctx ctx(target_pressure, program, live_vars.register_demand);
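/* next-use distances drive the spilling heuristic from the Braun/Hack paper; rematerialization info allows cheap instructions to be recomputed at their uses instead of being reloaded */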
1640 compute_global_next_uses(ctx, live_vars.live_out);
1641 get_rematerialize_info(ctx);
1642
1643 /* create spills and reloads */
1644 for (unsigned i = 0; i < program->blocks.size(); i++)
1645 spill_block(ctx, i);
1646
1647 /* assign spill slots and DCE rematerialized code */
1648 assign_spill_slots(ctx, spills_to_vgpr);
1649
1650 /* update live variable information */
1651 live_vars = live_var_analysis(program, options);
1652
1653 assert(program->num_waves >= num_waves);
1654 }
1655
1656 }
1657