2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 #include "vulkan/radv_shader.h"
33 * Implements the spilling algorithm on SSA-form from
34 * "Register Spilling and Live-Range Splitting for SSA-Form Programs"
35 * by Matthias Braun and Sebastian Hack.
47 RegisterDemand target_pressure
;
49 std::vector
<std::vector
<RegisterDemand
>> register_demand
;
50 std::vector
<std::map
<Temp
, Temp
>> renames
;
51 std::vector
<std::map
<Temp
, uint32_t>> spills_entry
;
52 std::vector
<std::map
<Temp
, uint32_t>> spills_exit
;
53 std::vector
<bool> processed
;
54 std::stack
<Block
*> loop_header
;
55 std::vector
<std::map
<Temp
, std::pair
<uint32_t, uint32_t>>> next_use_distances_start
;
56 std::vector
<std::map
<Temp
, std::pair
<uint32_t, uint32_t>>> next_use_distances_end
;
57 std::vector
<std::pair
<RegClass
, std::set
<uint32_t>>> interferences
;
58 std::vector
<std::vector
<uint32_t>> affinities
;
59 std::vector
<bool> is_reloaded
;
60 std::map
<Temp
, remat_info
> remat
;
61 std::map
<Instruction
*, bool> remat_used
;
63 spill_ctx(const RegisterDemand target_pressure
, Program
* program
,
64 std::vector
<std::vector
<RegisterDemand
>> register_demand
)
65 : target_pressure(target_pressure
), program(program
),
66 register_demand(register_demand
), renames(program
->blocks
.size()),
67 spills_entry(program
->blocks
.size()), spills_exit(program
->blocks
.size()),
68 processed(program
->blocks
.size(), false) {}
70 void add_affinity(uint32_t first
, uint32_t second
)
72 unsigned found_first
= affinities
.size();
73 unsigned found_second
= affinities
.size();
74 for (unsigned i
= 0; i
< affinities
.size(); i
++) {
75 std::vector
<uint32_t>& vec
= affinities
[i
];
76 for (uint32_t entry
: vec
) {
79 else if (entry
== second
)
83 if (found_first
== affinities
.size() && found_second
== affinities
.size()) {
84 affinities
.emplace_back(std::vector
<uint32_t>({first
, second
}));
85 } else if (found_first
< affinities
.size() && found_second
== affinities
.size()) {
86 affinities
[found_first
].push_back(second
);
87 } else if (found_second
< affinities
.size() && found_first
== affinities
.size()) {
88 affinities
[found_second
].push_back(first
);
89 } else if (found_first
!= found_second
) {
90 /* merge second into first */
91 affinities
[found_first
].insert(affinities
[found_first
].end(), affinities
[found_second
].begin(), affinities
[found_second
].end());
92 affinities
.erase(std::next(affinities
.begin(), found_second
));
94 assert(found_first
== found_second
);
98 uint32_t allocate_spill_id(RegClass rc
)
100 interferences
.emplace_back(rc
, std::set
<uint32_t>());
101 is_reloaded
.push_back(false);
102 return next_spill_id
++;
105 uint32_t next_spill_id
= 0;
108 int32_t get_dominator(int idx_a
, int idx_b
, Program
* program
, bool is_linear
)
116 while (idx_a
!= idx_b
) {
118 idx_a
= program
->blocks
[idx_a
].linear_idom
;
120 idx_b
= program
->blocks
[idx_b
].linear_idom
;
123 while (idx_a
!= idx_b
) {
125 idx_a
= program
->blocks
[idx_a
].logical_idom
;
127 idx_b
= program
->blocks
[idx_b
].logical_idom
;
134 void next_uses_per_block(spill_ctx
& ctx
, unsigned block_idx
, std::set
<uint32_t>& worklist
)
136 Block
* block
= &ctx
.program
->blocks
[block_idx
];
137 std::map
<Temp
, std::pair
<uint32_t, uint32_t>> next_uses
= ctx
.next_use_distances_end
[block_idx
];
139 /* to compute the next use distance at the beginning of the block, we have to add the block's size */
140 for (std::map
<Temp
, std::pair
<uint32_t, uint32_t>>::iterator it
= next_uses
.begin(); it
!= next_uses
.end();) {
141 it
->second
.second
= it
->second
.second
+ block
->instructions
.size();
143 /* remove the live out exec mask as we really don't want to spill it */
144 if (it
->first
== block
->live_out_exec
)
145 it
= next_uses
.erase(it
);
150 int idx
= block
->instructions
.size() - 1;
152 aco_ptr
<Instruction
>& instr
= block
->instructions
[idx
];
154 if (instr
->opcode
== aco_opcode::p_linear_phi
||
155 instr
->opcode
== aco_opcode::p_phi
)
158 for (const Definition
& def
: instr
->definitions
) {
160 next_uses
.erase(def
.getTemp());
163 for (const Operand
& op
: instr
->operands
) {
165 if (op
.isFixed() && op
.physReg() == exec
)
168 next_uses
[op
.getTemp()] = {block_idx
, idx
};
173 assert(block_idx
!= 0 || next_uses
.empty());
174 ctx
.next_use_distances_start
[block_idx
] = next_uses
;
176 aco_ptr
<Instruction
>& instr
= block
->instructions
[idx
];
177 assert(instr
->opcode
== aco_opcode::p_linear_phi
|| instr
->opcode
== aco_opcode::p_phi
);
179 for (unsigned i
= 0; i
< instr
->operands
.size(); i
++) {
180 unsigned pred_idx
= instr
->opcode
== aco_opcode::p_phi
?
181 block
->logical_preds
[i
] :
182 block
->linear_preds
[i
];
183 if (instr
->operands
[i
].isTemp()) {
184 if (ctx
.next_use_distances_end
[pred_idx
].find(instr
->operands
[i
].getTemp()) == ctx
.next_use_distances_end
[pred_idx
].end() ||
185 ctx
.next_use_distances_end
[pred_idx
][instr
->operands
[i
].getTemp()] != std::pair
<uint32_t, uint32_t>{block_idx
, 0})
186 worklist
.insert(pred_idx
);
187 ctx
.next_use_distances_end
[pred_idx
][instr
->operands
[i
].getTemp()] = {block_idx
, 0};
190 next_uses
.erase(instr
->definitions
[0].getTemp());
194 /* all remaining live vars must be live-out at the predecessors */
195 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: next_uses
) {
196 Temp temp
= pair
.first
;
197 uint32_t distance
= pair
.second
.second
;
198 uint32_t dom
= pair
.second
.first
;
199 std::vector
<unsigned>& preds
= temp
.is_linear() ? block
->linear_preds
: block
->logical_preds
;
200 for (unsigned pred_idx
: preds
) {
201 if (ctx
.program
->blocks
[pred_idx
].loop_nest_depth
> block
->loop_nest_depth
)
203 if (ctx
.next_use_distances_end
[pred_idx
].find(temp
) != ctx
.next_use_distances_end
[pred_idx
].end()) {
204 dom
= get_dominator(dom
, ctx
.next_use_distances_end
[pred_idx
][temp
].first
, ctx
.program
, temp
.is_linear());
205 distance
= std::min(ctx
.next_use_distances_end
[pred_idx
][temp
].second
, distance
);
207 if (ctx
.next_use_distances_end
[pred_idx
][temp
] != std::pair
<uint32_t, uint32_t>{dom
, distance
})
208 worklist
.insert(pred_idx
);
209 ctx
.next_use_distances_end
[pred_idx
][temp
] = {dom
, distance
};
215 void compute_global_next_uses(spill_ctx
& ctx
, std::vector
<std::set
<Temp
>>& live_out
)
217 ctx
.next_use_distances_start
.resize(ctx
.program
->blocks
.size());
218 ctx
.next_use_distances_end
.resize(ctx
.program
->blocks
.size());
219 std::set
<uint32_t> worklist
;
220 for (Block
& block
: ctx
.program
->blocks
)
221 worklist
.insert(block
.index
);
223 while (!worklist
.empty()) {
224 std::set
<unsigned>::reverse_iterator b_it
= worklist
.rbegin();
225 unsigned block_idx
= *b_it
;
226 worklist
.erase(block_idx
);
227 next_uses_per_block(ctx
, block_idx
, worklist
);
231 bool should_rematerialize(aco_ptr
<Instruction
>& instr
)
233 /* TODO: rematerialization is only supported for VOP1, SOP1 and PSEUDO */
234 if (instr
->format
!= Format::VOP1
&& instr
->format
!= Format::SOP1
&& instr
->format
!= Format::PSEUDO
)
236 /* TODO: pseudo-instruction rematerialization is only supported for p_create_vector */
237 if (instr
->format
== Format::PSEUDO
&& instr
->opcode
!= aco_opcode::p_create_vector
)
240 for (const Operand
& op
: instr
->operands
) {
241 /* TODO: rematerialization using temporaries isn't yet supported */
246 /* TODO: rematerialization with multiple definitions isn't yet supported */
247 if (instr
->definitions
.size() > 1)
253 aco_ptr
<Instruction
> do_reload(spill_ctx
& ctx
, Temp tmp
, Temp new_name
, uint32_t spill_id
)
255 std::map
<Temp
, remat_info
>::iterator remat
= ctx
.remat
.find(tmp
);
256 if (remat
!= ctx
.remat
.end()) {
257 Instruction
*instr
= remat
->second
.instr
;
258 assert((instr
->format
== Format::VOP1
|| instr
->format
== Format::SOP1
|| instr
->format
== Format::PSEUDO
) && "unsupported");
259 assert((instr
->format
!= Format::PSEUDO
|| instr
->opcode
== aco_opcode::p_create_vector
) && "unsupported");
260 assert(instr
->definitions
.size() == 1 && "unsupported");
262 aco_ptr
<Instruction
> res
;
263 if (instr
->format
== Format::VOP1
) {
264 res
.reset(create_instruction
<VOP1_instruction
>(instr
->opcode
, instr
->format
, instr
->operands
.size(), instr
->definitions
.size()));
265 } else if (instr
->format
== Format::SOP1
) {
266 res
.reset(create_instruction
<SOP1_instruction
>(instr
->opcode
, instr
->format
, instr
->operands
.size(), instr
->definitions
.size()));
267 } else if (instr
->format
== Format::PSEUDO
) {
268 res
.reset(create_instruction
<Instruction
>(instr
->opcode
, instr
->format
, instr
->operands
.size(), instr
->definitions
.size()));
270 for (unsigned i
= 0; i
< instr
->operands
.size(); i
++) {
271 res
->operands
[i
] = instr
->operands
[i
];
272 if (instr
->operands
[i
].isTemp()) {
273 assert(false && "unsupported");
274 if (ctx
.remat
.count(instr
->operands
[i
].getTemp()))
275 ctx
.remat_used
[ctx
.remat
[instr
->operands
[i
].getTemp()].instr
] = true;
278 res
->definitions
[0] = Definition(new_name
);
281 aco_ptr
<Pseudo_instruction
> reload
{create_instruction
<Pseudo_instruction
>(aco_opcode::p_reload
, Format::PSEUDO
, 1, 1)};
282 reload
->operands
[0] = Operand(spill_id
);
283 reload
->definitions
[0] = Definition(new_name
);
284 ctx
.is_reloaded
[spill_id
] = true;
289 void get_rematerialize_info(spill_ctx
& ctx
)
291 for (Block
& block
: ctx
.program
->blocks
) {
292 bool logical
= false;
293 for (aco_ptr
<Instruction
>& instr
: block
.instructions
) {
294 if (instr
->opcode
== aco_opcode::p_logical_start
)
296 else if (instr
->opcode
== aco_opcode::p_logical_end
)
298 if (logical
&& should_rematerialize(instr
)) {
299 for (const Definition
& def
: instr
->definitions
) {
301 ctx
.remat
[def
.getTemp()] = (remat_info
){instr
.get()};
302 ctx
.remat_used
[instr
.get()] = false;
310 std::vector
<std::map
<Temp
, uint32_t>> local_next_uses(spill_ctx
& ctx
, Block
* block
)
312 std::vector
<std::map
<Temp
, uint32_t>> local_next_uses(block
->instructions
.size());
314 std::map
<Temp
, uint32_t> next_uses
;
315 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_end
[block
->index
]) {
316 /* omit live out exec mask */
317 if (pair
.first
== block
->live_out_exec
)
320 next_uses
[pair
.first
] = pair
.second
.second
+ block
->instructions
.size();
323 for (int idx
= block
->instructions
.size() - 1; idx
>= 0; idx
--) {
324 aco_ptr
<Instruction
>& instr
= block
->instructions
[idx
];
327 if (instr
->opcode
== aco_opcode::p_phi
|| instr
->opcode
== aco_opcode::p_linear_phi
)
330 for (const Operand
& op
: instr
->operands
) {
331 if (op
.isFixed() && op
.physReg() == exec
)
334 next_uses
[op
.getTemp()] = idx
;
336 for (const Definition
& def
: instr
->definitions
) {
338 next_uses
.erase(def
.getTemp());
340 local_next_uses
[idx
] = next_uses
;
342 return local_next_uses
;
346 RegisterDemand
init_live_in_vars(spill_ctx
& ctx
, Block
* block
, unsigned block_idx
)
348 RegisterDemand spilled_registers
;
350 /* first block, nothing was spilled before */
354 /* loop header block */
355 if (block
->loop_nest_depth
> ctx
.program
->blocks
[block_idx
- 1].loop_nest_depth
) {
356 assert(block
->linear_preds
[0] == block_idx
- 1);
357 assert(block
->logical_preds
[0] == block_idx
- 1);
359 /* create new loop_info */
360 ctx
.loop_header
.emplace(block
);
362 /* check how many live-through variables should be spilled */
363 RegisterDemand new_demand
;
364 unsigned i
= block_idx
;
365 while (ctx
.program
->blocks
[i
].loop_nest_depth
>= block
->loop_nest_depth
) {
366 assert(ctx
.program
->blocks
.size() > i
);
367 new_demand
.update(ctx
.program
->blocks
[i
].register_demand
);
370 unsigned loop_end
= i
;
372 /* select live-through vgpr variables */
373 while (new_demand
.vgpr
- spilled_registers
.vgpr
> ctx
.target_pressure
.vgpr
) {
374 unsigned distance
= 0;
376 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_end
[block_idx
- 1]) {
377 if (pair
.first
.type() == RegType::vgpr
&&
378 pair
.second
.first
>= loop_end
&&
379 pair
.second
.second
> distance
&&
380 ctx
.spills_entry
[block_idx
].find(pair
.first
) == ctx
.spills_entry
[block_idx
].end()) {
381 to_spill
= pair
.first
;
382 distance
= pair
.second
.second
;
389 if (ctx
.spills_exit
[block_idx
- 1].find(to_spill
) == ctx
.spills_exit
[block_idx
- 1].end()) {
390 spill_id
= ctx
.allocate_spill_id(to_spill
.regClass());
392 spill_id
= ctx
.spills_exit
[block_idx
- 1][to_spill
];
395 ctx
.spills_entry
[block_idx
][to_spill
] = spill_id
;
396 spilled_registers
.vgpr
+= to_spill
.size();
399 /* select live-through sgpr variables */
400 while (new_demand
.sgpr
- spilled_registers
.sgpr
> ctx
.target_pressure
.sgpr
) {
401 unsigned distance
= 0;
403 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_end
[block_idx
- 1]) {
404 if (pair
.first
.type() == RegType::sgpr
&&
405 pair
.second
.first
>= loop_end
&&
406 pair
.second
.second
> distance
&&
407 ctx
.spills_entry
[block_idx
].find(pair
.first
) == ctx
.spills_entry
[block_idx
].end()) {
408 to_spill
= pair
.first
;
409 distance
= pair
.second
.second
;
416 if (ctx
.spills_exit
[block_idx
- 1].find(to_spill
) == ctx
.spills_exit
[block_idx
- 1].end()) {
417 spill_id
= ctx
.allocate_spill_id(to_spill
.regClass());
419 spill_id
= ctx
.spills_exit
[block_idx
- 1][to_spill
];
422 ctx
.spills_entry
[block_idx
][to_spill
] = spill_id
;
423 spilled_registers
.sgpr
+= to_spill
.size();
429 if (!RegisterDemand(new_demand
- spilled_registers
).exceeds(ctx
.target_pressure
))
430 return spilled_registers
;
432 /* if reg pressure is too high at beginning of loop, add variables with furthest use */
434 while (block
->instructions
[idx
]->opcode
== aco_opcode::p_phi
|| block
->instructions
[idx
]->opcode
== aco_opcode::p_linear_phi
)
437 assert(idx
!= 0 && "loop without phis: TODO");
439 RegisterDemand reg_pressure
= ctx
.register_demand
[block_idx
][idx
] - spilled_registers
;
440 while (reg_pressure
.sgpr
> ctx
.target_pressure
.sgpr
) {
441 unsigned distance
= 0;
443 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_start
[block_idx
]) {
444 if (pair
.first
.type() == RegType::sgpr
&&
445 pair
.second
.second
> distance
&&
446 ctx
.spills_entry
[block_idx
].find(pair
.first
) == ctx
.spills_entry
[block_idx
].end()) {
447 to_spill
= pair
.first
;
448 distance
= pair
.second
.second
;
451 assert(distance
!= 0);
453 ctx
.spills_entry
[block_idx
][to_spill
] = ctx
.allocate_spill_id(to_spill
.regClass());
454 spilled_registers
.sgpr
+= to_spill
.size();
455 reg_pressure
.sgpr
-= to_spill
.size();
457 while (reg_pressure
.vgpr
> ctx
.target_pressure
.vgpr
) {
458 unsigned distance
= 0;
460 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_start
[block_idx
]) {
461 if (pair
.first
.type() == RegType::vgpr
&&
462 pair
.second
.second
> distance
&&
463 ctx
.spills_entry
[block_idx
].find(pair
.first
) == ctx
.spills_entry
[block_idx
].end()) {
464 to_spill
= pair
.first
;
465 distance
= pair
.second
.second
;
468 assert(distance
!= 0);
469 ctx
.spills_entry
[block_idx
][to_spill
] = ctx
.allocate_spill_id(to_spill
.regClass());
470 spilled_registers
.vgpr
+= to_spill
.size();
471 reg_pressure
.vgpr
-= to_spill
.size();
474 return spilled_registers
;
478 if (block
->linear_preds
.size() == 1) {
479 /* keep variables spilled if they are alive and not used in the current block */
480 unsigned pred_idx
= block
->linear_preds
[0];
481 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_exit
[pred_idx
]) {
482 if (pair
.first
.type() == RegType::sgpr
&&
483 ctx
.next_use_distances_start
[block_idx
].find(pair
.first
) != ctx
.next_use_distances_start
[block_idx
].end() &&
484 ctx
.next_use_distances_start
[block_idx
][pair
.first
].second
> block_idx
) {
485 ctx
.spills_entry
[block_idx
].insert(pair
);
486 spilled_registers
.sgpr
+= pair
.first
.size();
489 if (block
->logical_preds
.size() == 1) {
490 pred_idx
= block
->logical_preds
[0];
491 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_exit
[pred_idx
]) {
492 if (pair
.first
.type() == RegType::vgpr
&&
493 ctx
.next_use_distances_start
[block_idx
].find(pair
.first
) != ctx
.next_use_distances_start
[block_idx
].end() &&
494 ctx
.next_use_distances_end
[pred_idx
][pair
.first
].second
> block_idx
) {
495 ctx
.spills_entry
[block_idx
].insert(pair
);
496 spilled_registers
.vgpr
+= pair
.first
.size();
501 /* if register demand is still too high, we just keep all spilled live vars and process the block */
502 if (block
->register_demand
.sgpr
- spilled_registers
.sgpr
> ctx
.target_pressure
.sgpr
) {
503 pred_idx
= block
->linear_preds
[0];
504 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_exit
[pred_idx
]) {
505 if (pair
.first
.type() == RegType::sgpr
&&
506 ctx
.next_use_distances_start
[block_idx
].find(pair
.first
) != ctx
.next_use_distances_start
[block_idx
].end() &&
507 ctx
.spills_entry
[block_idx
].insert(pair
).second
) {
508 spilled_registers
.sgpr
+= pair
.first
.size();
512 if (block
->register_demand
.vgpr
- spilled_registers
.vgpr
> ctx
.target_pressure
.vgpr
&& block
->logical_preds
.size() == 1) {
513 pred_idx
= block
->logical_preds
[0];
514 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_exit
[pred_idx
]) {
515 if (pair
.first
.type() == RegType::vgpr
&&
516 ctx
.next_use_distances_start
[block_idx
].find(pair
.first
) != ctx
.next_use_distances_start
[block_idx
].end() &&
517 ctx
.spills_entry
[block_idx
].insert(pair
).second
) {
518 spilled_registers
.vgpr
+= pair
.first
.size();
523 return spilled_registers
;
526 /* else: merge block */
527 std::set
<Temp
> partial_spills
;
529 /* keep variables spilled on all incoming paths */
530 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_start
[block_idx
]) {
531 std::vector
<unsigned>& preds
= pair
.first
.type() == RegType::vgpr
? block
->logical_preds
: block
->linear_preds
;
532 /* If it can be rematerialized, keep the variable spilled if all predecessors do not reload it.
533 * Otherwise, if any predecessor reloads it, ensure it's reloaded on all other predecessors.
534 * The idea is that it's better in practice to rematerialize redundantly than to create lots of phis. */
535 /* TODO: test this idea with more than Dawn of War III shaders (the current pipeline-db doesn't seem to exercise this path much) */
536 bool remat
= ctx
.remat
.count(pair
.first
);
538 uint32_t spill_id
= 0;
539 for (unsigned pred_idx
: preds
) {
540 /* variable is not even live at the predecessor: probably from a phi */
541 if (ctx
.next_use_distances_end
[pred_idx
].find(pair
.first
) == ctx
.next_use_distances_end
[pred_idx
].end()) {
545 if (ctx
.spills_exit
[pred_idx
].find(pair
.first
) == ctx
.spills_exit
[pred_idx
].end()) {
549 partial_spills
.insert(pair
.first
);
550 /* it might be that on one incoming path, the variable has a different spill_id, but add_couple_code() will take care of that. */
551 spill_id
= ctx
.spills_exit
[pred_idx
][pair
.first
];
557 ctx
.spills_entry
[block_idx
][pair
.first
] = spill_id
;
558 partial_spills
.erase(pair
.first
);
559 spilled_registers
+= pair
.first
;
565 while (block
->instructions
[idx
]->opcode
== aco_opcode::p_linear_phi
||
566 block
->instructions
[idx
]->opcode
== aco_opcode::p_phi
) {
567 aco_ptr
<Instruction
>& phi
= block
->instructions
[idx
];
568 std::vector
<unsigned>& preds
= phi
->opcode
== aco_opcode::p_phi
? block
->logical_preds
: block
->linear_preds
;
571 for (unsigned i
= 0; i
< phi
->operands
.size(); i
++) {
572 if (phi
->operands
[i
].isUndefined())
574 assert(phi
->operands
[i
].isTemp());
575 if (ctx
.spills_exit
[preds
[i
]].find(phi
->operands
[i
].getTemp()) == ctx
.spills_exit
[preds
[i
]].end())
578 partial_spills
.insert(phi
->definitions
[0].getTemp());
581 ctx
.spills_entry
[block_idx
][phi
->definitions
[0].getTemp()] = ctx
.allocate_spill_id(phi
->definitions
[0].regClass());
582 partial_spills
.erase(phi
->definitions
[0].getTemp());
583 spilled_registers
+= phi
->definitions
[0].getTemp();
589 /* if reg pressure at first instruction is still too high, add partially spilled variables */
590 RegisterDemand reg_pressure
;
592 for (const Definition
& def
: block
->instructions
[idx
]->definitions
) {
594 reg_pressure
-= def
.getTemp();
597 for (const Operand
& op
: block
->instructions
[idx
]->operands
) {
598 if (op
.isTemp() && op
.isFirstKill()) {
599 reg_pressure
+= op
.getTemp();
605 reg_pressure
+= ctx
.register_demand
[block_idx
][idx
] - spilled_registers
;
607 while (reg_pressure
.sgpr
> ctx
.target_pressure
.sgpr
) {
608 assert(!partial_spills
.empty());
610 std::set
<Temp
>::iterator it
= partial_spills
.begin();
612 unsigned distance
= ctx
.next_use_distances_start
[block_idx
][*it
].second
;
613 while (it
!= partial_spills
.end()) {
614 assert(ctx
.spills_entry
[block_idx
].find(*it
) == ctx
.spills_entry
[block_idx
].end());
616 if (it
->type() == RegType::sgpr
&& ctx
.next_use_distances_start
[block_idx
][*it
].second
> distance
) {
617 distance
= ctx
.next_use_distances_start
[block_idx
][*it
].second
;
622 assert(distance
!= 0);
624 ctx
.spills_entry
[block_idx
][to_spill
] = ctx
.allocate_spill_id(to_spill
.regClass());
625 partial_spills
.erase(to_spill
);
626 spilled_registers
.sgpr
+= to_spill
.size();
627 reg_pressure
.sgpr
-= to_spill
.size();
630 while (reg_pressure
.vgpr
> ctx
.target_pressure
.vgpr
) {
631 assert(!partial_spills
.empty());
633 std::set
<Temp
>::iterator it
= partial_spills
.begin();
635 unsigned distance
= ctx
.next_use_distances_start
[block_idx
][*it
].second
;
636 while (it
!= partial_spills
.end()) {
637 assert(ctx
.spills_entry
[block_idx
].find(*it
) == ctx
.spills_entry
[block_idx
].end());
639 if (it
->type() == RegType::vgpr
&& ctx
.next_use_distances_start
[block_idx
][*it
].second
> distance
) {
640 distance
= ctx
.next_use_distances_start
[block_idx
][*it
].second
;
645 assert(distance
!= 0);
647 ctx
.spills_entry
[block_idx
][to_spill
] = ctx
.allocate_spill_id(to_spill
.regClass());
648 partial_spills
.erase(to_spill
);
649 spilled_registers
.vgpr
+= to_spill
.size();
650 reg_pressure
.vgpr
-= to_spill
.size();
653 return spilled_registers
;
657 void add_coupling_code(spill_ctx
& ctx
, Block
* block
, unsigned block_idx
)
659 /* no coupling code necessary */
660 if (block
->linear_preds
.size() == 0)
663 std::vector
<aco_ptr
<Instruction
>> instructions
;
664 /* branch block: TODO take other branch into consideration */
665 if (block
->linear_preds
.size() == 1) {
666 assert(ctx
.processed
[block
->linear_preds
[0]]);
668 if (block
->logical_preds
.size() == 1) {
669 unsigned pred_idx
= block
->logical_preds
[0];
670 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> live
: ctx
.next_use_distances_start
[block_idx
]) {
671 if (live
.first
.type() == RegType::sgpr
)
674 if (ctx
.spills_entry
[block_idx
].find(live
.first
) != ctx
.spills_entry
[block_idx
].end())
677 /* in register at end of predecessor */
678 if (ctx
.spills_exit
[pred_idx
].find(live
.first
) == ctx
.spills_exit
[pred_idx
].end()) {
679 std::map
<Temp
, Temp
>::iterator it
= ctx
.renames
[pred_idx
].find(live
.first
);
680 if (it
!= ctx
.renames
[pred_idx
].end())
681 ctx
.renames
[block_idx
].insert(*it
);
685 /* variable is spilled at predecessor and live at current block: create reload instruction */
686 Temp new_name
= {ctx
.program
->allocateId(), live
.first
.regClass()};
687 aco_ptr
<Instruction
> reload
= do_reload(ctx
, live
.first
, new_name
, ctx
.spills_exit
[pred_idx
][live
.first
]);
688 instructions
.emplace_back(std::move(reload
));
689 ctx
.renames
[block_idx
][live
.first
] = new_name
;
693 unsigned pred_idx
= block
->linear_preds
[0];
694 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> live
: ctx
.next_use_distances_start
[block_idx
]) {
695 if (live
.first
.type() == RegType::vgpr
)
698 if (ctx
.spills_entry
[block_idx
].find(live
.first
) != ctx
.spills_entry
[block_idx
].end())
701 /* in register at end of predecessor */
702 if (ctx
.spills_exit
[pred_idx
].find(live
.first
) == ctx
.spills_exit
[pred_idx
].end()) {
703 std::map
<Temp
, Temp
>::iterator it
= ctx
.renames
[pred_idx
].find(live
.first
);
704 if (it
!= ctx
.renames
[pred_idx
].end())
705 ctx
.renames
[block_idx
].insert(*it
);
709 /* variable is spilled at predecessor and live at current block: create reload instruction */
710 Temp new_name
= {ctx
.program
->allocateId(), live
.first
.regClass()};
711 aco_ptr
<Instruction
> reload
= do_reload(ctx
, live
.first
, new_name
, ctx
.spills_exit
[pred_idx
][live
.first
]);
712 instructions
.emplace_back(std::move(reload
));
713 ctx
.renames
[block_idx
][live
.first
] = new_name
;
716 /* combine new reload instructions with original block */
717 if (!instructions
.empty()) {
718 unsigned insert_idx
= 0;
719 while (block
->instructions
[insert_idx
]->opcode
== aco_opcode::p_phi
||
720 block
->instructions
[insert_idx
]->opcode
== aco_opcode::p_linear_phi
) {
723 ctx
.register_demand
[block
->index
].insert(std::next(ctx
.register_demand
[block
->index
].begin(), insert_idx
),
724 instructions
.size(), RegisterDemand());
725 block
->instructions
.insert(std::next(block
->instructions
.begin(), insert_idx
),
726 std::move_iterator
<std::vector
<aco_ptr
<Instruction
>>::iterator
>(instructions
.begin()),
727 std::move_iterator
<std::vector
<aco_ptr
<Instruction
>>::iterator
>(instructions
.end()));
732 /* loop header and merge blocks: check if all (linear) predecessors have been processed */
733 for (ASSERTED
unsigned pred
: block
->linear_preds
)
734 assert(ctx
.processed
[pred
]);
736 /* iterate the phi nodes for which operands to spill at the predecessor */
737 for (aco_ptr
<Instruction
>& phi
: block
->instructions
) {
738 if (phi
->opcode
!= aco_opcode::p_phi
&&
739 phi
->opcode
!= aco_opcode::p_linear_phi
)
742 /* if the phi is not spilled, add to instructions */
743 if (ctx
.spills_entry
[block_idx
].find(phi
->definitions
[0].getTemp()) == ctx
.spills_entry
[block_idx
].end()) {
744 instructions
.emplace_back(std::move(phi
));
748 std::vector
<unsigned>& preds
= phi
->opcode
== aco_opcode::p_phi
? block
->logical_preds
: block
->linear_preds
;
749 uint32_t def_spill_id
= ctx
.spills_entry
[block_idx
][phi
->definitions
[0].getTemp()];
751 for (unsigned i
= 0; i
< phi
->operands
.size(); i
++) {
752 if (phi
->operands
[i
].isUndefined())
755 unsigned pred_idx
= preds
[i
];
756 assert(phi
->operands
[i
].isTemp() && phi
->operands
[i
].isKill());
757 Temp var
= phi
->operands
[i
].getTemp();
759 /* build interferences between the phi def and all spilled variables at the predecessor blocks */
760 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_exit
[pred_idx
]) {
761 if (var
== pair
.first
)
763 ctx
.interferences
[def_spill_id
].second
.emplace(pair
.second
);
764 ctx
.interferences
[pair
.second
].second
.emplace(def_spill_id
);
767 /* check if variable is already spilled at predecessor */
768 std::map
<Temp
, uint32_t>::iterator spilled
= ctx
.spills_exit
[pred_idx
].find(var
);
769 if (spilled
!= ctx
.spills_exit
[pred_idx
].end()) {
770 if (spilled
->second
!= def_spill_id
)
771 ctx
.add_affinity(def_spill_id
, spilled
->second
);
775 /* rename if necessary */
776 std::map
<Temp
, Temp
>::iterator rename_it
= ctx
.renames
[pred_idx
].find(var
);
777 if (rename_it
!= ctx
.renames
[pred_idx
].end()) {
778 var
= rename_it
->second
;
779 ctx
.renames
[pred_idx
].erase(rename_it
);
782 uint32_t spill_id
= ctx
.allocate_spill_id(phi
->definitions
[0].regClass());
783 ctx
.add_affinity(def_spill_id
, spill_id
);
784 aco_ptr
<Pseudo_instruction
> spill
{create_instruction
<Pseudo_instruction
>(aco_opcode::p_spill
, Format::PSEUDO
, 2, 0)};
785 spill
->operands
[0] = Operand(var
);
786 spill
->operands
[1] = Operand(spill_id
);
787 Block
& pred
= ctx
.program
->blocks
[pred_idx
];
788 unsigned idx
= pred
.instructions
.size();
792 } while (phi
->opcode
== aco_opcode::p_phi
&& pred
.instructions
[idx
]->opcode
!= aco_opcode::p_logical_end
);
793 std::vector
<aco_ptr
<Instruction
>>::iterator it
= std::next(pred
.instructions
.begin(), idx
);
794 pred
.instructions
.insert(it
, std::move(spill
));
795 ctx
.spills_exit
[pred_idx
][phi
->operands
[i
].getTemp()] = spill_id
;
798 /* remove phi from instructions */
802 /* iterate all (other) spilled variables for which to spill at the predecessor */
803 // TODO: would be better to have them sorted: first vgprs and first with longest distance
804 for (std::pair
<Temp
, uint32_t> pair
: ctx
.spills_entry
[block_idx
]) {
805 std::vector
<unsigned> preds
= pair
.first
.type() == RegType::vgpr
? block
->logical_preds
: block
->linear_preds
;
807 for (unsigned pred_idx
: preds
) {
808 /* add interferences between spilled variable and predecessors exit spills */
809 for (std::pair
<Temp
, uint32_t> exit_spill
: ctx
.spills_exit
[pred_idx
]) {
810 if (exit_spill
.first
== pair
.first
)
812 ctx
.interferences
[exit_spill
.second
].second
.emplace(pair
.second
);
813 ctx
.interferences
[pair
.second
].second
.emplace(exit_spill
.second
);
816 /* variable is already spilled at predecessor */
817 std::map
<Temp
, uint32_t>::iterator spilled
= ctx
.spills_exit
[pred_idx
].find(pair
.first
);
818 if (spilled
!= ctx
.spills_exit
[pred_idx
].end()) {
819 if (spilled
->second
!= pair
.second
)
820 ctx
.add_affinity(pair
.second
, spilled
->second
);
824 /* variable is dead at predecessor, it must be from a phi: this works because of CSSA form */
825 if (ctx
.next_use_distances_end
[pred_idx
].find(pair
.first
) == ctx
.next_use_distances_end
[pred_idx
].end())
828 /* variable is in register at predecessor and has to be spilled */
829 /* rename if necessary */
830 Temp var
= pair
.first
;
831 std::map
<Temp
, Temp
>::iterator rename_it
= ctx
.renames
[pred_idx
].find(var
);
832 if (rename_it
!= ctx
.renames
[pred_idx
].end()) {
833 var
= rename_it
->second
;
834 ctx
.renames
[pred_idx
].erase(rename_it
);
837 aco_ptr
<Pseudo_instruction
> spill
{create_instruction
<Pseudo_instruction
>(aco_opcode::p_spill
, Format::PSEUDO
, 2, 0)};
838 spill
->operands
[0] = Operand(var
);
839 spill
->operands
[1] = Operand(pair
.second
);
840 Block
& pred
= ctx
.program
->blocks
[pred_idx
];
841 unsigned idx
= pred
.instructions
.size();
845 } while (pair
.first
.type() == RegType::vgpr
&& pred
.instructions
[idx
]->opcode
!= aco_opcode::p_logical_end
);
846 std::vector
<aco_ptr
<Instruction
>>::iterator it
= std::next(pred
.instructions
.begin(), idx
);
847 pred
.instructions
.insert(it
, std::move(spill
));
848 ctx
.spills_exit
[pred
.index
][pair
.first
] = pair
.second
;
852 /* iterate phis for which operands to reload */
853 for (aco_ptr
<Instruction
>& phi
: instructions
) {
854 assert(phi
->opcode
== aco_opcode::p_phi
|| phi
->opcode
== aco_opcode::p_linear_phi
);
855 assert(ctx
.spills_entry
[block_idx
].find(phi
->definitions
[0].getTemp()) == ctx
.spills_entry
[block_idx
].end());
857 std::vector
<unsigned>& preds
= phi
->opcode
== aco_opcode::p_phi
? block
->logical_preds
: block
->linear_preds
;
858 for (unsigned i
= 0; i
< phi
->operands
.size(); i
++) {
859 if (!phi
->operands
[i
].isTemp())
861 unsigned pred_idx
= preds
[i
];
864 if (ctx
.spills_exit
[pred_idx
].find(phi
->operands
[i
].getTemp()) == ctx
.spills_exit
[pred_idx
].end()) {
865 std::map
<Temp
, Temp
>::iterator it
= ctx
.renames
[pred_idx
].find(phi
->operands
[i
].getTemp());
866 if (it
!= ctx
.renames
[pred_idx
].end())
867 phi
->operands
[i
].setTemp(it
->second
);
871 Temp tmp
= phi
->operands
[i
].getTemp();
873 /* reload phi operand at end of predecessor block */
874 Temp new_name
= {ctx
.program
->allocateId(), tmp
.regClass()};
875 Block
& pred
= ctx
.program
->blocks
[pred_idx
];
876 unsigned idx
= pred
.instructions
.size();
880 } while (phi
->opcode
== aco_opcode::p_phi
&& pred
.instructions
[idx
]->opcode
!= aco_opcode::p_logical_end
);
881 std::vector
<aco_ptr
<Instruction
>>::iterator it
= std::next(pred
.instructions
.begin(), idx
);
883 aco_ptr
<Instruction
> reload
= do_reload(ctx
, tmp
, new_name
, ctx
.spills_exit
[pred_idx
][tmp
]);
884 pred
.instructions
.insert(it
, std::move(reload
));
886 ctx
.spills_exit
[pred_idx
].erase(tmp
);
887 ctx
.renames
[pred_idx
][tmp
] = new_name
;
888 phi
->operands
[i
].setTemp(new_name
);
892 /* iterate live variables for which to reload */
893 // TODO: reload at current block if variable is spilled on all predecessors
894 for (std::pair
<Temp
, std::pair
<uint32_t, uint32_t>> pair
: ctx
.next_use_distances_start
[block_idx
]) {
895 /* skip spilled variables */
896 if (ctx
.spills_entry
[block_idx
].find(pair
.first
) != ctx
.spills_entry
[block_idx
].end())
898 std::vector
<unsigned> preds
= pair
.first
.type() == RegType::vgpr
? block
->logical_preds
: block
->linear_preds
;
900 /* variable is dead at predecessor, it must be from a phi */
901 bool is_dead
= false;
902 for (unsigned pred_idx
: preds
) {
903 if (ctx
.next_use_distances_end
[pred_idx
].find(pair
.first
) == ctx
.next_use_distances_end
[pred_idx
].end())
908 for (unsigned pred_idx
: preds
) {
909 /* the variable is not spilled at the predecessor */
910 if (ctx
.spills_exit
[pred_idx
].find(pair
.first
) == ctx
.spills_exit
[pred_idx
].end())
913 /* variable is spilled at predecessor and has to be reloaded */
914 Temp new_name
= {ctx
.program
->allocateId(), pair
.first
.regClass()};
915 Block
& pred
= ctx
.program
->blocks
[pred_idx
];
916 unsigned idx
= pred
.instructions
.size();
920 } while (pair
.first
.type() == RegType::vgpr
&& pred
.instructions
[idx
]->opcode
!= aco_opcode::p_logical_end
);
921 std::vector
<aco_ptr
<Instruction
>>::iterator it
= std::next(pred
.instructions
.begin(), idx
);
923 aco_ptr
<Instruction
> reload
= do_reload(ctx
, pair
.first
, new_name
, ctx
.spills_exit
[pred
.index
][pair
.first
]);
924 pred
.instructions
.insert(it
, std::move(reload
));
926 ctx
.spills_exit
[pred
.index
].erase(pair
.first
);
927 ctx
.renames
[pred
.index
][pair
.first
] = new_name
;
930 /* check if we have to create a new phi for this variable */
931 Temp rename
= Temp();
933 for (unsigned pred_idx
: preds
) {
934 if (ctx
.renames
[pred_idx
].find(pair
.first
) == ctx
.renames
[pred_idx
].end()) {
935 if (rename
== Temp())
938 is_same
= rename
== pair
.first
;
940 if (rename
== Temp())
941 rename
= ctx
.renames
[pred_idx
][pair
.first
];
943 is_same
= rename
== ctx
.renames
[pred_idx
][pair
.first
];
951 /* the variable was renamed differently in the predecessors: we have to create a phi */
952 aco_opcode opcode
= pair
.first
.type() == RegType::vgpr
? aco_opcode::p_phi
: aco_opcode::p_linear_phi
;
953 aco_ptr
<Pseudo_instruction
> phi
{create_instruction
<Pseudo_instruction
>(opcode
, Format::PSEUDO
, preds
.size(), 1)};
954 rename
= {ctx
.program
->allocateId(), pair
.first
.regClass()};
955 for (unsigned i
= 0; i
< phi
->operands
.size(); i
++) {
957 if (ctx
.renames
[preds
[i
]].find(pair
.first
) != ctx
.renames
[preds
[i
]].end())
958 tmp
= ctx
.renames
[preds
[i
]][pair
.first
];
959 else if (preds
[i
] >= block_idx
)
963 phi
->operands
[i
] = Operand(tmp
);
965 phi
->definitions
[0] = Definition(rename
);
966 instructions
.emplace_back(std::move(phi
));
969 /* the variable was renamed: add new name to renames */
970 if (!(rename
== Temp() || rename
== pair
.first
))
971 ctx
.renames
[block_idx
][pair
.first
] = rename
;
974 /* combine phis with instructions */
976 while (!block
->instructions
[idx
]) {
980 ctx
.register_demand
[block
->index
].erase(ctx
.register_demand
[block
->index
].begin(), ctx
.register_demand
[block
->index
].begin() + idx
);
981 ctx
.register_demand
[block
->index
].insert(ctx
.register_demand
[block
->index
].begin(), instructions
.size(), RegisterDemand());
983 std::vector
<aco_ptr
<Instruction
>>::iterator start
= std::next(block
->instructions
.begin(), idx
);
984 instructions
.insert(instructions
.end(), std::move_iterator
<std::vector
<aco_ptr
<Instruction
>>::iterator
>(start
),
985 std::move_iterator
<std::vector
<aco_ptr
<Instruction
>>::iterator
>(block
->instructions
.end()));
986 block
->instructions
= std::move(instructions
);
/* Walks the instructions of one block in program order: renames operands of
 * already-renamed temporaries, inserts p_reload instructions before uses of
 * spilled variables, and inserts p_spill instructions whenever the register
 * demand between two instructions exceeds the target pressure.
 *
 * ctx               - global spilling state (renames, spill maps, interferences)
 * block_idx / block - the block being processed
 * current_spills    - in/out: map of currently-spilled Temps to their spill ids
 * spilled_registers - register demand currently removed by spilling
 *
 * NOTE(review): this function was recovered from a mangled extraction; a few
 * structural lines (braces, continue statements, loop scaffolding) were
 * restored to the obvious form — confirm against the upstream file. */
void process_block(spill_ctx& ctx, unsigned block_idx, Block* block,
                   std::map<Temp, uint32_t> &current_spills, RegisterDemand spilled_registers)
{
   std::vector<std::map<Temp, uint32_t>> local_next_use_distance;
   std::vector<aco_ptr<Instruction>> instructions;
   unsigned idx = 0;

   /* phis are handled separetely */
   while (block->instructions[idx]->opcode == aco_opcode::p_phi ||
          block->instructions[idx]->opcode == aco_opcode::p_linear_phi) {
      aco_ptr<Instruction>& instr = block->instructions[idx];
      for (const Operand& op : instr->operands) {
         /* prevent it's definining instruction from being DCE'd if it could be rematerialized */
         if (op.isTemp() && ctx.remat.count(op.getTemp()))
            ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   /* per-instruction next-use distances are only needed if spilling can happen here */
   if (block->register_demand.exceeds(ctx.target_pressure))
      local_next_use_distance = local_next_uses(ctx, block);

   while (idx < block->instructions.size()) {
      aco_ptr<Instruction>& instr = block->instructions[idx];

      std::map<Temp, std::pair<Temp, uint32_t>> reloads;
      std::map<Temp, uint32_t> spills;
      /* rename and reload operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         if (current_spills.find(op.getTemp()) == current_spills.end()) {
            /* the Operand is in register: check if it was renamed */
            if (ctx.renames[block_idx].find(op.getTemp()) != ctx.renames[block_idx].end())
               op.setTemp(ctx.renames[block_idx][op.getTemp()]);
            /* prevent it's definining instruction from being DCE'd if it could be rematerialized */
            if (ctx.remat.count(op.getTemp()))
               ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
            continue;
         }
         /* the Operand is spilled: add it to reloads */
         Temp new_tmp = {ctx.program->allocateId(), op.regClass()};
         ctx.renames[block_idx][op.getTemp()] = new_tmp;
         reloads[new_tmp] = std::make_pair(op.getTemp(), current_spills[op.getTemp()]);
         current_spills.erase(op.getTemp());
         op.setTemp(new_tmp);
         spilled_registers -= new_tmp;
      }

      /* check if register demand is low enough before and after the current instruction */
      if (block->register_demand.exceeds(ctx.target_pressure)) {

         RegisterDemand new_demand = ctx.register_demand[block_idx][idx];
         if (idx == 0) {
            /* at the first instruction there is no previous demand entry:
             * account for this instruction's definitions directly */
            for (const Definition& def : instr->definitions) {
               if (!def.isTemp())
                  continue;
               new_demand += def.getTemp();
            }
         } else {
            new_demand.update(ctx.register_demand[block_idx][idx - 1]);
         }

         assert(!local_next_use_distance.empty());

         /* if reg pressure is too high, spill variable with furthest next use */
         while (RegisterDemand(new_demand - spilled_registers).exceeds(ctx.target_pressure)) {
            unsigned distance = 0;
            Temp to_spill;
            bool do_rematerialize = false;
            /* choose the register type that is actually over-budget, then pick
             * the candidate with the furthest next use, preferring
             * rematerializable variables (cheaper to restore than a reload) */
            if (new_demand.vgpr - spilled_registers.vgpr > ctx.target_pressure.vgpr) {
               for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
                  bool can_rematerialize = ctx.remat.count(pair.first);
                  if (pair.first.type() == RegType::vgpr &&
                      ((pair.second > distance && can_rematerialize == do_rematerialize) ||
                       (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                      current_spills.find(pair.first) == current_spills.end() &&
                      ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
                     to_spill = pair.first;
                     distance = pair.second;
                     do_rematerialize = can_rematerialize;
                  }
               }
            } else {
               for (std::pair<Temp, uint32_t> pair : local_next_use_distance[idx]) {
                  bool can_rematerialize = ctx.remat.count(pair.first);
                  if (pair.first.type() == RegType::sgpr &&
                      ((pair.second > distance && can_rematerialize == do_rematerialize) ||
                       (can_rematerialize && !do_rematerialize && pair.second > idx)) &&
                      current_spills.find(pair.first) == current_spills.end() &&
                      ctx.spills_exit[block_idx].find(pair.first) == ctx.spills_exit[block_idx].end()) {
                     to_spill = pair.first;
                     distance = pair.second;
                     do_rematerialize = can_rematerialize;
                  }
               }
            }

            assert(distance != 0 && distance > idx);
            uint32_t spill_id = ctx.allocate_spill_id(to_spill.regClass());

            /* add interferences with currently spilled variables */
            for (std::pair<Temp, uint32_t> pair : current_spills) {
               ctx.interferences[spill_id].second.emplace(pair.second);
               ctx.interferences[pair.second].second.emplace(spill_id);
            }
            /* reloaded-in-this-instruction variables are still live here, too */
            for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
               ctx.interferences[spill_id].second.emplace(pair.second.second);
               ctx.interferences[pair.second.second].second.emplace(spill_id);
            }

            current_spills[to_spill] = spill_id;
            spilled_registers += to_spill;

            /* rename if necessary */
            if (ctx.renames[block_idx].find(to_spill) != ctx.renames[block_idx].end()) {
               to_spill = ctx.renames[block_idx][to_spill];
            }

            /* add spill to new instructions */
            aco_ptr<Pseudo_instruction> spill{create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 2, 0)};
            spill->operands[0] = Operand(to_spill);
            spill->operands[1] = Operand(spill_id);
            instructions.emplace_back(std::move(spill));
         }
      }

      /* add reloads and instruction to new instructions */
      for (std::pair<Temp, std::pair<Temp, uint32_t>> pair : reloads) {
         aco_ptr<Instruction> reload = do_reload(ctx, pair.second.first, pair.first, pair.second.second);
         instructions.emplace_back(std::move(reload));
      }
      instructions.emplace_back(std::move(instr));
      idx++;
   }

   block->instructions = std::move(instructions);
   ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());
}
/* Top-level per-block driver of the spilling pass: computes the entry spill
 * set, adds coupling (spill/reload) code on incoming edges, processes the
 * block body, and — when leaving a loop — re-runs the coupling code on the
 * loop header and repairs SSA by propagating the new renames through the loop.
 *
 * NOTE(review): recovered from a mangled extraction; small control-flow lines
 * (continue/break/return, brace nesting) were restored to the obvious form —
 * confirm against the upstream file. */
void spill_block(spill_ctx& ctx, unsigned block_idx)
{
   Block* block = &ctx.program->blocks[block_idx];
   ctx.processed[block_idx] = true;

   /* determine set of variables which are spilled at the beginning of the block */
   RegisterDemand spilled_registers = init_live_in_vars(ctx, block, block_idx);

   /* add interferences for spilled variables */
   for (std::pair<Temp, uint32_t> x : ctx.spills_entry[block_idx]) {
      for (std::pair<Temp, uint32_t> y : ctx.spills_entry[block_idx])
         if (x.second != y.second)
            ctx.interferences[x.second].second.emplace(y.second);
   }

   /* loop headers get their coupling code later, once the whole loop is processed */
   bool is_loop_header = block->loop_nest_depth && ctx.loop_header.top()->index == block_idx;
   if (!is_loop_header) {
      /* add spill/reload code on incoming control flow edges */
      add_coupling_code(ctx, block, block_idx);
   }

   std::map<Temp, uint32_t> current_spills = ctx.spills_entry[block_idx];

   /* check conditions to process this block */
   bool process = RegisterDemand(block->register_demand - spilled_registers).exceeds(ctx.target_pressure) ||
                  !ctx.renames[block_idx].empty() ||
                  ctx.remat_used.size();

   /* also process if a spilled variable has its next use inside this block */
   std::map<Temp, uint32_t>::iterator it = current_spills.begin();
   while (!process && it != current_spills.end()) {
      if (ctx.next_use_distances_start[block_idx][it->first].first == block_idx)
         process = true;
      ++it;
   }

   if (process)
      process_block(ctx, block_idx, block, current_spills, spilled_registers);
   else
      ctx.spills_exit[block_idx].insert(current_spills.begin(), current_spills.end());

   /* check if the next block leaves the current loop */
   if (block->loop_nest_depth == 0 || ctx.program->blocks[block_idx + 1].loop_nest_depth >= block->loop_nest_depth)
      return;

   Block* loop_header = ctx.loop_header.top();

   /* preserve original renames at end of loop header block */
   std::map<Temp, Temp> renames = std::move(ctx.renames[loop_header->index]);

   /* add coupling code to all loop header predecessors */
   add_coupling_code(ctx, loop_header, loop_header->index);

   /* update remat_used for phis added in add_coupling_code() */
   for (aco_ptr<Instruction>& instr : loop_header->instructions) {
      if (instr->opcode != aco_opcode::p_phi && instr->opcode != aco_opcode::p_linear_phi)
         break;
      for (const Operand& op : instr->operands) {
         if (op.isTemp() && ctx.remat.count(op.getTemp()))
            ctx.remat_used[ctx.remat[op.getTemp()].instr] = true;
      }
   }

   /* propagate new renames through loop: i.e. repair the SSA */
   renames.swap(ctx.renames[loop_header->index]);
   for (std::pair<Temp, Temp> rename : renames) {
      for (unsigned idx = loop_header->index; idx <= block_idx; idx++) {
         Block& current = ctx.program->blocks[idx];
         std::vector<aco_ptr<Instruction>>::iterator instr_it = current.instructions.begin();

         /* first rename phis */
         while (instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& phi = *instr_it;
            if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
               break;
            /* no need to rename the loop header phis once again. this happened in add_coupling_code() */
            if (idx == loop_header->index) {
               instr_it++;
               continue;
            }

            for (Operand& op : phi->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first)
                  op.setTemp(rename.second);
            }
            instr_it++;
         }

         std::map<Temp, std::pair<uint32_t, uint32_t>>::iterator it = ctx.next_use_distances_start[idx].find(rename.first);

         /* variable is not live at beginning of this block */
         if (it == ctx.next_use_distances_start[idx].end())
            continue;

         /* if the variable is live at the block's exit, add rename */
         if (ctx.next_use_distances_end[idx].find(rename.first) != ctx.next_use_distances_end[idx].end())
            ctx.renames[idx].insert(rename);

         /* rename all uses in this block */
         bool renamed = false;
         while (!renamed && instr_it != current.instructions.end()) {
            aco_ptr<Instruction>& instr = *instr_it;
            for (Operand& op : instr->operands) {
               if (!op.isTemp())
                  continue;
               if (op.getTemp() == rename.first) {
                  op.setTemp(rename.second);
                  /* we can stop with this block as soon as the variable is spilled */
                  if (instr->opcode == aco_opcode::p_spill)
                     renamed = true;
               }
            }
            instr_it++;
         }
      }
   }

   /* remove loop header info from stack */
   ctx.loop_header.pop();
}
/* Assigns a concrete spill slot to every reloaded spill id and lowers the
 * p_spill/p_reload pseudo instructions: sgpr spills become lane writes into
 * linear VGPRs (64 sgpr slots per VGPR); vgpr spilling is not implemented in
 * this version and asserts. Also inserts p_start/p_end_linear_vgpr markers so
 * the linear VGPRs have well-defined live ranges.
 *
 * ctx            - global spilling state (interferences, affinities, blocks)
 * spills_to_vgpr - number of linear VGPRs reserved for sgpr spill storage
 *
 * NOTE(review): recovered from a mangled extraction; the outer slot-assignment
 * loop scaffolding (done/slot_idx handling) and small control-flow lines were
 * reconstructed — confirm against the upstream file. */
void assign_spill_slots(spill_ctx& ctx, unsigned spills_to_vgpr) {
   std::map<uint32_t, uint32_t> sgpr_slot;
   std::map<uint32_t, uint32_t> vgpr_slot;
   std::vector<bool> is_assigned(ctx.interferences.size());

   /* first, handle affinities: just merge all interferences into both spill ids */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(vec[i] != vec[j]);
            for (uint32_t id : ctx.interferences[vec[i]].second)
               ctx.interferences[id].second.insert(vec[j]);
            for (uint32_t id : ctx.interferences[vec[j]].second)
               ctx.interferences[id].second.insert(vec[i]);
            ctx.interferences[vec[i]].second.insert(ctx.interferences[vec[j]].second.begin(), ctx.interferences[vec[j]].second.end());
            ctx.interferences[vec[j]].second.insert(ctx.interferences[vec[i]].second.begin(), ctx.interferences[vec[i]].second.end());

            /* if either id of an affinity group is reloaded, all are */
            bool reloaded = ctx.is_reloaded[vec[i]] || ctx.is_reloaded[vec[j]];
            ctx.is_reloaded[vec[i]] = reloaded;
            ctx.is_reloaded[vec[j]] = reloaded;
         }
      }
   }
   /* sanity check: no spill id interferes with itself */
   for (ASSERTED uint32_t i = 0; i < ctx.interferences.size(); i++)
      for (ASSERTED uint32_t id : ctx.interferences[i].second)
         assert(i != id);

   /* for each spill slot, assign as many spill ids as possible */
   std::vector<std::set<uint32_t>> spill_slot_interferences;
   unsigned slot_idx = 0;
   bool done = false;

   /* assign sgpr spill slots */
   while (!done) {
      done = true;
      for (unsigned id = 0; id < ctx.interferences.size(); id++) {
         if (is_assigned[id] || !ctx.is_reloaded[id])
            continue;
         if (ctx.interferences[id].first.type() != RegType::sgpr)
            continue;

         /* check interferences */
         bool interferes = false;
         for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
            if (i == spill_slot_interferences.size())
               spill_slot_interferences.emplace_back(std::set<uint32_t>());
            /* also keep multi-slot spills within one 64-slot (one VGPR) group */
            if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
               interferes = true;
               break;
            }
         }
         if (interferes) {
            done = false;
            continue;
         }

         /* we found a spill id which can be assigned to current spill slot */
         sgpr_slot[id] = slot_idx;
         is_assigned[id] = true;
         for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
            spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());

         /* add all affinities: there are no additional interferences */
         for (std::vector<uint32_t>& vec : ctx.affinities) {
            bool found_affinity = false;
            for (uint32_t entry : vec) {
               if (entry == id) {
                  found_affinity = true;
                  break;
               }
            }
            if (!found_affinity)
               continue;
            for (uint32_t entry : vec) {
               sgpr_slot[entry] = slot_idx;
               is_assigned[entry] = true;
            }
         }
      }
      slot_idx++;
   }

   /* assign vgpr spill slots */
   /* NOTE(review): reset reconstructed; vgpr spilling asserts below anyway */
   slot_idx = 0;
   done = false;
   while (!done) {
      done = true;
      for (unsigned id = 0; id < ctx.interferences.size(); id++) {
         if (is_assigned[id] || !ctx.is_reloaded[id])
            continue;
         if (ctx.interferences[id].first.type() != RegType::vgpr)
            continue;

         /* check interferences */
         bool interferes = false;
         for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++) {
            if (i == spill_slot_interferences.size())
               spill_slot_interferences.emplace_back(std::set<uint32_t>());
            /* check for interference and ensure that vector regs are stored next to each other */
            if (spill_slot_interferences[i].find(id) != spill_slot_interferences[i].end() || i / 64 != slot_idx / 64) {
               interferes = true;
               break;
            }
         }
         if (interferes) {
            done = false;
            continue;
         }

         /* we found a spill id which can be assigned to current spill slot */
         vgpr_slot[id] = slot_idx;
         is_assigned[id] = true;
         for (unsigned i = slot_idx; i < slot_idx + ctx.interferences[id].first.size(); i++)
            spill_slot_interferences[i].insert(ctx.interferences[id].second.begin(), ctx.interferences[id].second.end());
      }
      slot_idx++;
   }

   /* every reloaded spill id must have received a slot */
   for (unsigned id = 0; id < is_assigned.size(); id++)
      assert(is_assigned[id] || !ctx.is_reloaded[id]);

   /* affinity groups must agree on assignment, reload status and slot */
   for (std::vector<uint32_t>& vec : ctx.affinities) {
      for (unsigned i = 0; i < vec.size(); i++) {
         for (unsigned j = i + 1; j < vec.size(); j++) {
            assert(is_assigned[vec[i]] == is_assigned[vec[j]]);
            if (!is_assigned[vec[i]])
               continue;
            assert(ctx.is_reloaded[vec[i]] == ctx.is_reloaded[vec[j]]);
            assert(ctx.interferences[vec[i]].first.type() == ctx.interferences[vec[j]].first.type());
            if (ctx.interferences[vec[i]].first.type() == RegType::sgpr)
               assert(sgpr_slot[vec[i]] == sgpr_slot[vec[j]]);
            else
               assert(vgpr_slot[vec[i]] == vgpr_slot[vec[j]]);
         }
      }
   }

   /* hope, we didn't mess up */
   std::vector<Temp> vgpr_spill_temps((spill_slot_interferences.size() + 63) / 64);
   assert(vgpr_spill_temps.size() <= spills_to_vgpr);

   /* replace pseudo instructions with actual hardware instructions */
   unsigned last_top_level_block_idx = 0;
   std::vector<bool> reload_in_loop(vgpr_spill_temps.size());
   for (Block& block : ctx.program->blocks) {

      /* after loops, we insert a user if there was a reload inside the loop */
      if (block.loop_nest_depth == 0) {
         int end_vgprs = 0;
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (reload_in_loop[i])
               end_vgprs++;
         }

         if (end_vgprs > 0) {
            aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, end_vgprs, 0)};
            int k = 0;
            for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
               if (reload_in_loop[i])
                  destr->operands[k++] = Operand(vgpr_spill_temps[i]);
               reload_in_loop[i] = false;
            }
            /* find insertion point */
            std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
            while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
               ++it;
            block.instructions.insert(it, std::move(destr));
         }
      }

      if (block.kind & block_kind_top_level && !block.linear_preds.empty()) {
         last_top_level_block_idx = block.index;

         /* check if any spilled variables use a created linear vgpr, otherwise destroy them */
         for (unsigned i = 0; i < vgpr_spill_temps.size(); i++) {
            if (vgpr_spill_temps[i] == Temp())
               continue;

            bool can_destroy = true;
            for (std::pair<Temp, uint32_t> pair : ctx.spills_exit[block.linear_preds[0]]) {
               if (sgpr_slot.find(pair.second) != sgpr_slot.end() &&
                   sgpr_slot[pair.second] / 64 == i) {
                  can_destroy = false;
                  break;
               }
            }
            if (can_destroy)
               vgpr_spill_temps[i] = Temp();
         }
      }

      std::vector<aco_ptr<Instruction>>::iterator it;
      std::vector<aco_ptr<Instruction>> instructions;
      instructions.reserve(block.instructions.size());
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {

         if ((*it)->opcode == aco_opcode::p_spill) {
            uint32_t spill_id = (*it)->operands[1].constantValue();

            if (!ctx.is_reloaded[spill_id]) {
               /* never reloaded, so don't spill */
            } else if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
               /* spill vgpr */
               ctx.program->config->spilled_vgprs += (*it)->operands[0].size();

               assert(false && "vgpr spilling not yet implemented.");
            } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
               ctx.program->config->spilled_sgprs += (*it)->operands[0].size();

               uint32_t spill_slot = sgpr_slot[spill_id];

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
                  Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
                  vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
                     instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
                  }
               }

               /* spill sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* spill = create_instruction<Pseudo_instruction>(aco_opcode::p_spill, Format::PSEUDO, 3, 0);
               spill->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
               spill->operands[1] = Operand(spill_slot % 64);
               spill->operands[2] = (*it)->operands[0];
               instructions.emplace_back(aco_ptr<Instruction>(spill));
            } else {
               unreachable("No spill slot assigned for spill id");
            }

         } else if ((*it)->opcode == aco_opcode::p_reload) {
            uint32_t spill_id = (*it)->operands[0].constantValue();
            assert(ctx.is_reloaded[spill_id]);

            if (vgpr_slot.find(spill_id) != vgpr_slot.end()) {
               /* reload vgpr */
               assert(false && "vgpr spilling not yet implemented.");

            } else if (sgpr_slot.find(spill_id) != sgpr_slot.end()) {
               uint32_t spill_slot = sgpr_slot[spill_id];
               reload_in_loop[spill_slot / 64] = block.loop_nest_depth > 0;

               /* check if the linear vgpr already exists */
               if (vgpr_spill_temps[spill_slot / 64] == Temp()) {
                  Temp linear_vgpr = {ctx.program->allocateId(), v1.as_linear()};
                  vgpr_spill_temps[spill_slot / 64] = linear_vgpr;
                  aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr, Format::PSEUDO, 0, 1)};
                  create->definitions[0] = Definition(linear_vgpr);
                  /* find the right place to insert this definition */
                  if (last_top_level_block_idx == block.index) {
                     /* insert right before the current instruction */
                     instructions.emplace_back(std::move(create));
                  } else {
                     assert(last_top_level_block_idx < block.index);
                     /* insert before the branch at last top level block */
                     std::vector<aco_ptr<Instruction>>& instructions = ctx.program->blocks[last_top_level_block_idx].instructions;
                     instructions.insert(std::next(instructions.begin(), instructions.size() - 1), std::move(create));
                  }
               }

               /* reload sgpr: just add the vgpr temp to operands */
               Pseudo_instruction* reload = create_instruction<Pseudo_instruction>(aco_opcode::p_reload, Format::PSEUDO, 2, 1);
               reload->operands[0] = Operand(vgpr_spill_temps[spill_slot / 64]);
               reload->operands[1] = Operand(spill_slot % 64);
               reload->definitions[0] = (*it)->definitions[0];
               instructions.emplace_back(aco_ptr<Instruction>(reload));
            } else {
               unreachable("No spill slot assigned for spill id");
            }
         } else if (!ctx.remat_used.count(it->get()) || ctx.remat_used[it->get()]) {
            /* keep the instruction unless it is a never-used rematerializable def */
            instructions.emplace_back(std::move(*it));
         }
      }
      block.instructions = std::move(instructions);
   }

   /* SSA elimination inserts copies for logical phis right before p_logical_end
    * So if a linear vgpr is used between that p_logical_end and the branch,
    * we need to ensure logical phis don't choose a definition which aliases
    * the linear vgpr.
    * TODO: Moving the spills and reloads to before p_logical_end might produce
    * slightly better code. */
   for (Block& block : ctx.program->blocks) {
      /* loops exits are already handled */
      if (block.logical_preds.size() <= 1)
         continue;

      bool has_logical_phis = false;
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->opcode == aco_opcode::p_phi) {
            has_logical_phis = true;
            break;
         } else if (instr->opcode != aco_opcode::p_linear_phi) {
            break;
         }
      }
      if (!has_logical_phis)
         continue;

      /* collect linear vgprs used between p_logical_end and the branch in any predecessor */
      std::set<Temp> vgprs;
      for (unsigned pred_idx : block.logical_preds) {
         Block& pred = ctx.program->blocks[pred_idx];
         for (int i = pred.instructions.size() - 1; i >= 0; i--) {
            aco_ptr<Instruction>& pred_instr = pred.instructions[i];
            if (pred_instr->opcode == aco_opcode::p_logical_end) {
               break;
            } else if (pred_instr->opcode == aco_opcode::p_spill ||
                       pred_instr->opcode == aco_opcode::p_reload) {
               vgprs.insert(pred_instr->operands[0].getTemp());
            }
         }
      }
      if (!vgprs.size())
         continue;

      aco_ptr<Instruction> destr{create_instruction<Pseudo_instruction>(aco_opcode::p_end_linear_vgpr, Format::PSEUDO, vgprs.size(), 0)};
      int k = 0;
      for (Temp tmp : vgprs) {
         destr->operands[k++] = Operand(tmp);
      }
      /* find insertion point */
      std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
      while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
         ++it;
      block.instructions.insert(it, std::move(destr));
   }
}
1592 } /* end namespace */
1595 void spill(Program
* program
, live
& live_vars
, const struct radv_nir_compiler_options
*options
)
1597 program
->config
->spilled_vgprs
= 0;
1598 program
->config
->spilled_sgprs
= 0;
1600 /* no spilling when wave count is already high */
1601 if (program
->num_waves
>= 6)
1604 /* lower to CSSA before spilling to ensure correctness w.r.t. phis */
1605 lower_to_cssa(program
, live_vars
, options
);
1607 /* else, we check if we can improve things a bit */
1609 /* calculate target register demand */
1610 RegisterDemand max_reg_demand
;
1611 for (Block
& block
: program
->blocks
) {
1612 max_reg_demand
.update(block
.register_demand
);
1615 RegisterDemand target_pressure
= {256, int16_t(program
->sgpr_limit
)};
1616 unsigned num_waves
= 1;
1617 int spills_to_vgpr
= (max_reg_demand
.sgpr
- program
->sgpr_limit
+ 63) / 64;
1619 /* test if it possible to increase occupancy with little spilling */
1620 for (unsigned num_waves_next
= 2; num_waves_next
<= program
->max_waves
; num_waves_next
++) {
1621 RegisterDemand target_pressure_next
= {int16_t((256 / num_waves_next
) & ~3),
1622 int16_t(get_addr_sgpr_from_waves(program
, num_waves_next
))};
1624 /* Currently no vgpr spilling supported.
1625 * Spill as many sgprs as necessary to not hinder occupancy */
1626 if (max_reg_demand
.vgpr
> target_pressure_next
.vgpr
)
1628 /* check that we have enough free vgprs to spill sgprs to */
1629 if (max_reg_demand
.sgpr
> target_pressure_next
.sgpr
) {
1630 /* add some buffer in case graph coloring is not perfect ... */
1631 const int spills_to_vgpr_next
= (max_reg_demand
.sgpr
- target_pressure_next
.sgpr
+ 63 + 32) / 64;
1632 if (spills_to_vgpr_next
+ max_reg_demand
.vgpr
> target_pressure_next
.vgpr
)
1634 spills_to_vgpr
= spills_to_vgpr_next
;
1637 target_pressure
= target_pressure_next
;
1638 num_waves
= num_waves_next
;
1641 assert(max_reg_demand
.vgpr
<= target_pressure
.vgpr
&& "VGPR spilling not yet supported.");
1643 if (num_waves
== program
->num_waves
)
1646 /* initialize ctx */
1647 spill_ctx
ctx(target_pressure
, program
, live_vars
.register_demand
);
1648 compute_global_next_uses(ctx
, live_vars
.live_out
);
1649 get_rematerialize_info(ctx
);
1651 /* create spills and reloads */
1652 for (unsigned i
= 0; i
< program
->blocks
.size(); i
++)
1653 spill_block(ctx
, i
);
1655 /* assign spill slots and DCE rematerialized code */
1656 assign_spill_slots(ctx
, spills_to_vgpr
);
1658 /* update live variable information */
1659 live_vars
= live_var_analysis(program
, options
);
1661 assert(program
->num_waves
>= num_waves
);