2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *    Bas Nieuwenhuizen (bas@basnieuwenhuizen.nl)
31 #include "util/u_math.h"
36 #include "vulkan/radv_shader.h"
39 RegisterDemand
get_live_changes(aco_ptr
<Instruction
>& instr
)
41 RegisterDemand changes
;
42 for (const Definition
& def
: instr
->definitions
) {
43 if (!def
.isTemp() || def
.isKill())
45 changes
+= def
.getTemp();
48 for (const Operand
& op
: instr
->operands
) {
49 if (!op
.isTemp() || !op
.isFirstKill())
51 changes
-= op
.getTemp();
57 RegisterDemand
get_temp_registers(aco_ptr
<Instruction
>& instr
)
59 RegisterDemand temp_registers
;
61 for (Definition def
: instr
->definitions
) {
65 temp_registers
+= def
.getTemp();
68 for (Operand op
: instr
->operands
) {
69 if (op
.isTemp() && op
.isLateKill() && op
.isFirstKill())
70 temp_registers
+= op
.getTemp();
73 return temp_registers
;
76 RegisterDemand
get_demand_before(RegisterDemand demand
, aco_ptr
<Instruction
>& instr
, aco_ptr
<Instruction
>& instr_before
)
78 demand
-= get_live_changes(instr
);
79 demand
-= get_temp_registers(instr
);
81 demand
+= get_temp_registers(instr_before
);
86 void process_live_temps_per_block(Program
*program
, live
& lives
, Block
* block
,
87 std::set
<unsigned>& worklist
, std::vector
<uint16_t>& phi_sgpr_ops
)
89 std::vector
<RegisterDemand
>& register_demand
= lives
.register_demand
[block
->index
];
90 RegisterDemand new_demand
;
92 register_demand
.resize(block
->instructions
.size());
93 block
->register_demand
= RegisterDemand();
95 std::set
<Temp
> live_sgprs
;
96 std::set
<Temp
> live_vgprs
;
98 /* add the live_out_exec to live */
99 bool exec_live
= false;
100 if (block
->live_out_exec
!= Temp()) {
101 live_sgprs
.insert(block
->live_out_exec
);
102 new_demand
.sgpr
+= program
->lane_mask
.size();
106 /* split the live-outs from this block into the temporary sets */
107 std::vector
<std::set
<Temp
>>& live_temps
= lives
.live_out
;
108 for (const Temp temp
: live_temps
[block
->index
]) {
109 const bool inserted
= temp
.is_linear()
110 ? live_sgprs
.insert(temp
).second
111 : live_vgprs
.insert(temp
).second
;
116 new_demand
.sgpr
-= phi_sgpr_ops
[block
->index
];
118 /* traverse the instructions backwards */
120 for (idx
= block
->instructions
.size() -1; idx
>= 0; idx
--) {
121 Instruction
*insn
= block
->instructions
[idx
].get();
125 /* substract the 1 or 2 sgprs from exec */
127 assert(new_demand
.sgpr
>= (int16_t) program
->lane_mask
.size());
128 register_demand
[idx
] = RegisterDemand(new_demand
.vgpr
, new_demand
.sgpr
- (exec_live
? program
->lane_mask
.size() : 0));
131 for (Definition
& definition
: insn
->definitions
) {
132 if (!definition
.isTemp()) {
135 if ((definition
.isFixed() || definition
.hasHint()) && definition
.physReg() == vcc
)
136 program
->needs_vcc
= true;
138 const Temp temp
= definition
.getTemp();
140 if (temp
.is_linear())
141 n
= live_sgprs
.erase(temp
);
143 n
= live_vgprs
.erase(temp
);
147 definition
.setKill(false);
149 register_demand
[idx
] += temp
;
150 definition
.setKill(true);
153 if (definition
.isFixed() && definition
.physReg() == exec
)
158 if (insn
->opcode
== aco_opcode::p_logical_end
) {
159 new_demand
.sgpr
+= phi_sgpr_ops
[block
->index
];
161 /* we need to do this in a separate loop because the next one can
162 * setKill() for several operands at once and we don't want to
163 * overwrite that in a later iteration */
164 for (Operand
& op
: insn
->operands
)
167 for (unsigned i
= 0; i
< insn
->operands
.size(); ++i
)
169 Operand
& operand
= insn
->operands
[i
];
170 if (!operand
.isTemp())
172 if (operand
.isFixed() && operand
.physReg() == vcc
)
173 program
->needs_vcc
= true;
174 const Temp temp
= operand
.getTemp();
175 const bool inserted
= temp
.is_linear()
176 ? live_sgprs
.insert(temp
).second
177 : live_vgprs
.insert(temp
).second
;
179 operand
.setFirstKill(true);
180 for (unsigned j
= i
+ 1; j
< insn
->operands
.size(); ++j
) {
181 if (insn
->operands
[j
].isTemp() && insn
->operands
[j
].tempId() == operand
.tempId()) {
182 insn
->operands
[j
].setFirstKill(false);
183 insn
->operands
[j
].setKill(true);
186 if (operand
.isLateKill())
187 register_demand
[idx
] += temp
;
191 if (operand
.isFixed() && operand
.physReg() == exec
)
196 block
->register_demand
.update(register_demand
[idx
]);
199 /* update block's register demand for a last time */
201 assert(new_demand
.sgpr
>= (int16_t) program
->lane_mask
.size());
202 new_demand
.sgpr
-= exec_live
? program
->lane_mask
.size() : 0;
203 block
->register_demand
.update(new_demand
);
205 /* handle phi definitions */
207 while (phi_idx
>= 0) {
208 register_demand
[phi_idx
] = new_demand
;
209 Instruction
*insn
= block
->instructions
[phi_idx
].get();
211 assert(is_phi(insn
));
212 assert(insn
->definitions
.size() == 1 && insn
->definitions
[0].isTemp());
213 Definition
& definition
= insn
->definitions
[0];
214 if ((definition
.isFixed() || definition
.hasHint()) && definition
.physReg() == vcc
)
215 program
->needs_vcc
= true;
216 const Temp temp
= definition
.getTemp();
219 if (temp
.is_linear())
220 n
= live_sgprs
.erase(temp
);
222 n
= live_vgprs
.erase(temp
);
225 definition
.setKill(false);
227 definition
.setKill(true);
232 /* now, we have the live-in sets and need to merge them into the live-out sets */
233 for (unsigned pred_idx
: block
->logical_preds
) {
234 for (Temp vgpr
: live_vgprs
) {
235 auto it
= live_temps
[pred_idx
].insert(vgpr
);
237 worklist
.insert(pred_idx
);
241 for (unsigned pred_idx
: block
->linear_preds
) {
242 for (Temp sgpr
: live_sgprs
) {
243 auto it
= live_temps
[pred_idx
].insert(sgpr
);
245 worklist
.insert(pred_idx
);
249 /* handle phi operands */
251 while (phi_idx
>= 0) {
252 Instruction
*insn
= block
->instructions
[phi_idx
].get();
253 assert(is_phi(insn
));
254 /* directly insert into the predecessors live-out set */
255 std::vector
<unsigned>& preds
= insn
->opcode
== aco_opcode::p_phi
256 ? block
->logical_preds
257 : block
->linear_preds
;
258 for (unsigned i
= 0; i
< preds
.size(); ++i
) {
259 Operand
&operand
= insn
->operands
[i
];
260 if (!operand
.isTemp())
262 if (operand
.isFixed() && operand
.physReg() == vcc
)
263 program
->needs_vcc
= true;
264 /* check if we changed an already processed block */
265 const bool inserted
= live_temps
[preds
[i
]].insert(operand
.getTemp()).second
;
267 operand
.setKill(true);
268 worklist
.insert(preds
[i
]);
269 if (insn
->opcode
== aco_opcode::p_phi
&& operand
.getTemp().type() == RegType::sgpr
)
270 phi_sgpr_ops
[preds
[i
]] += operand
.size();
276 if ((block
->logical_preds
.empty() && !live_vgprs
.empty()) ||
277 (block
->linear_preds
.empty() && !live_sgprs
.empty())) {
278 aco_print_program(program
, stderr
);
279 fprintf(stderr
, "These temporaries are never defined or are defined after use:\n");
280 for (Temp vgpr
: live_vgprs
)
281 fprintf(stderr
, "%%%d\n", vgpr
.id());
282 for (Temp sgpr
: live_sgprs
)
283 fprintf(stderr
, "%%%d\n", sgpr
.id());
287 assert(block
->index
!= 0 || new_demand
== RegisterDemand());
290 unsigned calc_waves_per_workgroup(Program
*program
)
292 /* When workgroup size is not known, just go with wave_size */
293 unsigned workgroup_size
= program
->workgroup_size
== UINT_MAX
295 : program
->workgroup_size
;
297 return align(workgroup_size
, program
->wave_size
) / program
->wave_size
;
299 } /* end namespace */
301 uint16_t get_extra_sgprs(Program
*program
)
303 if (program
->chip_class
>= GFX10
) {
304 assert(!program
->needs_flat_scr
);
305 assert(!program
->xnack_enabled
);
307 } else if (program
->chip_class
>= GFX8
) {
308 if (program
->needs_flat_scr
)
310 else if (program
->xnack_enabled
)
312 else if (program
->needs_vcc
)
317 assert(!program
->xnack_enabled
);
318 if (program
->needs_flat_scr
)
320 else if (program
->needs_vcc
)
327 uint16_t get_sgpr_alloc(Program
*program
, uint16_t addressable_sgprs
)
329 assert(addressable_sgprs
<= program
->sgpr_limit
);
330 uint16_t sgprs
= addressable_sgprs
+ get_extra_sgprs(program
);
331 uint16_t granule
= program
->sgpr_alloc_granule
+ 1;
332 return align(std::max(sgprs
, granule
), granule
);
335 uint16_t get_vgpr_alloc(Program
*program
, uint16_t addressable_vgprs
)
337 assert(addressable_vgprs
<= program
->vgpr_limit
);
338 uint16_t granule
= program
->vgpr_alloc_granule
+ 1;
339 return align(std::max(addressable_vgprs
, granule
), granule
);
342 uint16_t get_addr_sgpr_from_waves(Program
*program
, uint16_t max_waves
)
344 uint16_t sgprs
= program
->physical_sgprs
/ max_waves
& ~program
->sgpr_alloc_granule
;
345 sgprs
-= get_extra_sgprs(program
);
346 return std::min(sgprs
, program
->sgpr_limit
);
349 uint16_t get_addr_vgpr_from_waves(Program
*program
, uint16_t max_waves
)
351 uint16_t vgprs
= 256 / max_waves
& ~program
->vgpr_alloc_granule
;
352 return std::min(vgprs
, program
->vgpr_limit
);
355 void calc_min_waves(Program
* program
)
357 unsigned waves_per_workgroup
= calc_waves_per_workgroup(program
);
358 /* currently min_waves is in wave64 waves */
359 if (program
->wave_size
== 32)
360 waves_per_workgroup
= DIV_ROUND_UP(waves_per_workgroup
, 2);
362 unsigned simd_per_cu
= 4; /* TODO: different on Navi */
363 bool wgp
= program
->chip_class
>= GFX10
; /* assume WGP is used on Navi */
364 unsigned simd_per_cu_wgp
= wgp
? simd_per_cu
* 2 : simd_per_cu
;
366 program
->min_waves
= DIV_ROUND_UP(waves_per_workgroup
, simd_per_cu_wgp
);
369 void update_vgpr_sgpr_demand(Program
* program
, const RegisterDemand new_demand
)
371 /* TODO: max_waves_per_simd, simd_per_cu and the number of physical vgprs for Navi */
372 unsigned max_waves_per_simd
= 10;
373 unsigned simd_per_cu
= 4;
375 bool wgp
= program
->chip_class
>= GFX10
; /* assume WGP is used on Navi */
376 unsigned simd_per_cu_wgp
= wgp
? simd_per_cu
* 2 : simd_per_cu
;
377 unsigned lds_limit
= wgp
? program
->lds_limit
* 2 : program
->lds_limit
;
379 /* this won't compile, register pressure reduction necessary */
380 if (new_demand
.vgpr
> program
->vgpr_limit
|| new_demand
.sgpr
> program
->sgpr_limit
) {
381 program
->num_waves
= 0;
382 program
->max_reg_demand
= new_demand
;
384 program
->num_waves
= program
->physical_sgprs
/ get_sgpr_alloc(program
, new_demand
.sgpr
);
385 program
->num_waves
= std::min
<uint16_t>(program
->num_waves
, 256 / get_vgpr_alloc(program
, new_demand
.vgpr
));
386 program
->max_waves
= max_waves_per_simd
;
388 /* adjust max_waves for workgroup and LDS limits */
389 unsigned waves_per_workgroup
= calc_waves_per_workgroup(program
);
390 unsigned workgroups_per_cu_wgp
= max_waves_per_simd
* simd_per_cu_wgp
/ waves_per_workgroup
;
391 if (program
->config
->lds_size
) {
392 unsigned lds
= program
->config
->lds_size
* program
->lds_alloc_granule
;
393 workgroups_per_cu_wgp
= std::min(workgroups_per_cu_wgp
, lds_limit
/ lds
);
395 if (waves_per_workgroup
> 1 && program
->chip_class
< GFX10
)
396 workgroups_per_cu_wgp
= std::min(workgroups_per_cu_wgp
, 16u); /* TODO: is this a SI-only limit? what about Navi? */
398 /* in cases like waves_per_workgroup=3 or lds=65536 and
399 * waves_per_workgroup=1, we want the maximum possible number of waves per
400 * SIMD and not the minimum. so DIV_ROUND_UP is used */
401 program
->max_waves
= std::min
<uint16_t>(program
->max_waves
, DIV_ROUND_UP(workgroups_per_cu_wgp
* waves_per_workgroup
, simd_per_cu_wgp
));
403 /* incorporate max_waves and calculate max_reg_demand */
404 program
->num_waves
= std::min
<uint16_t>(program
->num_waves
, program
->max_waves
);
405 program
->max_reg_demand
.vgpr
= get_addr_vgpr_from_waves(program
, program
->num_waves
);
406 program
->max_reg_demand
.sgpr
= get_addr_sgpr_from_waves(program
, program
->num_waves
);
410 live
live_var_analysis(Program
* program
,
411 const struct radv_nir_compiler_options
*options
)
414 result
.live_out
.resize(program
->blocks
.size());
415 result
.register_demand
.resize(program
->blocks
.size());
416 std::set
<unsigned> worklist
;
417 std::vector
<uint16_t> phi_sgpr_ops(program
->blocks
.size());
418 RegisterDemand new_demand
;
420 program
->needs_vcc
= false;
422 /* this implementation assumes that the block idx corresponds to the block's position in program->blocks vector */
423 for (Block
& block
: program
->blocks
)
424 worklist
.insert(block
.index
);
425 while (!worklist
.empty()) {
426 std::set
<unsigned>::reverse_iterator b_it
= worklist
.rbegin();
427 unsigned block_idx
= *b_it
;
428 worklist
.erase(block_idx
);
429 process_live_temps_per_block(program
, result
, &program
->blocks
[block_idx
], worklist
, phi_sgpr_ops
);
430 new_demand
.update(program
->blocks
[block_idx
].register_demand
);
433 /* calculate the program's register demand and number of waves */
434 update_vgpr_sgpr_demand(program
, new_demand
);