/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *    Bas Nieuwenhuizen (bas@basnieuwenhuizen.nl)
 */
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <set>
#include <vector>

#include "util/u_math.h"
#include "vulkan/radv_shader.h"
41 void process_live_temps_per_block(Program
*program
, live
& lives
, Block
* block
,
42 std::set
<unsigned>& worklist
, std::vector
<uint16_t>& phi_sgpr_ops
)
44 std::vector
<RegisterDemand
>& register_demand
= lives
.register_demand
[block
->index
];
45 RegisterDemand new_demand
;
47 register_demand
.resize(block
->instructions
.size());
48 block
->register_demand
= RegisterDemand();
50 std::set
<Temp
> live_sgprs
;
51 std::set
<Temp
> live_vgprs
;
53 /* add the live_out_exec to live */
54 bool exec_live
= false;
55 if (block
->live_out_exec
!= Temp()) {
56 live_sgprs
.insert(block
->live_out_exec
);
57 new_demand
.sgpr
+= program
->lane_mask
.size();
61 /* split the live-outs from this block into the temporary sets */
62 std::vector
<std::set
<Temp
>>& live_temps
= lives
.live_out
;
63 for (const Temp temp
: live_temps
[block
->index
]) {
64 const bool inserted
= temp
.is_linear()
65 ? live_sgprs
.insert(temp
).second
66 : live_vgprs
.insert(temp
).second
;
71 new_demand
.sgpr
-= phi_sgpr_ops
[block
->index
];
73 /* traverse the instructions backwards */
75 for (idx
= block
->instructions
.size() -1; idx
>= 0; idx
--) {
76 Instruction
*insn
= block
->instructions
[idx
].get();
80 /* substract the 1 or 2 sgprs from exec */
82 assert(new_demand
.sgpr
>= (int16_t) program
->lane_mask
.size());
83 register_demand
[idx
] = RegisterDemand(new_demand
.vgpr
, new_demand
.sgpr
- (exec_live
? program
->lane_mask
.size() : 0));
86 for (Definition
& definition
: insn
->definitions
) {
87 if (!definition
.isTemp()) {
90 if ((definition
.isFixed() || definition
.hasHint()) && definition
.physReg() == vcc
)
91 program
->needs_vcc
= true;
93 const Temp temp
= definition
.getTemp();
96 n
= live_sgprs
.erase(temp
);
98 n
= live_vgprs
.erase(temp
);
102 definition
.setKill(false);
104 register_demand
[idx
] += temp
;
105 definition
.setKill(true);
108 if (definition
.isFixed() && definition
.physReg() == exec
)
113 if (insn
->opcode
== aco_opcode::p_logical_end
) {
114 new_demand
.sgpr
+= phi_sgpr_ops
[block
->index
];
116 /* we need to do this in a separate loop because the next one can
117 * setKill() for several operands at once and we don't want to
118 * overwrite that in a later iteration */
119 for (Operand
& op
: insn
->operands
)
122 for (unsigned i
= 0; i
< insn
->operands
.size(); ++i
)
124 Operand
& operand
= insn
->operands
[i
];
125 if (!operand
.isTemp())
127 if (operand
.isFixed() && operand
.physReg() == vcc
)
128 program
->needs_vcc
= true;
129 const Temp temp
= operand
.getTemp();
130 const bool inserted
= temp
.is_linear()
131 ? live_sgprs
.insert(temp
).second
132 : live_vgprs
.insert(temp
).second
;
134 operand
.setFirstKill(true);
135 for (unsigned j
= i
+ 1; j
< insn
->operands
.size(); ++j
) {
136 if (insn
->operands
[j
].isTemp() && insn
->operands
[j
].tempId() == operand
.tempId()) {
137 insn
->operands
[j
].setFirstKill(false);
138 insn
->operands
[j
].setKill(true);
144 if (operand
.isFixed() && operand
.physReg() == exec
)
149 block
->register_demand
.update(register_demand
[idx
]);
152 /* update block's register demand for a last time */
154 assert(new_demand
.sgpr
>= (int16_t) program
->lane_mask
.size());
155 new_demand
.sgpr
-= exec_live
? program
->lane_mask
.size() : 0;
156 block
->register_demand
.update(new_demand
);
158 /* handle phi definitions */
160 while (phi_idx
>= 0) {
161 register_demand
[phi_idx
] = new_demand
;
162 Instruction
*insn
= block
->instructions
[phi_idx
].get();
164 assert(is_phi(insn
));
165 assert(insn
->definitions
.size() == 1 && insn
->definitions
[0].isTemp());
166 Definition
& definition
= insn
->definitions
[0];
167 if ((definition
.isFixed() || definition
.hasHint()) && definition
.physReg() == vcc
)
168 program
->needs_vcc
= true;
169 const Temp temp
= definition
.getTemp();
172 if (temp
.is_linear())
173 n
= live_sgprs
.erase(temp
);
175 n
= live_vgprs
.erase(temp
);
178 definition
.setKill(false);
180 definition
.setKill(true);
185 /* now, we have the live-in sets and need to merge them into the live-out sets */
186 for (unsigned pred_idx
: block
->logical_preds
) {
187 for (Temp vgpr
: live_vgprs
) {
188 auto it
= live_temps
[pred_idx
].insert(vgpr
);
190 worklist
.insert(pred_idx
);
194 for (unsigned pred_idx
: block
->linear_preds
) {
195 for (Temp sgpr
: live_sgprs
) {
196 auto it
= live_temps
[pred_idx
].insert(sgpr
);
198 worklist
.insert(pred_idx
);
202 /* handle phi operands */
204 while (phi_idx
>= 0) {
205 Instruction
*insn
= block
->instructions
[phi_idx
].get();
206 assert(is_phi(insn
));
207 /* directly insert into the predecessors live-out set */
208 std::vector
<unsigned>& preds
= insn
->opcode
== aco_opcode::p_phi
209 ? block
->logical_preds
210 : block
->linear_preds
;
211 for (unsigned i
= 0; i
< preds
.size(); ++i
) {
212 Operand
&operand
= insn
->operands
[i
];
213 if (!operand
.isTemp())
215 if (operand
.isFixed() && operand
.physReg() == vcc
)
216 program
->needs_vcc
= true;
217 /* check if we changed an already processed block */
218 const bool inserted
= live_temps
[preds
[i
]].insert(operand
.getTemp()).second
;
220 operand
.setKill(true);
221 worklist
.insert(preds
[i
]);
222 if (insn
->opcode
== aco_opcode::p_phi
&& operand
.getTemp().type() == RegType::sgpr
)
223 phi_sgpr_ops
[preds
[i
]] += operand
.size();
229 if ((block
->logical_preds
.empty() && !live_vgprs
.empty()) ||
230 (block
->linear_preds
.empty() && !live_sgprs
.empty())) {
231 aco_print_program(program
, stderr
);
232 fprintf(stderr
, "These temporaries are never defined or are defined after use:\n");
233 for (Temp vgpr
: live_vgprs
)
234 fprintf(stderr
, "%%%d\n", vgpr
.id());
235 for (Temp sgpr
: live_sgprs
)
236 fprintf(stderr
, "%%%d\n", sgpr
.id());
240 assert(block
->index
!= 0 || new_demand
== RegisterDemand());
243 unsigned calc_waves_per_workgroup(Program
*program
)
245 unsigned workgroup_size
= program
->wave_size
;
246 if (program
->stage
== compute_cs
) {
247 unsigned* bsize
= program
->info
->cs
.block_size
;
248 workgroup_size
= bsize
[0] * bsize
[1] * bsize
[2];
250 return align(workgroup_size
, program
->wave_size
) / program
->wave_size
;
252 } /* end namespace */
254 uint16_t get_extra_sgprs(Program
*program
)
256 if (program
->chip_class
>= GFX10
) {
257 assert(!program
->needs_flat_scr
);
258 assert(!program
->needs_xnack_mask
);
260 } else if (program
->chip_class
>= GFX8
) {
261 if (program
->needs_flat_scr
)
263 else if (program
->needs_xnack_mask
)
265 else if (program
->needs_vcc
)
270 assert(!program
->needs_xnack_mask
);
271 if (program
->needs_flat_scr
)
273 else if (program
->needs_vcc
)
280 uint16_t get_sgpr_alloc(Program
*program
, uint16_t addressable_sgprs
)
282 assert(addressable_sgprs
<= program
->sgpr_limit
);
283 uint16_t sgprs
= addressable_sgprs
+ get_extra_sgprs(program
);
284 uint16_t granule
= program
->sgpr_alloc_granule
+ 1;
285 return align(std::max(sgprs
, granule
), granule
);
288 uint16_t get_vgpr_alloc(Program
*program
, uint16_t addressable_vgprs
)
290 assert(addressable_vgprs
<= program
->vgpr_limit
);
291 uint16_t granule
= program
->vgpr_alloc_granule
+ 1;
292 return align(std::max(addressable_vgprs
, granule
), granule
);
295 uint16_t get_addr_sgpr_from_waves(Program
*program
, uint16_t max_waves
)
297 uint16_t sgprs
= program
->physical_sgprs
/ max_waves
& ~program
->sgpr_alloc_granule
;
298 sgprs
-= get_extra_sgprs(program
);
299 return std::min(sgprs
, program
->sgpr_limit
);
302 uint16_t get_addr_vgpr_from_waves(Program
*program
, uint16_t max_waves
)
304 uint16_t vgprs
= 256 / max_waves
& ~program
->vgpr_alloc_granule
;
305 return std::min(vgprs
, program
->vgpr_limit
);
308 void calc_min_waves(Program
* program
)
310 unsigned waves_per_workgroup
= calc_waves_per_workgroup(program
);
311 /* currently min_waves is in wave64 waves */
312 if (program
->wave_size
== 32)
313 waves_per_workgroup
= DIV_ROUND_UP(waves_per_workgroup
, 2);
315 unsigned simd_per_cu
= 4; /* TODO: different on Navi */
316 bool wgp
= program
->chip_class
>= GFX10
; /* assume WGP is used on Navi */
317 unsigned simd_per_cu_wgp
= wgp
? simd_per_cu
* 2 : simd_per_cu
;
319 program
->min_waves
= DIV_ROUND_UP(waves_per_workgroup
, simd_per_cu_wgp
);
322 void update_vgpr_sgpr_demand(Program
* program
, const RegisterDemand new_demand
)
324 /* TODO: max_waves_per_simd, simd_per_cu and the number of physical vgprs for Navi */
325 unsigned max_waves_per_simd
= 10;
326 unsigned simd_per_cu
= 4;
328 bool wgp
= program
->chip_class
>= GFX10
; /* assume WGP is used on Navi */
329 unsigned simd_per_cu_wgp
= wgp
? simd_per_cu
* 2 : simd_per_cu
;
330 unsigned lds_limit
= wgp
? program
->lds_limit
* 2 : program
->lds_limit
;
332 /* this won't compile, register pressure reduction necessary */
333 if (new_demand
.vgpr
> program
->vgpr_limit
|| new_demand
.sgpr
> program
->sgpr_limit
) {
334 program
->num_waves
= 0;
335 program
->max_reg_demand
= new_demand
;
337 program
->num_waves
= program
->physical_sgprs
/ get_sgpr_alloc(program
, new_demand
.sgpr
);
338 program
->num_waves
= std::min
<uint16_t>(program
->num_waves
, 256 / get_vgpr_alloc(program
, new_demand
.vgpr
));
339 program
->max_waves
= max_waves_per_simd
;
341 /* adjust max_waves for workgroup and LDS limits */
342 unsigned waves_per_workgroup
= calc_waves_per_workgroup(program
);
343 unsigned workgroups_per_cu_wgp
= max_waves_per_simd
* simd_per_cu_wgp
/ waves_per_workgroup
;
344 if (program
->config
->lds_size
) {
345 unsigned lds
= program
->config
->lds_size
* program
->lds_alloc_granule
;
346 workgroups_per_cu_wgp
= std::min(workgroups_per_cu_wgp
, lds_limit
/ lds
);
348 if (waves_per_workgroup
> 1 && program
->chip_class
< GFX10
)
349 workgroups_per_cu_wgp
= std::min(workgroups_per_cu_wgp
, 16u); /* TODO: is this a SI-only limit? what about Navi? */
351 /* in cases like waves_per_workgroup=3 or lds=65536 and
352 * waves_per_workgroup=1, we want the maximum possible number of waves per
353 * SIMD and not the minimum. so DIV_ROUND_UP is used */
354 program
->max_waves
= std::min
<uint16_t>(program
->max_waves
, DIV_ROUND_UP(workgroups_per_cu_wgp
* waves_per_workgroup
, simd_per_cu_wgp
));
356 /* incorporate max_waves and calculate max_reg_demand */
357 program
->num_waves
= std::min
<uint16_t>(program
->num_waves
, program
->max_waves
);
358 program
->max_reg_demand
.vgpr
= get_addr_vgpr_from_waves(program
, program
->num_waves
);
359 program
->max_reg_demand
.sgpr
= get_addr_sgpr_from_waves(program
, program
->num_waves
);
363 live
live_var_analysis(Program
* program
,
364 const struct radv_nir_compiler_options
*options
)
367 result
.live_out
.resize(program
->blocks
.size());
368 result
.register_demand
.resize(program
->blocks
.size());
369 std::set
<unsigned> worklist
;
370 std::vector
<uint16_t> phi_sgpr_ops(program
->blocks
.size());
371 RegisterDemand new_demand
;
373 program
->needs_vcc
= false;
375 /* this implementation assumes that the block idx corresponds to the block's position in program->blocks vector */
376 for (Block
& block
: program
->blocks
)
377 worklist
.insert(block
.index
);
378 while (!worklist
.empty()) {
379 std::set
<unsigned>::reverse_iterator b_it
= worklist
.rbegin();
380 unsigned block_idx
= *b_it
;
381 worklist
.erase(block_idx
);
382 process_live_temps_per_block(program
, result
, &program
->blocks
[block_idx
], worklist
, phi_sgpr_ops
);
383 new_demand
.update(program
->blocks
[block_idx
].register_demand
);
386 /* calculate the program's register demand and number of waves */
387 update_vgpr_sgpr_demand(program
, new_demand
);