aco: only break SMEM clauses if XNACK is enabled (mostly APUs)
[mesa.git] / src / amd / compiler / aco_live_var_analysis.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 * Authors:
25 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
26 * Bas Nieuwenhuizen (bas@basnieuwenhuizen.nl)
27 *
28 */
29
30 #include "aco_ir.h"
31 #include "util/u_math.h"
32
33 #include <set>
34 #include <vector>
35
36 #include "vulkan/radv_shader.h"
37
38 namespace aco {
39 RegisterDemand get_live_changes(aco_ptr<Instruction>& instr)
40 {
41 RegisterDemand changes;
42 for (const Definition& def : instr->definitions) {
43 if (!def.isTemp() || def.isKill())
44 continue;
45 changes += def.getTemp();
46 }
47
48 for (const Operand& op : instr->operands) {
49 if (!op.isTemp() || !op.isFirstKill())
50 continue;
51 changes -= op.getTemp();
52 }
53
54 return changes;
55 }
56
57 RegisterDemand get_temp_registers(aco_ptr<Instruction>& instr)
58 {
59 RegisterDemand temp_registers;
60
61 for (Definition def : instr->definitions) {
62 if (!def.isTemp())
63 continue;
64 if (def.isKill())
65 temp_registers += def.getTemp();
66 }
67
68 for (Operand op : instr->operands) {
69 if (op.isTemp() && op.isLateKill() && op.isFirstKill())
70 temp_registers += op.getTemp();
71 }
72
73 return temp_registers;
74 }
75
76 RegisterDemand get_demand_before(RegisterDemand demand, aco_ptr<Instruction>& instr, aco_ptr<Instruction>& instr_before)
77 {
78 demand -= get_live_changes(instr);
79 demand -= get_temp_registers(instr);
80 if (instr_before)
81 demand += get_temp_registers(instr_before);
82 return demand;
83 }
84
85 namespace {
/* Computes the per-instruction register demand of one block by walking its
 * instructions backwards from the block's live-out set, updates the kill
 * flags on all operands/definitions, and merges the resulting live-in set
 * into the live-out sets of the block's predecessors. Predecessors whose
 * live-out set grew are pushed onto 'worklist' for (re)processing.
 * 'phi_sgpr_ops' tracks, per predecessor block, how many SGPRs are consumed
 * by logical-phi operands coming from that block (those values die at the
 * predecessor's p_logical_end rather than at its block end). */
void process_live_temps_per_block(Program *program, live& lives, Block* block,
                                  std::set<unsigned>& worklist, std::vector<uint16_t>& phi_sgpr_ops)
{
   std::vector<RegisterDemand>& register_demand = lives.register_demand[block->index];
   RegisterDemand new_demand;

   register_demand.resize(block->instructions.size());
   block->register_demand = RegisterDemand();

   /* Linear (SGPR) and logical (VGPR) temporaries are tracked separately
    * because they propagate along different predecessor edges below. */
   std::set<Temp> live_sgprs;
   std::set<Temp> live_vgprs;

   /* add the live_out_exec to live */
   bool exec_live = false;
   if (block->live_out_exec != Temp()) {
      live_sgprs.insert(block->live_out_exec);
      new_demand.sgpr += program->lane_mask.size();
      exec_live = true;
   }

   /* split the live-outs from this block into the temporary sets */
   std::vector<std::set<Temp>>& live_temps = lives.live_out;
   for (const Temp temp : live_temps[block->index]) {
      const bool inserted = temp.is_linear()
                          ? live_sgprs.insert(temp).second
                          : live_vgprs.insert(temp).second;
      if (inserted) {
         new_demand += temp;
      }
   }
   /* SGPRs read by successor phis die at p_logical_end, not at the block
    * end; they are re-added when p_logical_end is reached below. */
   new_demand.sgpr -= phi_sgpr_ops[block->index];

   /* traverse the instructions backwards */
   int idx;
   for (idx = block->instructions.size() -1; idx >= 0; idx--) {
      Instruction *insn = block->instructions[idx].get();
      if (is_phi(insn))
         break;

      /* subtract the 1 or 2 sgprs from exec (exec is counted in new_demand
       * but not reported in the per-instruction demand) */
      if (exec_live)
         assert(new_demand.sgpr >= (int16_t) program->lane_mask.size());
      register_demand[idx] = RegisterDemand(new_demand.vgpr, new_demand.sgpr - (exec_live ? program->lane_mask.size() : 0));

      /* KILL: a definition's value is not live above the defining
       * instruction, so remove it from the live sets. A definition that was
       * not live at all (dead result) still occupies a register here. */
      for (Definition& definition : insn->definitions) {
         if (!definition.isTemp()) {
            continue;
         }
         if ((definition.isFixed() || definition.hasHint()) && definition.physReg() == vcc)
            program->needs_vcc = true;

         const Temp temp = definition.getTemp();
         size_t n = 0;
         if (temp.is_linear())
            n = live_sgprs.erase(temp);
         else
            n = live_vgprs.erase(temp);

         if (n) {
            new_demand -= temp;
            definition.setKill(false);
         } else {
            /* dead definition: count it for this instruction only */
            register_demand[idx] += temp;
            definition.setKill(true);
         }

         if (definition.isFixed() && definition.physReg() == exec)
            exec_live = false;
      }

      /* GEN: operands become live above the instruction that reads them */
      if (insn->opcode == aco_opcode::p_logical_end) {
         /* successor phi operands from this block are live up to here */
         new_demand.sgpr += phi_sgpr_ops[block->index];
      } else {
         /* we need to do this in a separate loop because the next one can
          * setKill() for several operands at once and we don't want to
          * overwrite that in a later iteration */
         for (Operand& op : insn->operands)
            op.setKill(false);

         for (unsigned i = 0; i < insn->operands.size(); ++i)
         {
            Operand& operand = insn->operands[i];
            if (!operand.isTemp())
               continue;
            if (operand.isFixed() && operand.physReg() == vcc)
               program->needs_vcc = true;
            const Temp temp = operand.getTemp();
            const bool inserted = temp.is_linear()
                                ? live_sgprs.insert(temp).second
                                : live_vgprs.insert(temp).second;
            if (inserted) {
               /* first (backwards: last) use of this temp kills it; mark any
                * further uses of the same temp in this instruction as
                * non-first kills */
               operand.setFirstKill(true);
               for (unsigned j = i + 1; j < insn->operands.size(); ++j) {
                  if (insn->operands[j].isTemp() && insn->operands[j].tempId() == operand.tempId()) {
                     insn->operands[j].setFirstKill(false);
                     insn->operands[j].setKill(true);
                  }
               }
               /* late-kill operands still occupy their register while the
                * instruction executes */
               if (operand.isLateKill())
                  register_demand[idx] += temp;
               new_demand += temp;
            }

            if (operand.isFixed() && operand.physReg() == exec)
               exec_live = true;
         }
      }

      block->register_demand.update(register_demand[idx]);
   }

   /* update block's register demand for a last time */
   if (exec_live)
      assert(new_demand.sgpr >= (int16_t) program->lane_mask.size());
   new_demand.sgpr -= exec_live ? program->lane_mask.size() : 0;
   block->register_demand.update(new_demand);

   /* handle phi definitions: phis at the top of the block all carry the
    * block's live-in demand; their results are removed from the live sets */
   int phi_idx = idx;
   while (phi_idx >= 0) {
      register_demand[phi_idx] = new_demand;
      Instruction *insn = block->instructions[phi_idx].get();

      assert(is_phi(insn));
      assert(insn->definitions.size() == 1 && insn->definitions[0].isTemp());
      Definition& definition = insn->definitions[0];
      if ((definition.isFixed() || definition.hasHint()) && definition.physReg() == vcc)
         program->needs_vcc = true;
      const Temp temp = definition.getTemp();
      size_t n = 0;

      if (temp.is_linear())
         n = live_sgprs.erase(temp);
      else
         n = live_vgprs.erase(temp);

      if (n)
         definition.setKill(false);
      else
         definition.setKill(true);

      phi_idx--;
   }

   /* now, we have the live-in sets and need to merge them into the live-out sets */
   for (unsigned pred_idx : block->logical_preds) {
      for (Temp vgpr : live_vgprs) {
         auto it = live_temps[pred_idx].insert(vgpr);
         if (it.second)
            worklist.insert(pred_idx);
      }
   }

   for (unsigned pred_idx : block->linear_preds) {
      for (Temp sgpr : live_sgprs) {
         auto it = live_temps[pred_idx].insert(sgpr);
         if (it.second)
            worklist.insert(pred_idx);
      }
   }

   /* handle phi operands: each operand is live-out of the corresponding
    * predecessor only, so insert it there directly */
   phi_idx = idx;
   while (phi_idx >= 0) {
      Instruction *insn = block->instructions[phi_idx].get();
      assert(is_phi(insn));
      /* directly insert into the predecessors live-out set */
      std::vector<unsigned>& preds = insn->opcode == aco_opcode::p_phi
                                   ? block->logical_preds
                                   : block->linear_preds;
      for (unsigned i = 0; i < preds.size(); ++i) {
         Operand &operand = insn->operands[i];
         if (!operand.isTemp())
            continue;
         if (operand.isFixed() && operand.physReg() == vcc)
            program->needs_vcc = true;
         /* check if we changed an already processed block */
         const bool inserted = live_temps[preds[i]].insert(operand.getTemp()).second;
         if (inserted) {
            operand.setKill(true);
            worklist.insert(preds[i]);
            /* SGPR logical-phi operands die at the predecessor's
             * p_logical_end; record the amount for that block's pass */
            if (insn->opcode == aco_opcode::p_phi && operand.getTemp().type() == RegType::sgpr)
               phi_sgpr_ops[preds[i]] += operand.size();
         }
      }
      phi_idx--;
   }

   /* a temp still live at a block without the matching predecessor kind has
    * no definition reaching its uses — this is a malformed program */
   if ((block->logical_preds.empty() && !live_vgprs.empty()) ||
       (block->linear_preds.empty() && !live_sgprs.empty())) {
      aco_print_program(program, stderr);
      fprintf(stderr, "These temporaries are never defined or are defined after use:\n");
      for (Temp vgpr : live_vgprs)
         fprintf(stderr, "%%%d\n", vgpr.id());
      for (Temp sgpr : live_sgprs)
         fprintf(stderr, "%%%d\n", sgpr.id());
      abort();
   }

   assert(block->index != 0 || new_demand == RegisterDemand());
}
289
290 unsigned calc_waves_per_workgroup(Program *program)
291 {
292 /* When workgroup size is not known, just go with wave_size */
293 unsigned workgroup_size = program->workgroup_size == UINT_MAX
294 ? program->wave_size
295 : program->workgroup_size;
296
297 return align(workgroup_size, program->wave_size) / program->wave_size;
298 }
299 } /* end namespace */
300
301 uint16_t get_extra_sgprs(Program *program)
302 {
303 if (program->chip_class >= GFX10) {
304 assert(!program->needs_flat_scr);
305 assert(!program->xnack_enabled);
306 return 2;
307 } else if (program->chip_class >= GFX8) {
308 if (program->needs_flat_scr)
309 return 6;
310 else if (program->xnack_enabled)
311 return 4;
312 else if (program->needs_vcc)
313 return 2;
314 else
315 return 0;
316 } else {
317 assert(!program->xnack_enabled);
318 if (program->needs_flat_scr)
319 return 4;
320 else if (program->needs_vcc)
321 return 2;
322 else
323 return 0;
324 }
325 }
326
327 uint16_t get_sgpr_alloc(Program *program, uint16_t addressable_sgprs)
328 {
329 assert(addressable_sgprs <= program->sgpr_limit);
330 uint16_t sgprs = addressable_sgprs + get_extra_sgprs(program);
331 uint16_t granule = program->sgpr_alloc_granule + 1;
332 return align(std::max(sgprs, granule), granule);
333 }
334
335 uint16_t get_vgpr_alloc(Program *program, uint16_t addressable_vgprs)
336 {
337 assert(addressable_vgprs <= program->vgpr_limit);
338 uint16_t granule = program->vgpr_alloc_granule + 1;
339 return align(std::max(addressable_vgprs, granule), granule);
340 }
341
342 uint16_t get_addr_sgpr_from_waves(Program *program, uint16_t max_waves)
343 {
344 uint16_t sgprs = program->physical_sgprs / max_waves & ~program->sgpr_alloc_granule;
345 sgprs -= get_extra_sgprs(program);
346 return std::min(sgprs, program->sgpr_limit);
347 }
348
349 uint16_t get_addr_vgpr_from_waves(Program *program, uint16_t max_waves)
350 {
351 uint16_t vgprs = 256 / max_waves & ~program->vgpr_alloc_granule;
352 return std::min(vgprs, program->vgpr_limit);
353 }
354
355 void calc_min_waves(Program* program)
356 {
357 unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
358 /* currently min_waves is in wave64 waves */
359 if (program->wave_size == 32)
360 waves_per_workgroup = DIV_ROUND_UP(waves_per_workgroup, 2);
361
362 unsigned simd_per_cu = 4; /* TODO: different on Navi */
363 bool wgp = program->chip_class >= GFX10; /* assume WGP is used on Navi */
364 unsigned simd_per_cu_wgp = wgp ? simd_per_cu * 2 : simd_per_cu;
365
366 program->min_waves = DIV_ROUND_UP(waves_per_workgroup, simd_per_cu_wgp);
367 }
368
/* Computes the wave count (occupancy) achievable with the given register
 * demand, limited by workgroup size and LDS usage, and stores the resulting
 * num_waves/max_waves and the register budget (max_reg_demand) on the
 * program. A demand above the addressable limits yields num_waves == 0,
 * meaning the program cannot compile without register pressure reduction. */
void update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand)
{
   /* TODO: max_waves_per_simd, simd_per_cu and the number of physical vgprs for Navi */
   unsigned max_waves_per_simd = 10;
   unsigned simd_per_cu = 4;

   bool wgp = program->chip_class >= GFX10; /* assume WGP is used on Navi */
   unsigned simd_per_cu_wgp = wgp ? simd_per_cu * 2 : simd_per_cu;
   /* in WGP mode, the LDS of both CUs is available to the workgroup */
   unsigned lds_limit = wgp ? program->lds_limit * 2 : program->lds_limit;

   /* this won't compile, register pressure reduction necessary */
   if (new_demand.vgpr > program->vgpr_limit || new_demand.sgpr > program->sgpr_limit) {
      program->num_waves = 0;
      program->max_reg_demand = new_demand;
   } else {
      /* waves per SIMD limited by whichever register file runs out first */
      program->num_waves = program->physical_sgprs / get_sgpr_alloc(program, new_demand.sgpr);
      program->num_waves = std::min<uint16_t>(program->num_waves, 256 / get_vgpr_alloc(program, new_demand.vgpr));
      program->max_waves = max_waves_per_simd;

      /* adjust max_waves for workgroup and LDS limits */
      unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
      unsigned workgroups_per_cu_wgp = max_waves_per_simd * simd_per_cu_wgp / waves_per_workgroup;
      if (program->config->lds_size) {
         unsigned lds = program->config->lds_size * program->lds_alloc_granule;
         workgroups_per_cu_wgp = std::min(workgroups_per_cu_wgp, lds_limit / lds);
      }
      if (waves_per_workgroup > 1 && program->chip_class < GFX10)
         workgroups_per_cu_wgp = std::min(workgroups_per_cu_wgp, 16u); /* TODO: is this a SI-only limit? what about Navi? */

      /* in cases like waves_per_workgroup=3 or lds=65536 and
       * waves_per_workgroup=1, we want the maximum possible number of waves per
       * SIMD and not the minimum. so DIV_ROUND_UP is used */
      program->max_waves = std::min<uint16_t>(program->max_waves, DIV_ROUND_UP(workgroups_per_cu_wgp * waves_per_workgroup, simd_per_cu_wgp));

      /* incorporate max_waves and calculate max_reg_demand */
      program->num_waves = std::min<uint16_t>(program->num_waves, program->max_waves);
      program->max_reg_demand.vgpr = get_addr_vgpr_from_waves(program, program->num_waves);
      program->max_reg_demand.sgpr = get_addr_sgpr_from_waves(program, program->num_waves);
   }
}
409
410 live live_var_analysis(Program* program,
411 const struct radv_nir_compiler_options *options)
412 {
413 live result;
414 result.live_out.resize(program->blocks.size());
415 result.register_demand.resize(program->blocks.size());
416 std::set<unsigned> worklist;
417 std::vector<uint16_t> phi_sgpr_ops(program->blocks.size());
418 RegisterDemand new_demand;
419
420 program->needs_vcc = false;
421
422 /* this implementation assumes that the block idx corresponds to the block's position in program->blocks vector */
423 for (Block& block : program->blocks)
424 worklist.insert(block.index);
425 while (!worklist.empty()) {
426 std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
427 unsigned block_idx = *b_it;
428 worklist.erase(block_idx);
429 process_live_temps_per_block(program, result, &program->blocks[block_idx], worklist, phi_sgpr_ops);
430 new_demand.update(program->blocks[block_idx].register_demand);
431 }
432
433 /* calculate the program's register demand and number of waves */
434 update_vgpr_sgpr_demand(program, new_demand);
435
436 return result;
437 }
438
439 }
440