aco: fix operand kill flags when a temporary is used more than once
[mesa.git] / src / amd / compiler / aco_live_var_analysis.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 * Copyright © 2018 Google
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 * Authors:
25 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
26 * Bas Nieuwenhuizen (bas@basnieuwenhuizen.nl)
27 *
28 */
29
30 #include "aco_ir.h"
31 #include "util/u_math.h"
32
33 #include <set>
34 #include <vector>
35
36 #include "vulkan/radv_shader.h"
37
38 namespace aco {
39 namespace {
40
/* Backward liveness walk over a single block.
 *
 * Fills lives.register_demand[block->index] with the register demand at each
 * instruction, updates block->register_demand with the block's maximum, sets
 * the (first-)kill flags on all operands and definitions, and merges this
 * block's live-in temporaries into the live-out sets of its predecessors
 * (VGPRs via the logical CFG, SGPRs/linear temps via the linear CFG).
 * Every predecessor whose live-out set grew is pushed onto `worklist` so the
 * fixed-point loop in live_var_analysis() revisits it.
 *
 * phi_sgpr_ops[b] accumulates SGPRs consumed only by logical-phi operands in
 * block b's successors; those count toward b's demand only up to
 * p_logical_end (see the GEN handling below).
 */
void process_live_temps_per_block(Program *program, live& lives, Block* block,
                                  std::set<unsigned>& worklist, std::vector<uint16_t>& phi_sgpr_ops)
{
   std::vector<RegisterDemand>& register_demand = lives.register_demand[block->index];
   RegisterDemand new_demand;

   register_demand.resize(block->instructions.size());
   block->register_demand = RegisterDemand();

   /* temporaries currently live while walking backwards: linear temps
    * (SGPRs) and logical temps (VGPRs) are tracked separately */
   std::set<Temp> live_sgprs;
   std::set<Temp> live_vgprs;

   /* add the live_out_exec to live */
   bool exec_live = false;
   if (block->live_out_exec != Temp()) {
      live_sgprs.insert(block->live_out_exec);
      new_demand.sgpr += program->lane_mask.size();
      exec_live = true;
   }

   /* split the live-outs from this block into the temporary sets */
   std::vector<std::set<Temp>>& live_temps = lives.live_out;
   for (const Temp temp : live_temps[block->index]) {
      const bool inserted = temp.is_linear()
                          ? live_sgprs.insert(temp).second
                          : live_vgprs.insert(temp).second;
      if (inserted) {
         new_demand += temp;
      }
   }
   /* successor phi operands die on the logical CFG; they are re-added when
    * the walk reaches p_logical_end below */
   new_demand.sgpr -= phi_sgpr_ops[block->index];

   /* traverse the instructions backwards */
   int idx;
   for (idx = block->instructions.size() -1; idx >= 0; idx--) {
      Instruction *insn = block->instructions[idx].get();
      if (is_phi(insn))
         break;

      /* subtract the 1 or 2 sgprs from exec */
      if (exec_live)
         assert(new_demand.sgpr >= (int16_t) program->lane_mask.size());
      register_demand[idx] = RegisterDemand(new_demand.vgpr, new_demand.sgpr - (exec_live ? program->lane_mask.size() : 0));

      /* KILL: walking backwards, a definition ends its temporary's live
       * range — the value is dead above this instruction */
      for (Definition& definition : insn->definitions) {
         if (!definition.isTemp()) {
            continue;
         }

         const Temp temp = definition.getTemp();
         size_t n = 0;
         if (temp.is_linear())
            n = live_sgprs.erase(temp);
         else
            n = live_vgprs.erase(temp);

         if (n) {
            new_demand -= temp;
            definition.setKill(false);
         } else {
            /* the result was never live-out: it still occupies registers at
             * this instruction, and the definition is a kill */
            register_demand[idx] += temp;
            definition.setKill(true);
         }

         if (definition.isFixed() && definition.physReg() == exec)
            exec_live = false;
      }

      /* GEN */
      if (insn->opcode == aco_opcode::p_logical_end) {
         new_demand.sgpr += phi_sgpr_ops[block->index];
      } else {
         /* we need to do this in a separate loop because the next one can
          * setKill() for several operands at once and we don't want to
          * overwrite that in a later iteration */
         for (Operand& op : insn->operands)
            op.setKill(false);

         for (unsigned i = 0; i < insn->operands.size(); ++i)
         {
            Operand& operand = insn->operands[i];
            if (!operand.isTemp()) {
               continue;
            }
            const Temp temp = operand.getTemp();
            const bool inserted = temp.is_linear()
                                ? live_sgprs.insert(temp).second
                                : live_vgprs.insert(temp).second;
            if (inserted) {
               /* last use of this temp (first seen walking backwards): mark
                * it as the first kill, and any duplicate uses of the same
                * temp in this instruction as kill-but-not-first-kill */
               operand.setFirstKill(true);
               for (unsigned j = i + 1; j < insn->operands.size(); ++j) {
                  if (insn->operands[j].isTemp() && insn->operands[j].tempId() == operand.tempId()) {
                     insn->operands[j].setFirstKill(false);
                     insn->operands[j].setKill(true);
                  }
               }
               new_demand += temp;
            }

            if (operand.isFixed() && operand.physReg() == exec)
               exec_live = true;
         }
      }

      block->register_demand.update(register_demand[idx]);
   }

   /* update block's register demand for a last time */
   if (exec_live)
      assert(new_demand.sgpr >= (int16_t) program->lane_mask.size());
   new_demand.sgpr -= exec_live ? program->lane_mask.size() : 0;
   block->register_demand.update(new_demand);

   /* handle phi definitions: phis only kill (their operands live in the
    * predecessors' live-out sets, handled further below) */
   int phi_idx = idx;
   while (phi_idx >= 0) {
      register_demand[phi_idx] = new_demand;
      Instruction *insn = block->instructions[phi_idx].get();

      assert(is_phi(insn));
      assert(insn->definitions.size() == 1 && insn->definitions[0].isTemp());
      Definition& definition = insn->definitions[0];
      const Temp temp = definition.getTemp();
      size_t n = 0;

      if (temp.is_linear())
         n = live_sgprs.erase(temp);
      else
         n = live_vgprs.erase(temp);

      if (n)
         definition.setKill(false);
      else
         definition.setKill(true);

      phi_idx--;
   }

   /* now, we have the live-in sets and need to merge them into the live-out sets */
   for (unsigned pred_idx : block->logical_preds) {
      for (Temp vgpr : live_vgprs) {
         auto it = live_temps[pred_idx].insert(vgpr);
         if (it.second)
            worklist.insert(pred_idx);
      }
   }

   for (unsigned pred_idx : block->linear_preds) {
      for (Temp sgpr : live_sgprs) {
         auto it = live_temps[pred_idx].insert(sgpr);
         if (it.second)
            worklist.insert(pred_idx);
      }
   }

   /* handle phi operands */
   phi_idx = idx;
   while (phi_idx >= 0) {
      Instruction *insn = block->instructions[phi_idx].get();
      assert(is_phi(insn));
      /* directly insert into the predecessors live-out set */
      std::vector<unsigned>& preds = insn->opcode == aco_opcode::p_phi
                                   ? block->logical_preds
                                   : block->linear_preds;
      for (unsigned i = 0; i < preds.size(); ++i) {
         Operand &operand = insn->operands[i];
         if (!operand.isTemp()) {
            continue;
         }
         /* check if we changed an already processed block */
         const bool inserted = live_temps[preds[i]].insert(operand.getTemp()).second;
         if (inserted) {
            operand.setKill(true);
            worklist.insert(preds[i]);
            if (insn->opcode == aco_opcode::p_phi && operand.getTemp().type() == RegType::sgpr)
               phi_sgpr_ops[preds[i]] += operand.size();
         }
      }
      phi_idx--;
   }

   /* anything still live at the start of the entry block was used before
    * being defined: dump the program and abort */
   if (!(block->index != 0 || (live_vgprs.empty() && live_sgprs.empty()))) {
      aco_print_program(program, stderr);
      fprintf(stderr, "These temporaries are never defined or are defined after use:\n");
      for (Temp vgpr : live_vgprs)
         fprintf(stderr, "%%%d\n", vgpr.id());
      for (Temp sgpr : live_sgprs)
         fprintf(stderr, "%%%d\n", sgpr.id());
      abort();
   }

   assert(block->index != 0 || new_demand == RegisterDemand());
}
235
236 unsigned calc_waves_per_workgroup(Program *program)
237 {
238 unsigned workgroup_size = program->wave_size;
239 if (program->stage == compute_cs) {
240 unsigned* bsize = program->info->cs.block_size;
241 workgroup_size = bsize[0] * bsize[1] * bsize[2];
242 }
243 return align(workgroup_size, program->wave_size) / program->wave_size;
244 }
245 } /* end namespace */
246
247 uint16_t get_extra_sgprs(Program *program)
248 {
249 if (program->chip_class >= GFX10) {
250 assert(!program->needs_flat_scr);
251 assert(!program->needs_xnack_mask);
252 return 2;
253 } else if (program->chip_class >= GFX8) {
254 if (program->needs_flat_scr)
255 return 6;
256 else if (program->needs_xnack_mask)
257 return 4;
258 else if (program->needs_vcc)
259 return 2;
260 else
261 return 0;
262 } else {
263 assert(!program->needs_xnack_mask);
264 if (program->needs_flat_scr)
265 return 4;
266 else if (program->needs_vcc)
267 return 2;
268 else
269 return 0;
270 }
271 }
272
273 uint16_t get_sgpr_alloc(Program *program, uint16_t addressable_sgprs)
274 {
275 assert(addressable_sgprs <= program->sgpr_limit);
276 uint16_t sgprs = addressable_sgprs + get_extra_sgprs(program);
277 uint16_t granule = program->sgpr_alloc_granule + 1;
278 return align(std::max(sgprs, granule), granule);
279 }
280
281 uint16_t get_vgpr_alloc(Program *program, uint16_t addressable_vgprs)
282 {
283 assert(addressable_vgprs <= program->vgpr_limit);
284 uint16_t granule = program->vgpr_alloc_granule + 1;
285 return align(std::max(addressable_vgprs, granule), granule);
286 }
287
288 uint16_t get_addr_sgpr_from_waves(Program *program, uint16_t max_waves)
289 {
290 uint16_t sgprs = program->physical_sgprs / max_waves & ~program->sgpr_alloc_granule;
291 sgprs -= get_extra_sgprs(program);
292 return std::min(sgprs, program->sgpr_limit);
293 }
294
295 uint16_t get_addr_vgpr_from_waves(Program *program, uint16_t max_waves)
296 {
297 uint16_t vgprs = 256 / max_waves & ~program->vgpr_alloc_granule;
298 return std::min(vgprs, program->vgpr_limit);
299 }
300
301 void calc_min_waves(Program* program)
302 {
303 unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
304 /* currently min_waves is in wave64 waves */
305 if (program->wave_size == 32)
306 waves_per_workgroup = DIV_ROUND_UP(waves_per_workgroup, 2);
307
308 unsigned simd_per_cu = 4; /* TODO: different on Navi */
309 bool wgp = program->chip_class >= GFX10; /* assume WGP is used on Navi */
310 unsigned simd_per_cu_wgp = wgp ? simd_per_cu * 2 : simd_per_cu;
311
312 program->min_waves = DIV_ROUND_UP(waves_per_workgroup, simd_per_cu_wgp);
313 }
314
/* Derives program->num_waves, program->max_waves and program->max_reg_demand
 * from the program's peak register demand.  num_waves is set to 0 when the
 * demand exceeds the addressable limits, i.e. the program cannot be emitted
 * without reducing register pressure first.
 */
void update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand)
{
   /* TODO: max_waves_per_simd, simd_per_cu and the number of physical vgprs for Navi */
   unsigned max_waves_per_simd = 10;
   unsigned simd_per_cu = 4;

   bool wgp = program->chip_class >= GFX10; /* assume WGP is used on Navi */
   unsigned simd_per_cu_wgp = wgp ? simd_per_cu * 2 : simd_per_cu;
   unsigned lds_limit = wgp ? program->lds_limit * 2 : program->lds_limit;

   /* this won't compile, register pressure reduction necessary */
   if (new_demand.vgpr > program->vgpr_limit || new_demand.sgpr > program->sgpr_limit) {
      program->num_waves = 0;
      program->max_reg_demand = new_demand;
   } else {
      /* waves per SIMD limited by SGPR then VGPR usage */
      program->num_waves = program->physical_sgprs / get_sgpr_alloc(program, new_demand.sgpr);
      program->num_waves = std::min<uint16_t>(program->num_waves, 256 / get_vgpr_alloc(program, new_demand.vgpr));
      program->max_waves = max_waves_per_simd;

      /* adjust max_waves for workgroup and LDS limits */
      unsigned waves_per_workgroup = calc_waves_per_workgroup(program);
      unsigned workgroups_per_cu_wgp = max_waves_per_simd * simd_per_cu_wgp / waves_per_workgroup;
      if (program->config->lds_size) {
         unsigned lds = program->config->lds_size * program->lds_alloc_granule;
         workgroups_per_cu_wgp = std::min(workgroups_per_cu_wgp, lds_limit / lds);
      }
      if (waves_per_workgroup > 1 && program->chip_class < GFX10)
         workgroups_per_cu_wgp = std::min(workgroups_per_cu_wgp, 16u); /* TODO: is this a SI-only limit? what about Navi? */

      /* in cases like waves_per_workgroup=3 or lds=65536 and
       * waves_per_workgroup=1, we want the maximum possible number of waves per
       * SIMD and not the minimum. so DIV_ROUND_UP is used */
      program->max_waves = std::min<uint16_t>(program->max_waves, DIV_ROUND_UP(workgroups_per_cu_wgp * waves_per_workgroup, simd_per_cu_wgp));

      /* incorporate max_waves and calculate max_reg_demand */
      program->num_waves = std::min<uint16_t>(program->num_waves, program->max_waves);
      program->max_reg_demand.vgpr = get_addr_vgpr_from_waves(program, program->num_waves);
      program->max_reg_demand.sgpr = get_addr_sgpr_from_waves(program, program->num_waves);
   }
}
355
356 live live_var_analysis(Program* program,
357 const struct radv_nir_compiler_options *options)
358 {
359 live result;
360 result.live_out.resize(program->blocks.size());
361 result.register_demand.resize(program->blocks.size());
362 std::set<unsigned> worklist;
363 std::vector<uint16_t> phi_sgpr_ops(program->blocks.size());
364 RegisterDemand new_demand;
365
366 /* this implementation assumes that the block idx corresponds to the block's position in program->blocks vector */
367 for (Block& block : program->blocks)
368 worklist.insert(block.index);
369 while (!worklist.empty()) {
370 std::set<unsigned>::reverse_iterator b_it = worklist.rbegin();
371 unsigned block_idx = *b_it;
372 worklist.erase(block_idx);
373 process_live_temps_per_block(program, result, &program->blocks[block_idx], worklist, phi_sgpr_ops);
374 new_demand.update(program->blocks[block_idx].register_demand);
375 }
376
377 /* calculate the program's register demand and number of waves */
378 update_vgpr_sgpr_demand(program, new_demand);
379
380 return result;
381 }
382
383 }
384