aco: fix WaR check for >64-bit FLAT/GLOBAL instructions
[mesa.git] src/amd/compiler/aco_insert_NOPs.cpp
/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>

#include "aco_ir.h"
#include <stack>

namespace aco {
namespace {

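/* GFX8/GFX9 pass context. The VALU_wr* fields track roughly where the most
 * recent VALU write to EXEC, VCC or an SGPR sits in the block's rebuilt
 * instruction list; distances from them decide how many wait states remain.
 * handle_block_gfx8_9() rebases the indices when a block ends so they stay
 * meaningful across block boundaries. */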
struct NOP_ctx_gfx8_9 {
   enum chip_class chip_class;
   unsigned vcc_physical;

   /* just initialize these with something less than max NOPs */
   int VALU_wrexec = -10;
   int VALU_wrvcc = -10;
   int VALU_wrsgpr = -10;

   NOP_ctx_gfx8_9(Program* program) : chip_class(program->chip_class) {
      vcc_physical = program->config->num_sgprs - 2;
   }
};

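/* GFX10 pass context. join() ORs together the state of all linear
 * predecessors, and operator== lets mitigate_hazards_gfx10() detect when
 * re-walking a loop body has reached a fixed point. */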
struct NOP_ctx_gfx10 {
   bool has_VOPC = false;
   bool has_nonVALU_exec_read = false;
   bool has_VMEM = false;
   bool has_branch_after_VMEM = false;
   bool has_DS = false;
   bool has_branch_after_DS = false;
   std::bitset<128> sgprs_read_by_VMEM;
   std::bitset<128> sgprs_read_by_SMEM;

   void join(const NOP_ctx_gfx10 &other) {
      has_VOPC |= other.has_VOPC;
      has_nonVALU_exec_read |= other.has_nonVALU_exec_read;
      has_VMEM |= other.has_VMEM;
      has_branch_after_VMEM |= other.has_branch_after_VMEM;
      has_DS |= other.has_DS;
      has_branch_after_DS |= other.has_branch_after_DS;
      sgprs_read_by_VMEM |= other.sgprs_read_by_VMEM;
      sgprs_read_by_SMEM |= other.sgprs_read_by_SMEM;
   }

   bool operator==(const NOP_ctx_gfx10 &other)
   {
      return
         has_VOPC == other.has_VOPC &&
         has_nonVALU_exec_read == other.has_nonVALU_exec_read &&
         has_VMEM == other.has_VMEM &&
         has_branch_after_VMEM == other.has_branch_after_VMEM &&
         has_DS == other.has_DS &&
         has_branch_after_DS == other.has_branch_after_DS &&
         sgprs_read_by_VMEM == other.sgprs_read_by_VMEM &&
         sgprs_read_by_SMEM == other.sgprs_read_by_SMEM;
   }
};

template <std::size_t N>
bool check_written_regs(const aco_ptr<Instruction> &instr, const std::bitset<N> &check_regs)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(), [&check_regs](const Definition &def) -> bool {
      bool writes_any = false;
      for (unsigned i = 0; i < def.size(); i++) {
         unsigned def_reg = def.physReg() + i;
         writes_any |= def_reg < check_regs.size() && check_regs[def_reg];
      }
      return writes_any;
   });
}

template <std::size_t N>
void mark_read_regs(const aco_ptr<Instruction> &instr, std::bitset<N> &reg_reads)
{
   for (const Operand &op : instr->operands) {
      for (unsigned i = 0; i < op.size(); i++) {
         unsigned reg = op.physReg() + i;
         if (reg < reg_reads.size())
            reg_reads.set(reg);
      }
   }
}

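/* VOPC implicitly writes VCC, a two-definition VOP3 (VOP3b) writes its
 * carry-out/compare result to an SGPR pair, and the readlane family writes a
 * lane value to an SGPR, so all of them count as VALU SGPR writes here. */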
bool VALU_writes_sgpr(aco_ptr<Instruction>& instr)
{
   if ((uint32_t) instr->format & (uint32_t) Format::VOPC)
      return true;
   if (instr->isVOP3() && instr->definitions.size() == 2)
      return true;
   if (instr->opcode == aco_opcode::v_readfirstlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32 ||
       instr->opcode == aco_opcode::v_readlane_b32_e64)
      return true;
   return false;
}

bool instr_writes_exec(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(), [](const Definition &def) -> bool {
      return def.physReg() == exec_lo || def.physReg() == exec_hi;
   });
}

bool instr_writes_sgpr(const aco_ptr<Instruction>& instr)
{
   return std::any_of(instr->definitions.begin(), instr->definitions.end(), [](const Definition &def) -> bool {
      return def.getTemp().type() == RegType::sgpr;
   });
}

inline bool instr_is_branch(const aco_ptr<Instruction>& instr)
{
   return instr->opcode == aco_opcode::s_branch ||
          instr->opcode == aco_opcode::s_cbranch_scc0 ||
          instr->opcode == aco_opcode::s_cbranch_scc1 ||
          instr->opcode == aco_opcode::s_cbranch_vccz ||
          instr->opcode == aco_opcode::s_cbranch_vccnz ||
          instr->opcode == aco_opcode::s_cbranch_execz ||
          instr->opcode == aco_opcode::s_cbranch_execnz ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys ||
          instr->opcode == aco_opcode::s_cbranch_cdbguser ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_or_user ||
          instr->opcode == aco_opcode::s_cbranch_cdbgsys_and_user ||
          instr->opcode == aco_opcode::s_subvector_loop_begin ||
          instr->opcode == aco_opcode::s_subvector_loop_end ||
          instr->opcode == aco_opcode::s_setpc_b64 ||
          instr->opcode == aco_opcode::s_swappc_b64 ||
          instr->opcode == aco_opcode::s_getpc_b64 ||
          instr->opcode == aco_opcode::s_call_b64;
}

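/* Checks whether the register ranges [a_reg, a_reg + a_size) and
 * [b_reg, b_reg + b_size) overlap, e.g. regs_intersect(s4, 2, s5, 1) is true
 * because s5 lies inside s[4:5]. */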
bool regs_intersect(PhysReg a_reg, unsigned a_size, PhysReg b_reg, unsigned b_size)
{
   return a_reg > b_reg ?
          (a_reg - b_reg < b_size) :
          (b_reg - a_reg < a_size);
}

unsigned handle_SMEM_clause(aco_ptr<Instruction>& instr, int new_idx,
                            std::vector<aco_ptr<Instruction>>& new_instructions)
{
   //TODO: s_dcache_inv needs to be in its own group on GFX10 (and previous versions?)
   const bool is_store = instr->definitions.empty();
   for (int pred_idx = new_idx - 1; pred_idx >= 0; pred_idx--) {
      aco_ptr<Instruction>& pred = new_instructions[pred_idx];
      if (pred->format != Format::SMEM)
         break;

      /* Don't allow clauses with store instructions since the clause's
       * instructions may use the same address. */
      if (is_store || pred->definitions.empty())
         return 1;

      Definition& instr_def = instr->definitions[0];
      Definition& pred_def = pred->definitions[0];

      /* ISA reference doesn't say anything about this, but best to be safe */
      if (regs_intersect(instr_def.physReg(), instr_def.size(), pred_def.physReg(), pred_def.size()))
         return 1;

      for (const Operand& op : pred->operands) {
         if (op.isConstant() || !op.isFixed())
            continue;
         if (regs_intersect(instr_def.physReg(), instr_def.size(), op.physReg(), op.size()))
            return 1;
      }
      for (const Operand& op : instr->operands) {
         if (op.isConstant() || !op.isFixed())
            continue;
         if (regs_intersect(pred_def.physReg(), pred_def.size(), op.physReg(), op.size()))
            return 1;
      }
   }

   return 0;
}

int handle_instruction_gfx8_9(NOP_ctx_gfx8_9& ctx, aco_ptr<Instruction>& instr,
                              std::vector<aco_ptr<Instruction>>& old_instructions,
                              std::vector<aco_ptr<Instruction>>& new_instructions)
{
   int new_idx = new_instructions.size();

   // TODO: setreg / getreg / m0 writes
   // TODO: try to schedule the NOP-causing instruction up to reduce the number of stall cycles

   if (instr->format == Format::SMEM) {
      if (ctx.chip_class == GFX6) {
         bool is_buffer_load = instr->operands.size() && instr->operands[0].size() > 2;
         for (int pred_idx = new_idx - 1; pred_idx >= 0 && pred_idx >= new_idx - 4; pred_idx--) {
            aco_ptr<Instruction>& pred = new_instructions[pred_idx];
            /* A read of an SGPR by an SMRD instruction requires 4 wait states
             * when the SGPR was written by a VALU instruction. */
            if (VALU_writes_sgpr(pred)) {
               Definition pred_def = pred->definitions[pred->definitions.size() - 1];
               for (const Operand& op : instr->operands) {
                  if (regs_intersect(pred_def.physReg(), pred_def.size(), op.physReg(), op.size()))
                     return 4 + pred_idx - new_idx + 1;
               }
            }
            /* According to LLVM, this is an undocumented hardware behavior */
            if (is_buffer_load && pred->isSALU() && pred->definitions.size()) {
               Definition pred_def = pred->definitions[0];
               Operand& op = instr->operands[0];
               if (regs_intersect(pred_def.physReg(), pred_def.size(), op.physReg(), op.size()))
                  return 4 + pred_idx - new_idx + 1;
            }
         }
      }

      /* break off from previous SMEM clause if needed */
      return handle_SMEM_clause(instr, new_idx, new_instructions);

   } else if (instr->isVALU() || instr->format == Format::VINTRP) {
      int NOPs = 0;

      if (instr->isDPP()) {
         /* VALU does not forward EXEC to DPP. */
         if (ctx.VALU_wrexec + 5 >= new_idx)
            NOPs = 5 + ctx.VALU_wrexec - new_idx + 1;

         /* VALU DPP reads VGPR written by VALU */
         for (int pred_idx = new_idx - 1; pred_idx >= 0 && pred_idx >= new_idx - 2; pred_idx--) {
            aco_ptr<Instruction>& pred = new_instructions[pred_idx];
            if ((pred->isVALU() || pred->format == Format::VINTRP) &&
                !pred->definitions.empty() &&
                pred->definitions[0].physReg() == instr->operands[0].physReg()) {
               NOPs = std::max(NOPs, 2 + pred_idx - new_idx + 1);
               break;
            }
         }
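         /* e.g. "v_mov_b32_dpp v1, v0, ..." right after a VALU write to v0
          * (distance 1) gets 2 wait states from the check above; at distance
          * 2 only 1 remains. */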
      }

      /* SALU writes M0 */
      if (instr->format == Format::VINTRP && new_idx > 0 && ctx.chip_class >= GFX9) {
         aco_ptr<Instruction>& pred = new_instructions.back();
         if (pred->isSALU() &&
             !pred->definitions.empty() &&
             pred->definitions[0].physReg() == m0)
            NOPs = std::max(NOPs, 1);
      }

      for (const Operand& op : instr->operands) {
         /* VALU which uses VCCZ */
         if (op.physReg() == PhysReg{251} &&
             ctx.VALU_wrvcc + 5 >= new_idx)
            NOPs = std::max(NOPs, 5 + ctx.VALU_wrvcc - new_idx + 1);

         /* VALU which uses EXECZ */
         if (op.physReg() == PhysReg{252} &&
             ctx.VALU_wrexec + 5 >= new_idx)
            NOPs = std::max(NOPs, 5 + ctx.VALU_wrexec - new_idx + 1);

         /* VALU which reads VCC as a constant */
         if (ctx.VALU_wrvcc + 1 >= new_idx) {
            for (unsigned k = 0; k < op.size(); k++) {
               unsigned reg = op.physReg() + k;
               if (reg == ctx.vcc_physical || reg == ctx.vcc_physical + 1)
                  NOPs = std::max(NOPs, 1);
            }
         }
      }

      switch (instr->opcode) {
      case aco_opcode::v_readlane_b32:
      case aco_opcode::v_readlane_b32_e64:
      case aco_opcode::v_writelane_b32:
      case aco_opcode::v_writelane_b32_e64: {
         if (ctx.VALU_wrsgpr + 4 < new_idx)
            break;
         PhysReg reg = instr->operands[1].physReg();
         for (int pred_idx = new_idx - 1; pred_idx >= 0 && pred_idx >= new_idx - 4; pred_idx--) {
            aco_ptr<Instruction>& pred = new_instructions[pred_idx];
            if (!pred->isVALU() || !VALU_writes_sgpr(pred))
               continue;
            for (const Definition& def : pred->definitions) {
               if (def.physReg() == reg)
                  NOPs = std::max(NOPs, 4 + pred_idx - new_idx + 1);
            }
         }
         break;
      }
      case aco_opcode::v_div_fmas_f32:
      case aco_opcode::v_div_fmas_f64: {
         if (ctx.VALU_wrvcc + 4 >= new_idx)
            NOPs = std::max(NOPs, 4 + ctx.VALU_wrvcc - new_idx + 1);
         break;
      }
      default:
         break;
      }

      /* VALU writes VGPRs that still hold the >64-bit writedata of a
       * preceding MIMG/MUBUF/MTBUF or FLAT/GLOBAL/SCRATCH store */
      // FIXME: handle case if the last instruction of a block without branch is such store
      if (new_idx > 0) {
         aco_ptr<Instruction>& pred = new_instructions.back();
         /* >64-bit MUBUF/MTBUF store with a constant in SOFFSET */
         bool consider_buf = (pred->format == Format::MUBUF || pred->format == Format::MTBUF) &&
                             pred->operands.size() == 4 &&
                             pred->operands[3].size() > 2 &&
                             pred->operands[2].physReg() >= 128;
         /* MIMG store with a 128-bit T# with more than two bits set in dmask (making it a >64-bit store) */
         bool consider_mimg = pred->format == Format::MIMG &&
                              pred->operands.size() == 4 &&
                              pred->operands[3].size() > 2 &&
                              pred->operands[1].size() != 8;
         /* FLAT/GLOBAL/SCRATCH store with >64-bit data */
         bool consider_flat = (pred->isFlatOrGlobal() || pred->format == Format::SCRATCH) &&
                              pred->operands.size() == 3 &&
                              pred->operands[2].size() > 2;
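         /* Illustrative: "global_store_dwordx4 v[0:1], v[4:7], off" followed
          * by a VALU write to any of v4-v7 hits this WaR hazard. Note the
          * store data is operand 2 for FLAT/GLOBAL/SCRATCH but operand 3 for
          * the buffer/image formats, hence the index selection below. */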
         if (consider_buf || consider_mimg || consider_flat) {
            PhysReg wrdata = pred->operands[consider_flat ? 2 : 3].physReg();
            unsigned size = pred->operands[consider_flat ? 2 : 3].size();
            assert(wrdata >= 256);
            for (const Definition& def : instr->definitions) {
               if (regs_intersect(def.physReg(), def.size(), wrdata, size))
                  NOPs = std::max(NOPs, 1);
            }
         }
      }

      if (VALU_writes_sgpr(instr)) {
         for (const Definition& def : instr->definitions) {
            if (def.physReg() == vcc)
               ctx.VALU_wrvcc = NOPs ? new_idx : new_idx + 1;
            else if (def.physReg() == exec)
               ctx.VALU_wrexec = NOPs ? new_idx : new_idx + 1;
            else if (def.physReg() <= 102)
               ctx.VALU_wrsgpr = NOPs ? new_idx : new_idx + 1;
         }
      }

      /* Insert 1 wait state if the dst VGPR of any v_interp_* is read by
       * v_readfirstlane or v_readlane right afterwards, to fix GPU hangs on
       * GFX6. Note that v_writelane_* is apparently not affected. This
       * hazard isn't documented anywhere, but AMD confirmed it.
       */
      if (ctx.chip_class == GFX6 &&
          !new_instructions.empty() &&
          (instr->opcode == aco_opcode::v_readfirstlane_b32 ||
           instr->opcode == aco_opcode::v_readlane_b32)) {
         aco_ptr<Instruction>& pred = new_instructions.back();
         if (pred->format == Format::VINTRP) {
            Definition pred_def = pred->definitions[0];
            Operand& op = instr->operands[0];
            if (regs_intersect(pred_def.physReg(), pred_def.size(), op.physReg(), op.size()))
               NOPs = std::max(NOPs, 1);
         }
      }
      return NOPs;
   } else if (instr->isVMEM() && ctx.VALU_wrsgpr + 5 >= new_idx) {
      /* If the VALU writes the SGPR that is used by a VMEM, the user must add five wait states. */
      for (int pred_idx = new_idx - 1; pred_idx >= 0 && pred_idx >= new_idx - 5; pred_idx--) {
         aco_ptr<Instruction>& pred = new_instructions[pred_idx];
         if (!(pred->isVALU() && VALU_writes_sgpr(pred)))
            continue;

         for (const Definition& def : pred->definitions) {
            if (def.physReg() > 102)
               continue;

            if (instr->operands.size() > 1 &&
                regs_intersect(instr->operands[1].physReg(), instr->operands[1].size(),
                               def.physReg(), def.size())) {
               return 5 + pred_idx - new_idx + 1;
            }

            if (instr->operands.size() > 2 &&
                regs_intersect(instr->operands[2].physReg(), instr->operands[2].size(),
                               def.physReg(), def.size())) {
               return 5 + pred_idx - new_idx + 1;
            }
         }
      }
   } else if (instr->format == Format::SOPP) {
      if (instr->opcode == aco_opcode::s_sendmsg && new_idx > 0) {
         aco_ptr<Instruction>& pred = new_instructions.back();
         if (pred->isSALU() &&
             !pred->definitions.empty() &&
             pred->definitions[0].physReg() == m0)
            return 1;
      }
   }

   return 0;
}

void handle_block_gfx8_9(NOP_ctx_gfx8_9& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block.instructions.size());
   for (unsigned i = 0; i < block.instructions.size(); i++) {
      aco_ptr<Instruction>& instr = block.instructions[i];
      unsigned NOPs = handle_instruction_gfx8_9(ctx, instr, block.instructions, instructions);
      if (NOPs) {
         // TODO: try to move the instruction down
         /* create NOP */
         aco_ptr<SOPP_instruction> nop{create_instruction<SOPP_instruction>(aco_opcode::s_nop, Format::SOPP, 0, 0)};
         nop->imm = NOPs - 1;
         nop->block = -1;
         instructions.emplace_back(std::move(nop));
      }

      instructions.emplace_back(std::move(instr));
   }

   ctx.VALU_wrvcc -= instructions.size();
   ctx.VALU_wrexec -= instructions.size();
   ctx.VALU_wrsgpr -= instructions.size();
   block.instructions = std::move(instructions);
}

void insert_NOPs_gfx8_9(Program* program)
{
   NOP_ctx_gfx8_9 ctx(program);

   for (Block& block : program->blocks) {
      if (block.instructions.empty())
         continue;

      handle_block_gfx8_9(ctx, block);
   }
}

void handle_instruction_gfx10(Program *program, NOP_ctx_gfx10 &ctx, aco_ptr<Instruction>& instr,
                              std::vector<aco_ptr<Instruction>>& old_instructions,
                              std::vector<aco_ptr<Instruction>>& new_instructions)
{
   /* VMEMtoScalarWriteHazard
    * Handle EXEC/M0/SGPR write following a VMEM instruction without a VALU or "waitcnt vmcnt(0)" in-between.
    */
   if (instr->isVMEM() || instr->format == Format::FLAT || instr->format == Format::GLOBAL ||
       instr->format == Format::SCRATCH || instr->format == Format::DS) {
      /* Remember all SGPRs that are read by the VMEM instruction */
      mark_read_regs(instr, ctx.sgprs_read_by_VMEM);
      ctx.sgprs_read_by_VMEM.set(exec);
      if (program->wave_size == 64)
         ctx.sgprs_read_by_VMEM.set(exec_hi);
   } else if (instr->isSALU() || instr->format == Format::SMEM) {
      /* Check if SALU writes an SGPR that was previously read by a VMEM instruction */
      if (check_written_regs(instr, ctx.sgprs_read_by_VMEM)) {
         ctx.sgprs_read_by_VMEM.reset();

         /* Insert v_nop to mitigate the problem */
         aco_ptr<VOP1_instruction> nop{create_instruction<VOP1_instruction>(aco_opcode::v_nop, Format::VOP1, 0, 0)};
         new_instructions.emplace_back(std::move(nop));
      }
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      /* Hazard is mitigated by "s_waitcnt vmcnt(0)" */
      uint16_t imm = static_cast<SOPP_instruction*>(instr.get())->imm;
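      /* GFX10 splits vmcnt across the immediate: bits [3:0] hold the low
       * four bits and bits [15:14] the high two, reassembled below. */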
      unsigned vmcnt = (imm & 0xF) | ((imm & (0x3 << 14)) >> 10);
      if (vmcnt == 0)
         ctx.sgprs_read_by_VMEM.reset();
   } else if (instr->isVALU()) {
      /* Hazard is mitigated by any VALU instruction */
      ctx.sgprs_read_by_VMEM.reset();
   }

   /* VcmpxPermlaneHazard
    * Handle any permlane following a VOPC instruction by inserting a v_mov between them.
    */
   if (instr->format == Format::VOPC) {
      ctx.has_VOPC = true;
   } else if (ctx.has_VOPC &&
              (instr->opcode == aco_opcode::v_permlane16_b32 ||
               instr->opcode == aco_opcode::v_permlanex16_b32)) {
      ctx.has_VOPC = false;

      /* v_nop would be discarded by SQ, so use v_mov with the first operand of the permlane */
      aco_ptr<VOP1_instruction> v_mov{create_instruction<VOP1_instruction>(aco_opcode::v_mov_b32, Format::VOP1, 1, 1)};
      v_mov->definitions[0] = Definition(instr->operands[0].physReg(), v1);
      v_mov->operands[0] = Operand(instr->operands[0].physReg(), v1);
      new_instructions.emplace_back(std::move(v_mov));
   } else if (instr->isVALU() && instr->opcode != aco_opcode::v_nop) {
      ctx.has_VOPC = false;
   }

   /* VcmpxExecWARHazard
    * Handle any VALU instruction writing the exec mask after it was read by a non-VALU instruction.
    */
   if (!instr->isVALU() && instr->reads_exec()) {
      ctx.has_nonVALU_exec_read = true;
   } else if (instr->isVALU()) {
      if (instr_writes_exec(instr)) {
         ctx.has_nonVALU_exec_read = false;

         /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
         aco_ptr<SOPP_instruction> depctr{create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt_depctr, Format::SOPP, 0, 0)};
         depctr->imm = 0xfffe;
         depctr->block = -1;
         new_instructions.emplace_back(std::move(depctr));
      } else if (instr_writes_sgpr(instr)) {
         /* Any VALU instruction that writes an SGPR mitigates the problem */
         ctx.has_nonVALU_exec_read = false;
      }
   } else if (instr->opcode == aco_opcode::s_waitcnt_depctr) {
      /* s_waitcnt_depctr can mitigate the problem if it has a magic imm */
      const SOPP_instruction *sopp = static_cast<const SOPP_instruction *>(instr.get());
      if ((sopp->imm & 0xfffe) == 0xfffe)
         ctx.has_nonVALU_exec_read = false;
   }

   /* SMEMtoVectorWriteHazard
    * Handle any VALU instruction writing an SGPR after an SMEM reads it.
    */
   if (instr->format == Format::SMEM) {
      /* Remember all SGPRs that are read by the SMEM instruction */
      mark_read_regs(instr, ctx.sgprs_read_by_SMEM);
   } else if (VALU_writes_sgpr(instr)) {
      /* Check if VALU writes an SGPR that was previously read by SMEM */
      if (check_written_regs(instr, ctx.sgprs_read_by_SMEM)) {
         ctx.sgprs_read_by_SMEM.reset();

         /* Insert s_mov to mitigate the problem */
         aco_ptr<SOP1_instruction> s_mov{create_instruction<SOP1_instruction>(aco_opcode::s_mov_b32, Format::SOP1, 1, 1)};
         s_mov->definitions[0] = Definition(sgpr_null, s1);
         s_mov->operands[0] = Operand(0u);
         new_instructions.emplace_back(std::move(s_mov));
      }
   } else if (instr->isSALU()) {
      if (instr->format != Format::SOPP) {
         /* SALU can mitigate the hazard */
         ctx.sgprs_read_by_SMEM.reset();
      } else {
         /* Reducing lgkmcnt to 0 always mitigates the hazard. */
         const SOPP_instruction *sopp = static_cast<const SOPP_instruction *>(instr.get());
         if (sopp->opcode == aco_opcode::s_waitcnt_lgkmcnt) {
            if (sopp->imm == 0 && sopp->definitions[0].physReg() == sgpr_null)
               ctx.sgprs_read_by_SMEM.reset();
         } else if (sopp->opcode == aco_opcode::s_waitcnt) {
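            /* lgkmcnt occupies bits [13:8] of the s_waitcnt immediate on GFX10 */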
            unsigned lgkm = (sopp->imm >> 8) & 0x3f;
            if (lgkm == 0)
               ctx.sgprs_read_by_SMEM.reset();
         }
      }
   }

   /* LdsBranchVmemWARHazard
    * Handle VMEM/GLOBAL/SCRATCH->branch->DS and DS->branch->VMEM/GLOBAL/SCRATCH patterns.
    */
   if (instr->isVMEM() || instr->format == Format::GLOBAL || instr->format == Format::SCRATCH) {
      ctx.has_VMEM = true;
      ctx.has_branch_after_VMEM = false;
      /* Mitigation for DS is needed only if there was already a branch after */
      ctx.has_DS = ctx.has_branch_after_DS;
   } else if (instr->format == Format::DS) {
      ctx.has_DS = true;
      ctx.has_branch_after_DS = false;
      /* Mitigation for VMEM is needed only if there was already a branch after */
      ctx.has_VMEM = ctx.has_branch_after_VMEM;
   } else if (instr_is_branch(instr)) {
      ctx.has_branch_after_VMEM = ctx.has_VMEM;
      ctx.has_branch_after_DS = ctx.has_DS;
   } else if (instr->opcode == aco_opcode::s_waitcnt_vscnt) {
      /* Only s_waitcnt_vscnt can mitigate the hazard */
      const SOPK_instruction *sopk = static_cast<const SOPK_instruction *>(instr.get());
      if (sopk->definitions[0].physReg() == sgpr_null && sopk->imm == 0)
         ctx.has_VMEM = ctx.has_branch_after_VMEM = ctx.has_DS = ctx.has_branch_after_DS = false;
   }
   if ((ctx.has_VMEM && ctx.has_branch_after_DS) || (ctx.has_DS && ctx.has_branch_after_VMEM)) {
      ctx.has_VMEM = ctx.has_branch_after_VMEM = ctx.has_DS = ctx.has_branch_after_DS = false;

      /* Insert s_waitcnt_vscnt to mitigate the problem */
      aco_ptr<SOPK_instruction> wait{create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1)};
      wait->definitions[0] = Definition(sgpr_null, s1);
      wait->imm = 0;
      new_instructions.emplace_back(std::move(wait));
   }
}

void handle_block_gfx10(Program *program, NOP_ctx_gfx10& ctx, Block& block)
{
   if (block.instructions.empty())
      return;

   std::vector<aco_ptr<Instruction>> instructions;
   instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      handle_instruction_gfx10(program, ctx, instr, block.instructions, instructions);
      instructions.emplace_back(std::move(instr));
   }

   block.instructions = std::move(instructions);
}

void mitigate_hazards_gfx10(Program *program)
{
   NOP_ctx_gfx10 all_ctx[program->blocks.size()];
   std::stack<unsigned> loop_header_indices;
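
   /* Walk the blocks in program order, seeding each context from its linear
    * predecessors. Back-edge state isn't available on the first pass, so at
    * every loop exit the whole loop body is walked again until the header's
    * incoming context stops changing; join() only ever sets bits, so this
    * converges. */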
   for (unsigned i = 0; i < program->blocks.size(); i++) {
      Block& block = program->blocks[i];
      NOP_ctx_gfx10 &ctx = all_ctx[i];

      if (block.kind & block_kind_loop_header) {
         loop_header_indices.push(i);
      } else if (block.kind & block_kind_loop_exit) {
         /* Go through the whole loop again */
         for (unsigned idx = loop_header_indices.top(); idx < i; idx++) {
            NOP_ctx_gfx10 loop_block_ctx;
            for (unsigned b : program->blocks[idx].linear_preds)
               loop_block_ctx.join(all_ctx[b]);

            handle_block_gfx10(program, loop_block_ctx, program->blocks[idx]);

            /* We only need to continue if the loop header context changed */
            if (idx == loop_header_indices.top() && loop_block_ctx == all_ctx[idx])
               break;

            all_ctx[idx] = loop_block_ctx;
         }

         loop_header_indices.pop();
      }

      for (unsigned b : block.linear_preds)
         ctx.join(all_ctx[b]);

      handle_block_gfx10(program, ctx, block);
   }
}

} /* end namespace */

void insert_NOPs(Program* program)
{
   if (program->chip_class >= GFX10)
      mitigate_hazards_gfx10(program);
   else
      insert_NOPs_gfx8_9(program);
}

}