/*
 * Copyright © 2019 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
33 enum chip_class chip_class
;
34 unsigned vcc_physical
;
37 /* just initialize these with something less than max NOPs */
38 int VALU_wrexec
= -10;
40 int VALU_wrsgpr
= -10;
43 int last_VMEM_since_scalar_write
= -1;
44 bool has_VOPC
= false;
45 bool has_nonVALU_exec_read
= false;
46 std::bitset
<128> sgprs_read_by_SMEM
;
48 NOP_ctx(Program
* program
) : chip_class(program
->chip_class
) {
49 vcc_physical
= program
->config
->num_sgprs
- 2;
53 template <std::size_t N
>
54 bool check_written_regs(const aco_ptr
<Instruction
> &instr
, const std::bitset
<N
> &check_regs
)
56 return std::any_of(instr
->definitions
.begin(), instr
->definitions
.end(), [&check_regs
](const Definition
&def
) -> bool {
57 bool writes_any
= false;
58 for (unsigned i
= 0; i
< def
.size(); i
++) {
59 unsigned def_reg
= def
.physReg() + i
;
60 writes_any
|= def_reg
< check_regs
.size() && check_regs
[def_reg
];
66 template <std::size_t N
>
67 void mark_read_regs(const aco_ptr
<Instruction
> &instr
, std::bitset
<N
> ®_reads
)
69 for (const Operand
&op
: instr
->operands
) {
70 for (unsigned i
= 0; i
< op
.size(); i
++) {
71 unsigned reg
= op
.physReg() + i
;
72 if (reg
< reg_reads
.size())
78 bool VALU_writes_sgpr(aco_ptr
<Instruction
>& instr
)
80 if ((uint32_t) instr
->format
& (uint32_t) Format::VOPC
)
82 if (instr
->isVOP3() && instr
->definitions
.size() == 2)
84 if (instr
->opcode
== aco_opcode::v_readfirstlane_b32
|| instr
->opcode
== aco_opcode::v_readlane_b32
)
89 bool instr_reads_exec(const aco_ptr
<Instruction
>& instr
)
91 return std::any_of(instr
->operands
.begin(), instr
->operands
.end(), [](const Operand
&op
) -> bool {
92 return op
.physReg() == exec_lo
|| op
.physReg() == exec_hi
;
96 bool instr_writes_exec(const aco_ptr
<Instruction
>& instr
)
98 return std::any_of(instr
->definitions
.begin(), instr
->definitions
.end(), [](const Definition
&def
) -> bool {
99 return def
.physReg() == exec_lo
|| def
.physReg() == exec_hi
;
103 bool instr_writes_sgpr(const aco_ptr
<Instruction
>& instr
)
105 return std::any_of(instr
->definitions
.begin(), instr
->definitions
.end(), [](const Definition
&def
) -> bool {
106 return def
.getTemp().type() == RegType::sgpr
;
110 bool regs_intersect(PhysReg a_reg
, unsigned a_size
, PhysReg b_reg
, unsigned b_size
)
112 return a_reg
> b_reg
?
113 (a_reg
- b_reg
< b_size
) :
114 (b_reg
- a_reg
< a_size
);
117 unsigned handle_SMEM_clause(aco_ptr
<Instruction
>& instr
, int new_idx
,
118 std::vector
<aco_ptr
<Instruction
>>& new_instructions
)
120 //TODO: s_dcache_inv needs to be in it's own group on GFX10 (and previous versions?)
121 const bool is_store
= instr
->definitions
.empty();
122 for (int pred_idx
= new_idx
- 1; pred_idx
>= 0; pred_idx
--) {
123 aco_ptr
<Instruction
>& pred
= new_instructions
[pred_idx
];
124 if (pred
->format
!= Format::SMEM
)
127 /* Don't allow clauses with store instructions since the clause's
128 * instructions may use the same address. */
129 if (is_store
|| pred
->definitions
.empty())
132 Definition
& instr_def
= instr
->definitions
[0];
133 Definition
& pred_def
= pred
->definitions
[0];
135 /* ISA reference doesn't say anything about this, but best to be safe */
136 if (regs_intersect(instr_def
.physReg(), instr_def
.size(), pred_def
.physReg(), pred_def
.size()))
139 for (const Operand
& op
: pred
->operands
) {
140 if (op
.isConstant() || !op
.isFixed())
142 if (regs_intersect(instr_def
.physReg(), instr_def
.size(), op
.physReg(), op
.size()))
145 for (const Operand
& op
: instr
->operands
) {
146 if (op
.isConstant() || !op
.isFixed())
148 if (regs_intersect(pred_def
.physReg(), pred_def
.size(), op
.physReg(), op
.size()))
156 int handle_instruction(NOP_ctx
& ctx
, aco_ptr
<Instruction
>& instr
,
157 std::vector
<aco_ptr
<Instruction
>>& old_instructions
,
158 std::vector
<aco_ptr
<Instruction
>>& new_instructions
)
160 int new_idx
= new_instructions
.size();
162 // TODO: setreg / getreg / m0 writes
163 // TODO: try to schedule the NOP-causing instruction up to reduce the number of stall cycles
165 /* break off from prevous SMEM clause if needed */
166 if (instr
->format
== Format::SMEM
&& ctx
.chip_class
>= GFX8
) {
167 return handle_SMEM_clause(instr
, new_idx
, new_instructions
);
168 } else if (instr
->isVALU() || instr
->format
== Format::VINTRP
) {
171 if (instr
->isDPP()) {
172 /* VALU does not forward EXEC to DPP. */
173 if (ctx
.VALU_wrexec
+ 5 >= new_idx
)
174 NOPs
= 5 + ctx
.VALU_wrexec
- new_idx
+ 1;
176 /* VALU DPP reads VGPR written by VALU */
177 for (int pred_idx
= new_idx
- 1; pred_idx
>= 0 && pred_idx
>= new_idx
- 2; pred_idx
--) {
178 aco_ptr
<Instruction
>& pred
= new_instructions
[pred_idx
];
179 if ((pred
->isVALU() || pred
->format
== Format::VINTRP
) &&
180 !pred
->definitions
.empty() &&
181 pred
->definitions
[0].physReg() == instr
->operands
[0].physReg()) {
182 NOPs
= std::max(NOPs
, 2 + pred_idx
- new_idx
+ 1);
189 if (instr
->format
== Format::VINTRP
&& new_idx
> 0 && ctx
.chip_class
>= GFX9
) {
190 aco_ptr
<Instruction
>& pred
= new_instructions
.back();
191 if (pred
->isSALU() &&
192 !pred
->definitions
.empty() &&
193 pred
->definitions
[0].physReg() == m0
)
194 NOPs
= std::max(NOPs
, 1);
197 for (const Operand
& op
: instr
->operands
) {
198 /* VALU which uses VCCZ */
199 if (op
.physReg() == PhysReg
{251} &&
200 ctx
.VALU_wrvcc
+ 5 >= new_idx
)
201 NOPs
= std::max(NOPs
, 5 + ctx
.VALU_wrvcc
- new_idx
+ 1);
203 /* VALU which uses EXECZ */
204 if (op
.physReg() == PhysReg
{252} &&
205 ctx
.VALU_wrexec
+ 5 >= new_idx
)
206 NOPs
= std::max(NOPs
, 5 + ctx
.VALU_wrexec
- new_idx
+ 1);
208 /* VALU which reads VCC as a constant */
209 if (ctx
.VALU_wrvcc
+ 1 >= new_idx
) {
210 for (unsigned k
= 0; k
< op
.size(); k
++) {
211 unsigned reg
= op
.physReg() + k
;
212 if (reg
== ctx
.vcc_physical
|| reg
== ctx
.vcc_physical
+ 1)
213 NOPs
= std::max(NOPs
, 1);
218 switch (instr
->opcode
) {
219 case aco_opcode::v_readlane_b32
:
220 case aco_opcode::v_writelane_b32
: {
221 if (ctx
.VALU_wrsgpr
+ 4 < new_idx
)
223 PhysReg reg
= instr
->operands
[1].physReg();
224 for (int pred_idx
= new_idx
- 1; pred_idx
>= 0 && pred_idx
>= new_idx
- 4; pred_idx
--) {
225 aco_ptr
<Instruction
>& pred
= new_instructions
[pred_idx
];
226 if (!pred
->isVALU() || !VALU_writes_sgpr(pred
))
228 for (const Definition
& def
: pred
->definitions
) {
229 if (def
.physReg() == reg
)
230 NOPs
= std::max(NOPs
, 4 + pred_idx
- new_idx
+ 1);
235 case aco_opcode::v_div_fmas_f32
:
236 case aco_opcode::v_div_fmas_f64
: {
237 if (ctx
.VALU_wrvcc
+ 4 >= new_idx
)
238 NOPs
= std::max(NOPs
, 4 + ctx
.VALU_wrvcc
- new_idx
+ 1);
245 /* Write VGPRs holding writedata > 64 bit from MIMG/MUBUF instructions */
246 // FIXME: handle case if the last instruction of a block without branch is such store
247 // TODO: confirm that DS instructions cannot cause WAR hazards here
249 aco_ptr
<Instruction
>& pred
= new_instructions
.back();
250 if (pred
->isVMEM() &&
251 pred
->operands
.size() == 4 &&
252 pred
->operands
[3].size() > 2 &&
253 pred
->operands
[1].size() != 8 &&
254 (pred
->format
!= Format::MUBUF
|| pred
->operands
[2].physReg() >= 102)) {
255 /* Ops that use a 256-bit T# do not need a wait state.
256 * BUFFER_STORE_* operations that use an SGPR for "offset"
257 * do not require any wait states. */
258 PhysReg wrdata
= pred
->operands
[3].physReg();
259 unsigned size
= pred
->operands
[3].size();
260 assert(wrdata
>= 256);
261 for (const Definition
& def
: instr
->definitions
) {
262 if (regs_intersect(def
.physReg(), def
.size(), wrdata
, size
))
263 NOPs
= std::max(NOPs
, 1);
268 if (VALU_writes_sgpr(instr
)) {
269 for (const Definition
& def
: instr
->definitions
) {
270 if (def
.physReg() == vcc
)
271 ctx
.VALU_wrvcc
= NOPs
? new_idx
: new_idx
+ 1;
272 else if (def
.physReg() == exec
)
273 ctx
.VALU_wrexec
= NOPs
? new_idx
: new_idx
+ 1;
274 else if (def
.physReg() <= 102)
275 ctx
.VALU_wrsgpr
= NOPs
? new_idx
: new_idx
+ 1;
279 } else if (instr
->isVMEM() && ctx
.VALU_wrsgpr
+ 5 >= new_idx
) {
280 /* If the VALU writes the SGPR that is used by a VMEM, the user must add five wait states. */
281 for (int pred_idx
= new_idx
- 1; pred_idx
>= 0 && pred_idx
>= new_idx
- 5; pred_idx
--) {
282 aco_ptr
<Instruction
>& pred
= new_instructions
[pred_idx
];
283 if (!(pred
->isVALU() && VALU_writes_sgpr(pred
)))
286 for (const Definition
& def
: pred
->definitions
) {
287 if (def
.physReg() > 102)
290 if (instr
->operands
.size() > 1 &&
291 regs_intersect(instr
->operands
[1].physReg(), instr
->operands
[1].size(),
292 def
.physReg(), def
.size())) {
293 return 5 + pred_idx
- new_idx
+ 1;
296 if (instr
->operands
.size() > 2 &&
297 regs_intersect(instr
->operands
[2].physReg(), instr
->operands
[2].size(),
298 def
.physReg(), def
.size())) {
299 return 5 + pred_idx
- new_idx
+ 1;
308 std::pair
<int, int> handle_instruction_gfx10(NOP_ctx
& ctx
, aco_ptr
<Instruction
>& instr
,
309 std::vector
<aco_ptr
<Instruction
>>& old_instructions
,
310 std::vector
<aco_ptr
<Instruction
>>& new_instructions
)
312 int new_idx
= new_instructions
.size();
316 /* break off from prevous SMEM group ("clause" seems to mean something different in RDNA) if needed */
317 if (instr
->format
== Format::SMEM
)
318 sNOPs
= std::max(sNOPs
, handle_SMEM_clause(instr
, new_idx
, new_instructions
));
320 /* handle EXEC/M0/SGPR write following a VMEM instruction without a VALU or "waitcnt vmcnt(0)" in-between */
321 if (instr
->isSALU() || instr
->format
== Format::SMEM
) {
322 if (!instr
->definitions
.empty() && ctx
.last_VMEM_since_scalar_write
!= -1) {
323 ctx
.last_VMEM_since_scalar_write
= -1;
326 } else if (instr
->isVMEM() || instr
->isFlatOrGlobal()) {
327 ctx
.last_VMEM_since_scalar_write
= new_idx
;
328 } else if (instr
->opcode
== aco_opcode::s_waitcnt
) {
329 uint16_t imm
= static_cast<SOPP_instruction
*>(instr
.get())->imm
;
330 unsigned vmcnt
= (imm
& 0xF) | ((imm
& (0x3 << 14)) >> 10);
332 ctx
.last_VMEM_since_scalar_write
= -1;
333 } else if (instr
->isVALU()) {
334 ctx
.last_VMEM_since_scalar_write
= -1;
337 /* VcmpxPermlaneHazard
338 * Handle any permlane following a VOPC instruction, insert v_mov between them.
340 if (instr
->format
== Format::VOPC
) {
342 } else if (ctx
.has_VOPC
&&
343 (instr
->opcode
== aco_opcode::v_permlane16_b32
||
344 instr
->opcode
== aco_opcode::v_permlanex16_b32
)) {
345 ctx
.has_VOPC
= false;
347 /* v_nop would be discarded by SQ, so use v_mov with the first operand of the permlane */
348 aco_ptr
<VOP1_instruction
> v_mov
{create_instruction
<VOP1_instruction
>(aco_opcode::v_mov_b32
, Format::VOP1
, 1, 1)};
349 v_mov
->definitions
[0] = Definition(instr
->operands
[0].physReg(), v1
);
350 v_mov
->operands
[0] = Operand(instr
->operands
[0].physReg(), v1
);
351 new_instructions
.emplace_back(std::move(v_mov
));
352 } else if (instr
->isVALU() && instr
->opcode
!= aco_opcode::v_nop
) {
353 ctx
.has_VOPC
= false;
356 /* VcmpxExecWARHazard
357 * Handle any VALU instruction writing the exec mask after it was read by a non-VALU instruction.
359 if (!instr
->isVALU() && instr_reads_exec(instr
)) {
360 ctx
.has_nonVALU_exec_read
= true;
361 } else if (instr
->isVALU()) {
362 if (instr_writes_exec(instr
)) {
363 ctx
.has_nonVALU_exec_read
= false;
365 /* Insert s_waitcnt_depctr instruction with magic imm to mitigate the problem */
366 aco_ptr
<SOPP_instruction
> depctr
{create_instruction
<SOPP_instruction
>(aco_opcode::s_waitcnt_depctr
, Format::SOPP
, 0, 1)};
367 depctr
->imm
= 0xfffe;
368 depctr
->definitions
[0] = Definition(sgpr_null
, s1
);
369 new_instructions
.emplace_back(std::move(depctr
));
370 } else if (instr_writes_sgpr(instr
)) {
371 /* Any VALU instruction that writes an SGPR mitigates the problem */
372 ctx
.has_nonVALU_exec_read
= false;
374 } else if (instr
->opcode
== aco_opcode::s_waitcnt_depctr
) {
375 /* s_waitcnt_depctr can mitigate the problem if it has a magic imm */
376 const SOPP_instruction
*sopp
= static_cast<const SOPP_instruction
*>(instr
.get());
377 if ((sopp
->imm
& 0xfffe) == 0xfffe)
378 ctx
.has_nonVALU_exec_read
= false;
381 /* SMEMtoVectorWriteHazard
382 * Handle any VALU instruction writing an SGPR after an SMEM reads it.
384 if (instr
->format
== Format::SMEM
) {
385 /* Remember all SGPRs that are read by the SMEM instruction */
386 mark_read_regs(instr
, ctx
.sgprs_read_by_SMEM
);
387 } else if (VALU_writes_sgpr(instr
)) {
388 /* Check if VALU writes an SGPR that was previously read by SMEM */
389 if (check_written_regs(instr
, ctx
.sgprs_read_by_SMEM
)) {
390 ctx
.sgprs_read_by_SMEM
.reset();
392 /* Insert s_mov to mitigate the problem */
393 aco_ptr
<SOP1_instruction
> s_mov
{create_instruction
<SOP1_instruction
>(aco_opcode::s_mov_b32
, Format::SOP1
, 1, 1)};
394 s_mov
->definitions
[0] = Definition(sgpr_null
, s1
);
395 s_mov
->operands
[0] = Operand(0u);
396 new_instructions
.emplace_back(std::move(s_mov
));
398 } else if (instr
->isSALU()) {
399 if (instr
->format
!= Format::SOPP
) {
400 /* SALU can mitigate the hazard */
401 ctx
.sgprs_read_by_SMEM
.reset();
403 /* Reducing lgkmcnt count to 0 always mitigates the hazard. */
404 const SOPP_instruction
*sopp
= static_cast<const SOPP_instruction
*>(instr
.get());
405 if (sopp
->opcode
== aco_opcode::s_waitcnt_lgkmcnt
) {
406 if (sopp
->imm
== 0 && sopp
->definitions
[0].physReg() == sgpr_null
)
407 ctx
.sgprs_read_by_SMEM
.reset();
408 } else if (sopp
->opcode
== aco_opcode::s_waitcnt
) {
409 unsigned lgkm
= (sopp
->imm
>> 8) & 0x3f;
411 ctx
.sgprs_read_by_SMEM
.reset();
416 return std::make_pair(sNOPs
, vNOPs
);
420 void handle_block(NOP_ctx
& ctx
, Block
& block
)
422 std::vector
<aco_ptr
<Instruction
>> instructions
;
423 instructions
.reserve(block
.instructions
.size());
424 for (unsigned i
= 0; i
< block
.instructions
.size(); i
++) {
425 aco_ptr
<Instruction
>& instr
= block
.instructions
[i
];
426 unsigned NOPs
= handle_instruction(ctx
, instr
, block
.instructions
, instructions
);
428 // TODO: try to move the instruction down
430 aco_ptr
<SOPP_instruction
> nop
{create_instruction
<SOPP_instruction
>(aco_opcode::s_nop
, Format::SOPP
, 0, 0)};
433 instructions
.emplace_back(std::move(nop
));
436 instructions
.emplace_back(std::move(instr
));
439 ctx
.VALU_wrvcc
-= instructions
.size();
440 ctx
.VALU_wrexec
-= instructions
.size();
441 ctx
.VALU_wrsgpr
-= instructions
.size();
442 block
.instructions
= std::move(instructions
);
445 void handle_block_gfx10(NOP_ctx
& ctx
, Block
& block
)
447 std::vector
<aco_ptr
<Instruction
>> instructions
;
448 instructions
.reserve(block
.instructions
.size());
449 for (unsigned i
= 0; i
< block
.instructions
.size(); i
++) {
450 aco_ptr
<Instruction
>& instr
= block
.instructions
[i
];
451 std::pair
<int, int> NOPs
= handle_instruction_gfx10(ctx
, instr
, block
.instructions
, instructions
);
452 for (int i
= 0; i
< NOPs
.second
; i
++) {
453 // TODO: try to move the instruction down
455 aco_ptr
<VOP1_instruction
> nop
{create_instruction
<VOP1_instruction
>(aco_opcode::v_nop
, Format::VOP1
, 0, 0)};
456 instructions
.emplace_back(std::move(nop
));
459 // TODO: try to move the instruction down
461 aco_ptr
<SOPP_instruction
> nop
{create_instruction
<SOPP_instruction
>(aco_opcode::s_nop
, Format::SOPP
, 0, 0)};
462 nop
->imm
= NOPs
.first
- 1;
464 instructions
.emplace_back(std::move(nop
));
467 instructions
.emplace_back(std::move(instr
));
470 block
.instructions
= std::move(instructions
);
473 } /* end namespace */
476 void insert_NOPs(Program
* program
)
478 NOP_ctx
ctx(program
);
480 for (Block
& block
: program
->blocks
) {
481 if (block
.instructions
.empty())
484 if (ctx
.chip_class
>= GFX10
)
485 handle_block_gfx10(ctx
, block
);
487 handle_block(ctx
, block
);