/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <algorithm>
#include <map>
#include <stack>

#include "aco_ir.h"
#include "vulkan/radv_shader.h"

namespace aco {

namespace {
/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward) and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry
 * consisting of the information about the cnt values to be waited for.
 * Note: After merge-nodes, it might occur that for the same register
 *       multiple cnt values are to be waited for.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - or erase gprs with counters higher than to be waited for.
 */
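
/* For illustration, a hypothetical input this pass would transform:
 *
 *    buffer_load_dword v0, ...   ; raises event_vmem, creates a wait_entry for v0
 *    v_add_f32 v1, v0, v0        ; reads v0
 *
 * becomes:
 *
 *    buffer_load_dword v0, ...
 *    s_waitcnt vmcnt(0)          ; inserted because the read of v0 hits its wait_entry
 *    v_add_f32 v1, v0, v0
 */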
// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load
/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
};
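
/* Example of the ordering guarantee above: two consecutive LDS loads both raise
 * event_lds and retire in order, so waiting for lgkm_cnt <= 1 already covers the
 * older of the two. No such shortcut is possible for smem (or flat before GFX10):
 * entries created by unordered events must be waited on with a count of 0. */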
enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
};
static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;

uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}

struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}

   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }

   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      switch (chip) {
      case GFX10:
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      case GFX9:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      default:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9 and we don't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10 and we don't have to worry about the architecture when interpreting the immediate */
      return imm;
   }
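
   /* Worked example for the GFX9 layout above: vm=0x12, exp=0, lgkm=0x3 gives
    * ((0x12 & 0x30) << 10) | ((0x3 & 0xf) << 8) | ((0x0 & 0x7) << 4) | (0x12 & 0xf)
    * = 0x4000 | 0x0300 | 0x0000 | 0x0002 = 0x4302. */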

   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};
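
/* Combining two wait_imm values takes the element-wise minimum: waiting for a
 * smaller outstanding count is the stricter requirement, so e.g. combining
 * vmcnt(2) with vmcnt(0) yields vmcnt(0). */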

struct wait_entry {
   wait_imm imm;
   uint16_t events; /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read:1;
   bool logical:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
           : imm(imm), events(event), counters(get_counters_for_event(event)),
             wait_on_read(wait_on_read), logical(logical) {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read = wait_on_read || other.wait_on_read;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      /* flat is the only event that interacts with two counters */
      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};

struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[barrier_count];
   uint16_t barrier_events[barrier_count] = {}; /* use wait_event notion; zero-initialized since join() reads it */

   std::map<PhysReg,wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program *program_)
           : program(program_),
             chip_class(program_->chip_class),
             max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
             max_exp_cnt(6),
             max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
             max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
             unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
      {
         std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
         if (entry.second.logical != logical)
            continue;

         if (it != gpr_map.end()) {
            changed |= it->second.join(entry.second);
         } else {
            gpr_map.insert(entry);
            changed = true;
         }
      }

      for (unsigned i = 0; i < barrier_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= other->barrier_events[i] & ~barrier_events[i];
         barrier_events[i] |= other->barrier_events[i];
      }

      return changed;
   }
};

/* check instr's reads and writes against the in-flight entries and return the
 * wait that must be emitted before instr */
wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
            it->second.remove_counter(counter_vm);
            if (!it->second.counters)
               it = ctx.gpr_map.erase(it);
            continue;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
               it->second.remove_counter(counter_lgkm);
               if (!it->second.counters)
                  it = ctx.gpr_map.erase(it);
               continue;
            }
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}

/* if instr is an s_waitcnt or s_waitcnt_vscnt, translate its immediate into a wait_imm */
wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}

/* compute the wait that has to be inserted before instr and update ctx as if it was executed */
wait_imm kill(Instruction* instr, wait_ctx& ctx)
{
   wait_imm imm;
   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      imm.combine(check_instr(instr, ctx));

   imm.combine(parse_wait_instr(ctx, instr));

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyways since we're about to do s_endpgm.
    */
   if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.chip_class >= GFX8);
      imm.lgkm = 0;
   }

   /* only SMEM carries the fields read below, so guard the cast on the format */
   if (ctx.chip_class >= GFX10 && instr->format == Format::SMEM) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
      if (ctx.pending_s_buffer_store &&
          !smem->definitions.empty() &&
          !smem->can_reorder && smem->barrier == barrier_buffer) {
         imm.lgkm = 0;
      }
   }
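
   /* A hypothetical sequence hitting the workaround above:
    *    s_buffer_store_dword s4, s[0:3], 0x0
    *    s_buffer_load_dword  s5, s[0:3], 0x0   ; may see stale data on GFX10
    * Setting imm.lgkm = 0 forces an s_waitcnt lgkmcnt(0) between the two. */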

   if (instr->format == Format::PSEUDO_BARRIER) {
      uint32_t workgroup_size = UINT32_MAX;
      if (ctx.program->stage & sw_cs) {
         unsigned* bsize = ctx.program->info->cs.block_size;
         workgroup_size = bsize[0] * bsize[1] * bsize[2];
      }
      switch (instr->opcode) {
      case aco_opcode::p_memory_barrier_common:
         imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
         if (workgroup_size > ctx.program->wave_size)
            imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
         break;
      case aco_opcode::p_memory_barrier_atomic:
         imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
         break;
      /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
      case aco_opcode::p_memory_barrier_buffer:
      case aco_opcode::p_memory_barrier_image:
         imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
         break;
      case aco_opcode::p_memory_barrier_shared:
         if (workgroup_size > ctx.program->wave_size)
            imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
         break;
      case aco_opcode::p_memory_barrier_gs_data:
         imm.combine(ctx.barrier_imm[ffs(barrier_gs_data) - 1]);
         break;
      case aco_opcode::p_memory_barrier_gs_sendmsg:
         imm.combine(ctx.barrier_imm[ffs(barrier_gs_sendmsg) - 1]);
         break;
      default:
         assert(false);
         break;
      }
   }

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < barrier_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
            bar.exp = wait_imm::unset_counter;
            bar_ev &= ~exp_events;
         }
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
            bar.vm = wait_imm::unset_counter;
            bar_ev &= ~(vm_events & ~event_flat);
         }
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
            bar.lgkm = wait_imm::unset_counter;
            bar_ev &= ~(lgkm_events & ~event_flat);
         }
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
            bar.vs = wait_imm::unset_counter;
            bar_ev &= ~vs_events;
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end())
      {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            it->second.remove_counter(counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            it->second.remove_counter(counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            it->second.remove_counter(counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            it->second.remove_counter(counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }

   return imm;
}

void update_barrier_counter(uint8_t *ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}

void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, barrier_interaction barrier)
{
   for (unsigned i = 0; i < barrier_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (barrier & (1 << i)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}
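
/* Example of how this bookkeeping is consumed: after an LDS store,
 * barrier_imm[ffs(barrier_shared) - 1].lgkm is set to 0 and then bumped for
 * later in-order lgkm events of the same kind. When kill() processes a
 * p_memory_barrier_shared, it combines exactly this value into its wait, so
 * the barrier waits just long enough for the store to have completed. */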

void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, barrier);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}

void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, barrier);

   /* iterate by reference so the entries themselves are reset, not copies */
   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
}

void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
}

/* record the events and wait entries generated by instr */
void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, barrier_buffer);
      else
         update_counters(ctx, event_flat, barrier_buffer);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      update_counters(ctx, event_smem, smem->barrier);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->can_reorder &&
               smem->barrier == barrier_buffer)
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      bool gds = static_cast<DS_instruction*>(instr)->gds;
      update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
      if (gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);

      if (gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_barrier_interaction(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev);

      if (ctx.chip_class == GFX6 &&
          instr->format != Format::MIMG &&
          instr->operands.size() == 4) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 &&
                 instr->format == Format::MIMG &&
                 instr->operands[1].regClass().type() == RegType::vgpr) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
      }
      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg ||
          instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg, get_barrier_interaction(instr));
      break;
   }
   default:
      break;
   }
}

void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      waitcnt->block = -1;
      instructions.emplace_back(waitcnt);
   }
}

void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;
   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      queued_imm.combine(kill(instr.get(), ctx));

      gen(instr.get(), ctx);

      /* existing waitcnt instructions are dropped here: kill() already folded
       * their immediates into queued_imm, so back-to-back waits collapse into
       * a single s_waitcnt with the strictest counts */
      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   wait_ctx in_ctx[program->blocks.size()];
   wait_ctx out_ctx[program->blocks.size()];
   for (unsigned i = 0; i < program->blocks.size(); i++)
      in_ctx[i] = wait_ctx(program);
   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      in_ctx[current.index] = ctx;

      if (done[current.index] && !changed)
         continue;

      if (current.instructions.empty()) {
         out_ctx[current.index] = ctx;
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = ctx;
   }
}

}