/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <algorithm>
#include <map>
#include <set>
#include <stack>
#include <vector>
#include <math.h>

#include "aco_ir.h"
#include "vulkan/radv_shader.h"

namespace aco {

namespace {
/*
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward) and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry
 * consisting of the information about the cnt values to be waited for.
 * Note: After merge nodes, it might occur that multiple cnt values have to be
 * waited for on the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - or erase gprs with counters higher than to be waited for.
 */
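
/* Rough illustration (hypothetical instruction sequence, not taken from real
 * output): for a VMEM load whose destination VGPR is read a few instructions
 * later, the pass records a wait_entry for the destination and emits a wait
 * right before the first use:
 *
 *    buffer_load_dword v0, ...    ; creates a wait_entry for v0 (event_vmem)
 *    v_add_u32 v1, v2, v3         ; independent, no wait needed yet
 *    s_waitcnt vmcnt(0)           ; inserted by this pass
 *    v_mul_f32 v4, v0, v5         ; first read of v0
 */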
// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load
/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   num_events = 12,
};
enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
   num_counters = 4,
};
static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null |
                                   event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;
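
/* Each group above lists the events whose outstanding operations are tracked
 * by one hardware counter (expcnt, lgkmcnt, vmcnt and, on GFX10+, vscnt).
 * FLAT accesses can target either LDS or memory, which is why event_flat
 * appears in both the lgkm and vm groups. */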
uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}
uint16_t get_events_for_counter(counter_type ctr)
{
   switch (ctr) {
   case counter_exp:
      return exp_events;
   case counter_lgkm:
      return lgkm_events;
   case counter_vm:
      return vm_events;
   case counter_vs:
      return vs_events;
   }
   return 0;
}
struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}

   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }

   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      switch (chip) {
      case GFX10:
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      case GFX9:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      default:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9 and now we won't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10 and now we won't have to worry about the architecture when interpreting the immediate */
      return imm;
   }
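
   /* For reference, inferred from the shifts above: the s_waitcnt immediate
    * packs vmcnt[3:0] into bits [3:0], expcnt into bits [6:4], lgkmcnt into
    * bits [11:8] (widened to [13:8] on GFX10) and, on GFX9+, vmcnt[5:4] into
    * bits [15:14]. */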
   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};
struct wait_entry {
   wait_imm imm;
   uint16_t events; /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read:1;
   bool logical:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
      : imm(imm), events(event), counters(get_counters_for_event(event)),
        wait_on_read(wait_on_read), logical(logical) {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read = wait_on_read || other.wait_on_read;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};
struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[barrier_count];
   uint16_t barrier_events[barrier_count] = {}; /* use wait_event notion */

   std::map<PhysReg,wait_entry> gpr_map;

   /* used for vmem/smem scores */
   bool collect_statistics;
   Instruction *gen_instr;
   std::map<Instruction *, unsigned> unwaited_instrs[num_counters];
   std::map<PhysReg,std::set<Instruction *>> reg_instrs[num_counters];
   std::vector<unsigned> wait_distances[num_events];

   wait_ctx() {}
   wait_ctx(Program *program_)
           : program(program_),
             chip_class(program_->chip_class),
             max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
             max_exp_cnt(6),
             max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
             max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
             unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}
   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
      {
         std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
         if (entry.second.logical != logical)
            continue;

         if (it != gpr_map.end()) {
            changed |= it->second.join(entry.second);
         } else {
            gpr_map.insert(entry);
            changed = true;
         }
      }

      for (unsigned i = 0; i < barrier_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= other->barrier_events[i] & ~barrier_events[i];
         barrier_events[i] |= other->barrier_events[i];
      }

      /* these are used for statistics, so don't update "changed" */
      for (unsigned i = 0; i < num_counters; i++) {
         for (std::pair<Instruction *, unsigned> instr : other->unwaited_instrs[i]) {
            auto pos = unwaited_instrs[i].find(instr.first);
            if (pos == unwaited_instrs[i].end())
               unwaited_instrs[i].insert(instr);
            else
               pos->second = std::min(pos->second, instr.second);
         }
         /* don't use a foreach loop to avoid copies */
         for (auto it = other->reg_instrs[i].begin(); it != other->reg_instrs[i].end(); ++it)
            reg_instrs[i][it->first].insert(it->second.begin(), it->second.end());
      }

      return changed;
   }
   void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter) {
      if (collect_statistics && (entry.counters & counter)) {
         unsigned counter_idx = ffs(counter) - 1;
         for (Instruction *instr : reg_instrs[counter_idx][reg]) {
            auto pos = unwaited_instrs[counter_idx].find(instr);
            if (pos == unwaited_instrs[counter_idx].end())
               continue;

            unsigned distance = pos->second;
            unsigned events = entry.events & get_events_for_counter(counter);
            while (events) {
               unsigned event_idx = u_bit_scan(&events);
               wait_distances[event_idx].push_back(distance);
            }

            unwaited_instrs[counter_idx].erase(instr);
         }
         reg_instrs[counter_idx][reg].clear();
      }

      entry.remove_counter(counter);
   }
   void advance_unwaited_instrs()
   {
      for (unsigned i = 0; i < num_counters; i++) {
         for (auto it = unwaited_instrs[i].begin(); it != unwaited_instrs[i].end(); ++it)
            it->second++;
      }
   }
};
wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
            it->second.remove_counter(counter_vm);
            if (!it->second.counters)
               it = ctx.gpr_map.erase(it);
            continue;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
               it->second.remove_counter(counter_lgkm);
               if (!it->second.counters)
                  it = ctx.gpr_map.erase(it);
               continue;
            }
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}
wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}
kill(Instruction
* instr
, wait_ctx
& ctx
)
446 if (ctx
.exp_cnt
|| ctx
.vm_cnt
|| ctx
.lgkm_cnt
)
447 imm
.combine(check_instr(instr
, ctx
));
449 imm
.combine(parse_wait_instr(ctx
, instr
));
452 /* It's required to wait for scalar stores before "writing back" data.
453 * It shouldn't cost anything anyways since we're about to do s_endpgm.
455 if (ctx
.lgkm_cnt
&& instr
->opcode
== aco_opcode::s_dcache_wb
) {
456 assert(ctx
.chip_class
>= GFX8
);
460 if (ctx
.chip_class
>= GFX10
) {
461 /* GFX10: A store followed by a load at the same address causes a problem because
462 * the load doesn't load the correct values unless we wait for the store first.
463 * This is NOT mitigated by an s_nop.
465 * TODO: Refine this when we have proper alias analysis.
467 SMEM_instruction
*smem
= static_cast<SMEM_instruction
*>(instr
);
468 if (ctx
.pending_s_buffer_store
&&
469 !smem
->definitions
.empty() &&
470 !smem
->can_reorder
&& smem
->barrier
== barrier_buffer
) {
475 if (instr
->format
== Format::PSEUDO_BARRIER
) {
476 switch (instr
->opcode
) {
477 case aco_opcode::p_memory_barrier_common
:
478 imm
.combine(ctx
.barrier_imm
[ffs(barrier_atomic
) - 1]);
479 imm
.combine(ctx
.barrier_imm
[ffs(barrier_buffer
) - 1]);
480 imm
.combine(ctx
.barrier_imm
[ffs(barrier_image
) - 1]);
481 if (ctx
.program
->workgroup_size
> ctx
.program
->wave_size
)
482 imm
.combine(ctx
.barrier_imm
[ffs(barrier_shared
) - 1]);
484 case aco_opcode::p_memory_barrier_atomic
:
485 imm
.combine(ctx
.barrier_imm
[ffs(barrier_atomic
) - 1]);
487 /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
488 case aco_opcode::p_memory_barrier_buffer
:
489 case aco_opcode::p_memory_barrier_image
:
490 imm
.combine(ctx
.barrier_imm
[ffs(barrier_buffer
) - 1]);
491 imm
.combine(ctx
.barrier_imm
[ffs(barrier_image
) - 1]);
493 case aco_opcode::p_memory_barrier_shared
:
494 if (ctx
.program
->workgroup_size
> ctx
.program
->wave_size
)
495 imm
.combine(ctx
.barrier_imm
[ffs(barrier_shared
) - 1]);
497 case aco_opcode::p_memory_barrier_gs_data
:
498 imm
.combine(ctx
.barrier_imm
[ffs(barrier_gs_data
) - 1]);
500 case aco_opcode::p_memory_barrier_gs_sendmsg
:
501 imm
.combine(ctx
.barrier_imm
[ffs(barrier_gs_sendmsg
) - 1]);
510 if (ctx
.pending_flat_vm
&& imm
.vm
!= wait_imm::unset_counter
)
512 if (ctx
.pending_flat_lgkm
&& imm
.lgkm
!= wait_imm::unset_counter
)
516 ctx
.exp_cnt
= std::min(ctx
.exp_cnt
, imm
.exp
);
517 ctx
.vm_cnt
= std::min(ctx
.vm_cnt
, imm
.vm
);
518 ctx
.lgkm_cnt
= std::min(ctx
.lgkm_cnt
, imm
.lgkm
);
519 ctx
.vs_cnt
= std::min(ctx
.vs_cnt
, imm
.vs
);
521 /* update barrier wait imms */
522 for (unsigned i
= 0; i
< barrier_count
; i
++) {
523 wait_imm
& bar
= ctx
.barrier_imm
[i
];
524 uint16_t& bar_ev
= ctx
.barrier_events
[i
];
525 if (bar
.exp
!= wait_imm::unset_counter
&& imm
.exp
<= bar
.exp
) {
526 bar
.exp
= wait_imm::unset_counter
;
527 bar_ev
&= ~exp_events
;
529 if (bar
.vm
!= wait_imm::unset_counter
&& imm
.vm
<= bar
.vm
) {
530 bar
.vm
= wait_imm::unset_counter
;
531 bar_ev
&= ~(vm_events
& ~event_flat
);
533 if (bar
.lgkm
!= wait_imm::unset_counter
&& imm
.lgkm
<= bar
.lgkm
) {
534 bar
.lgkm
= wait_imm::unset_counter
;
535 bar_ev
&= ~(lgkm_events
& ~event_flat
);
537 if (bar
.vs
!= wait_imm::unset_counter
&& imm
.vs
<= bar
.vs
) {
538 bar
.vs
= wait_imm::unset_counter
;
539 bar_ev
&= ~vs_events
;
541 if (bar
.vm
== wait_imm::unset_counter
&& bar
.lgkm
== wait_imm::unset_counter
)
542 bar_ev
&= ~event_flat
;
545 /* remove all gprs with higher counter from map */
546 std::map
<PhysReg
,wait_entry
>::iterator it
= ctx
.gpr_map
.begin();
547 while (it
!= ctx
.gpr_map
.end())
549 if (imm
.exp
!= wait_imm::unset_counter
&& imm
.exp
<= it
->second
.imm
.exp
)
550 ctx
.wait_and_remove_from_entry(it
->first
, it
->second
, counter_exp
);
551 if (imm
.vm
!= wait_imm::unset_counter
&& imm
.vm
<= it
->second
.imm
.vm
)
552 ctx
.wait_and_remove_from_entry(it
->first
, it
->second
, counter_vm
);
553 if (imm
.lgkm
!= wait_imm::unset_counter
&& imm
.lgkm
<= it
->second
.imm
.lgkm
)
554 ctx
.wait_and_remove_from_entry(it
->first
, it
->second
, counter_lgkm
);
555 if (imm
.lgkm
!= wait_imm::unset_counter
&& imm
.vs
<= it
->second
.imm
.vs
)
556 ctx
.wait_and_remove_from_entry(it
->first
, it
->second
, counter_vs
);
557 if (!it
->second
.counters
)
558 it
= ctx
.gpr_map
.erase(it
);
565 ctx
.pending_flat_vm
= false;
567 ctx
.pending_flat_lgkm
= false;
568 ctx
.pending_s_buffer_store
= false;
void update_barrier_counter(uint8_t *ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}
void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, barrier_interaction barrier)
{
   for (unsigned i = 0; i < barrier_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (barrier & (1 << i)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}
void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, barrier);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}
void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, barrier);

   for (std::pair<PhysReg,wait_entry> e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}
void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg()+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }

   if (ctx.collect_statistics) {
      unsigned counters_todo = counters;
      while (counters_todo) {
         unsigned i = u_bit_scan(&counters_todo);
         ctx.unwaited_instrs[i].insert(std::make_pair(ctx.gen_instr, 0u));
         for (unsigned j = 0; j < rc.size(); j++)
            ctx.reg_instrs[i][PhysReg{reg.reg()+j}].insert(ctx.gen_instr);
      }
   }
}
void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
}
void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
}
void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, barrier_buffer);
      else
         update_counters(ctx, event_flat, barrier_buffer);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
      update_counters(ctx, event_smem, static_cast<SMEM_instruction*>(instr)->barrier);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->can_reorder &&
               smem->barrier == barrier_buffer)
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      bool gds = static_cast<DS_instruction*>(instr)->gds;
      update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
      if (gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);

      if (gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_barrier_interaction(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev);

      if (ctx.chip_class == GFX6 &&
          instr->format != Format::MIMG &&
          instr->operands.size() == 4) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 &&
                 instr->format == Format::MIMG &&
                 instr->operands[1].regClass().type() == RegType::vgpr) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg ||
          instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg, get_barrier_interaction(instr));
      break;
   }
   default:
      break;
   }
}
void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      instructions.emplace_back(waitcnt);
   }
}
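
/* Note: the store counter (vscnt) is not part of the s_waitcnt immediate on
 * GFX10+, which is why emit_waitcnt() above emits a separate s_waitcnt_vscnt
 * (SOPK) instruction for it. */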
void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   ctx.collect_statistics = program->collect_statistics;

   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      queued_imm.combine(kill(instr.get(), ctx));

      ctx.gen_instr = instr.get();
      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));

         if (ctx.collect_statistics)
            ctx.advance_unwaited_instrs();
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}
} /* end namespace */
static uint32_t calculate_score(std::vector<wait_ctx> &ctx_vec, uint32_t event_mask)
{
   double result = 0.0;
   unsigned num_waits = 0;
   while (event_mask) {
      unsigned event_index = u_bit_scan(&event_mask);
      for (const wait_ctx &ctx : ctx_vec) {
         for (unsigned dist : ctx.wait_distances[event_index]) {
            double score = dist;
            /* for many events, excessive distances provide little benefit, so
             * decrease the score in that case. */
            double threshold = INFINITY;
            double inv_strength = 0.000001;
            switch (1 << event_index) {
            case event_smem:
               threshold = 70.0;
               inv_strength = 75.0;
               break;
            case event_vmem:
            case event_vmem_store:
            case event_flat:
               threshold = 230.0;
               inv_strength = 150.0;
               break;
            default:
               break;
            }
            if (score > threshold) {
               score -= threshold;
               score = threshold + score / (1.0 + score / inv_strength);
            }

            /* we don't want increases in high scores to hide decreases in low scores,
             * so raise to the power of 0.1 before averaging. */
            result += pow(score, 0.1);
            num_waits++;
         }
      }
   }
   return round(pow(result / num_waits, 10.0) * 10.0);
}
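
/* Informal reading of the soft clamp above: once a wait distance exceeds
 * `threshold`, the excess x is mapped through x / (1.0 + x / inv_strength),
 * which tends towards inv_strength for large x. Very long distances therefore
 * saturate near threshold + inv_strength instead of growing without bound. */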
void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program));

   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      if (current.instructions.empty()) {
         out_ctx[current.index] = std::move(ctx);
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }

   if (program->collect_statistics) {
      program->statistics[statistic_vmem_score] =
         calculate_score(out_ctx, event_vmem | event_flat | event_vmem_store);
      program->statistics[statistic_smem_score] =
         calculate_score(out_ctx, event_smem);
   }
}

} /* end namespace aco */