/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <algorithm>
#include <map>
#include <set>
#include <stack>
#include <vector>
#include <math.h>

#include "aco_ir.h"
#include "vulkan/radv_shader.h"

namespace aco {
namespace {
/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward), and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry
 * consisting of the information about the cnt values to be waited for.
 * Note: after merge nodes, it can happen that multiple cnt values have to be
 *       waited for on the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - or erase gprs with counters higher than to be waited for.
 */

// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load
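/* An illustrative sketch (hypothetical instruction sequence, not taken from
 * real compiler output): after
 *    v0 = buffer_load_dword ...   ; gpr_map[v0].imm.vm = 0
 *    v1 = buffer_load_dword ...   ; gpr_map[v0].imm.vm = 1, gpr_map[v1].imm.vm = 0
 * a read of v0 only needs "s_waitcnt vmcnt(1)", which lets the second load
 * stay outstanding while the first result is consumed. */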
/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
   num_events = 12,
};
enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
   num_counters = 4,
};
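/* These counters map onto the hardware wait counts: vmcnt tracks VMEM loads,
 * expcnt tracks exports and GPR locks, lgkmcnt tracks LDS/GDS/constant(SMEM)/
 * message traffic, and vscnt (a separate s_waitcnt_vscnt instruction on
 * GFX10+) tracks VMEM stores. */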
static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null |
                                   event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;
uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}
uint16_t get_events_for_counter(counter_type ctr)
{
   switch (ctr) {
   case counter_exp:
      return exp_events;
   case counter_lgkm:
      return lgkm_events;
   case counter_vm:
      return vm_events;
   case counter_vs:
      return vs_events;
   }
   return 0;
}
struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}
   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }
   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      if (chip >= GFX10) {
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
      } else if (chip >= GFX9) {
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
      } else {
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9, so we don't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10, so we don't have to worry about the architecture when interpreting the immediate */
      return imm;
   }
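   /* Worked example: with the pre-GFX9 layout above (vmcnt in bits [3:0],
    * expcnt in [6:4], lgkmcnt in [11:8]), packing {vm=2, exp=unset, lgkm=0}
    * gives ((0 & 0xf) << 8) | ((0xff & 0x7) << 4) | (2 & 0xf) = 0x072.
    * An unset counter collapses to all-ones in its field, which the hardware
    * treats as "don't wait". */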
   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};
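/* Usage sketch for wait_imm::combine: combining {vm=2} with {vm=0, lgkm=3}
 * yields {vm=0, lgkm=3}. A smaller value is a stricter wait, and unset (0xff)
 * loses against any set value under std::min. */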
struct wait_entry {
   wait_imm imm;
   uint16_t events; /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read:1;
   bool logical:1;
   bool has_vmem_nosampler:1;
   bool has_vmem_sampler:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
      : imm(imm), events(event), counters(get_counters_for_event(event)),
        wait_on_read(wait_on_read), logical(logical),
        has_vmem_nosampler(false), has_vmem_sampler(false) {}
   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read) ||
                     (other.has_vmem_nosampler && !has_vmem_nosampler) ||
                     (other.has_vmem_sampler && !has_vmem_sampler);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read |= other.wait_on_read;
      has_vmem_nosampler |= other.has_vmem_nosampler;
      has_vmem_sampler |= other.has_vmem_sampler;
      assert(logical == other.logical);
      return changed;
   }
   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
         has_vmem_nosampler = false;
         has_vmem_sampler = false;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null |
                     event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};
struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[storage_count];
   uint16_t barrier_events[storage_count] = {}; /* use wait_event notion */

   std::map<PhysReg,wait_entry> gpr_map;

   /* used for vmem/smem scores */
   bool collect_statistics;
   Instruction *gen_instr;
   std::map<Instruction *, unsigned> unwaited_instrs[num_counters];
   std::map<PhysReg,std::set<Instruction *>> reg_instrs[num_counters];
   std::vector<unsigned> wait_distances[num_events];

   wait_ctx() {}
   wait_ctx(Program *program_)
      : program(program_),
        chip_class(program_->chip_class),
        max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
        max_exp_cnt(6),
        max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
        max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
        unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)),
        collect_statistics(program_->collect_statistics) {}
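   /* The max_*_cnt values above are presumably the largest counts that fit in
    * the s_waitcnt immediate fields: 14 for 4-bit fields and 62 for 6-bit
    * fields, with the all-ones encoding reserved to mean "unset"/"don't wait"
    * (see wait_imm::pack). */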
   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
      {
         std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
         if (entry.second.logical != logical)
            continue;

         if (it != gpr_map.end()) {
            changed |= it->second.join(entry.second);
         } else {
            gpr_map.insert(entry);
            changed = true;
         }
      }

      for (unsigned i = 0; i < storage_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= other->barrier_events[i] & ~barrier_events[i];
         barrier_events[i] |= other->barrier_events[i];
      }

      /* these are used for statistics, so don't update "changed" */
      for (unsigned i = 0; i < num_counters; i++) {
         for (std::pair<Instruction *, unsigned> instr : other->unwaited_instrs[i]) {
            auto pos = unwaited_instrs[i].find(instr.first);
            if (pos == unwaited_instrs[i].end())
               unwaited_instrs[i].insert(instr);
            else
               pos->second = std::min(pos->second, instr.second);
         }
         /* don't use a foreach loop to avoid copies */
         for (auto it = other->reg_instrs[i].begin(); it != other->reg_instrs[i].end(); ++it)
            reg_instrs[i][it->first].insert(it->second.begin(), it->second.end());
      }

      return changed;
   }
   void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter) {
      if (collect_statistics && (entry.counters & counter)) {
         unsigned counter_idx = ffs(counter) - 1;
         for (Instruction *instr : reg_instrs[counter_idx][reg]) {
            auto pos = unwaited_instrs[counter_idx].find(instr);
            if (pos == unwaited_instrs[counter_idx].end())
               continue;

            unsigned distance = pos->second;
            unsigned events = entry.events & get_events_for_counter(counter);
            while (events) {
               unsigned event_idx = u_bit_scan(&events);
               wait_distances[event_idx].push_back(distance);
            }

            unwaited_instrs[counter_idx].erase(instr);
         }
         reg_instrs[counter_idx][reg].clear();
      }

      entry.remove_counter(counter);
   }
   void advance_unwaited_instrs()
   {
      for (unsigned i = 0; i < num_counters; i++) {
         for (auto it = unwaited_instrs[i].begin(); it != unwaited_instrs[i].end(); ++it)
            it->second++;
      }
   }
};
wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem) &&
             it->second.has_vmem_nosampler == !has_sampler && it->second.has_vmem_sampler == has_sampler)
            continue;

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds))
               continue;
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}
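/* Sketch of the exemption above: if v0 holds the result of an outstanding
 * buffer load and the next instruction is another VMEM op that overwrites v0,
 * no wait is needed because VMEM results return in issue order; any other
 * kind of redefinition has to pick up the entry's wait_imm instead. */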
wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}
wait_imm perform_barrier(wait_ctx& ctx, memory_sync_info sync, unsigned semantics)
{
   wait_imm imm;
   sync_scope subgroup_scope = ctx.program->workgroup_size <= ctx.program->wave_size ? scope_workgroup : scope_subgroup;
   if ((sync.semantics & semantics) && sync.scope > subgroup_scope) {
      unsigned storage = sync.storage;
      while (storage) {
         unsigned idx = u_bit_scan(&storage);

         /* LDS is private to the workgroup */
         sync_scope bar_scope_lds = MIN2(sync.scope, scope_workgroup);

         uint16_t events = ctx.barrier_events[idx];
         if (bar_scope_lds <= subgroup_scope)
            events &= ~event_lds;

         /* in non-WGP, the L1/L0 cache keeps all memory operations in-order for the same workgroup */
         if (ctx.chip_class < GFX10 && sync.scope <= scope_workgroup)
            events &= ~(event_vmem | event_vmem_store | event_smem);

         if (events)
            imm.combine(ctx.barrier_imm[idx]);
      }
   }

   return imm;
}
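/* Sketch: a release/acquire barrier on buffer storage at workgroup scope
 * combines in every counter recorded in barrier_imm for that storage class,
 * making prior memory operations visible to other waves in the group. Scopes
 * at or below a single wave need no waitcnt here, since the per-gpr entries
 * already order a wave's own accesses. */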
wait_imm kill(Instruction* instr, wait_ctx& ctx, memory_sync_info sync_info)
{
   wait_imm imm;
   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      imm.combine(check_instr(instr, ctx));

   imm.combine(parse_wait_instr(ctx, instr));

   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyways since we're about to do s_endpgm.
    */
   if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.chip_class >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.chip_class >= GFX10 && instr->format == Format::SMEM) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      if (ctx.pending_s_buffer_store &&
          !smem->definitions.empty() &&
          !smem->sync.can_reorder()) {
         imm.lgkm = 0;
      }
   }

   if (instr->opcode == aco_opcode::p_barrier)
      imm.combine(perform_barrier(ctx, static_cast<Pseudo_barrier_instruction *>(instr)->sync, semantic_acqrel));
   else
      imm.combine(perform_barrier(ctx, sync_info, semantic_release));

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < storage_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
            bar.exp = wait_imm::unset_counter;
            bar_ev &= ~exp_events;
         }
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
            bar.vm = wait_imm::unset_counter;
            bar_ev &= ~(vm_events & ~event_flat);
         }
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
            bar.lgkm = wait_imm::unset_counter;
            bar_ev &= ~(lgkm_events & ~event_flat);
         }
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
            bar.vs = wait_imm::unset_counter;
            bar_ev &= ~vs_events;
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end())
      {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            ctx.wait_and_remove_from_entry(it->first, it->second, counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }

   return imm;
}
void update_barrier_counter(uint8_t *ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}
void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, memory_sync_info sync)
{
   for (unsigned i = 0; i < storage_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (sync.storage & (1 << i) && !(sync.semantics & semantic_private)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}
void update_counters(wait_ctx& ctx, wait_event event, memory_sync_info sync=memory_sync_info())
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, sync);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}
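/* Sketch: after a first ds_read creates an entry with imm.lgkm == 0, a second
 * ds_read bumps both ctx.lgkm_cnt and the first entry's imm.lgkm to 1, so a
 * later use of the first result only needs lgkmcnt(1) and leaves the second
 * read outstanding. */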
void update_counters_for_flat_load(wait_ctx& ctx, memory_sync_info sync=memory_sync_info())
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, sync);

   /* iterate by reference so the updates actually reach the map */
   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}
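/* FLAT accesses can land in either the VM or the LGKM pipeline and the two
 * are not ordered against each other, so a pending flat load pins every
 * affected entry's counter to 0: only draining a counter to zero guarantees
 * the flat access has completed. */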
void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read,
                       bool has_sampler=false)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);
   new_entry.has_vmem_nosampler = (event & event_vmem) && !has_sampler;
   new_entry.has_vmem_sampler = (event & event_vmem) && has_sampler;

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg()+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }

   if (ctx.collect_statistics) {
      unsigned counters_todo = counters;
      while (counters_todo) {
         unsigned i = u_bit_scan(&counters_todo);
         ctx.unwaited_instrs[i].insert(std::make_pair(ctx.gen_instr, 0u));
         for (unsigned j = 0; j < rc.size(); j++)
            ctx.reg_instrs[i][PhysReg{reg.reg()+j}].insert(ctx.gen_instr);
      }
   }
}
void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event, bool has_sampler=false)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, has_sampler);
}

void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event, bool has_sampler=false)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, has_sampler);
}
void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, flat->sync);
      else
         update_counters(ctx, event_flat, flat->sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      update_counters(ctx, event_smem, smem->sync);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->sync.can_reorder())
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      DS_instruction *ds = static_cast<DS_instruction*>(instr);
      update_counters(ctx, ds->gds ? event_gds : event_lds, ds->sync);
      if (ds->gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ds->gds ? event_gds : event_lds);

      if (ds->gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_sync_info(instr));

      bool has_sampler = instr->format == Format::MIMG && !instr->operands[1].isUndefined() && instr->operands[1].regClass() == s4;

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev, has_sampler);

      if (ctx.chip_class == GFX6 &&
          instr->format != Format::MIMG &&
          instr->operands.size() == 4) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 &&
                 instr->format == Format::MIMG &&
                 instr->operands[1].regClass().type() == RegType::vgpr) {
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
      }
      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg ||
          instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg);
      break;
   }
   default:
      break;
   }
}
void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      instructions.emplace_back(waitcnt);
   }
}
void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;

   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      memory_sync_info sync_info = get_sync_info(instr.get());
      queued_imm.combine(kill(instr.get(), ctx, sync_info));

      ctx.gen_instr = instr.get();
      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));

         queued_imm.combine(perform_barrier(ctx, sync_info, semantic_acquire));

         if (ctx.collect_statistics)
            ctx.advance_unwaited_instrs();
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */
static uint32_t calculate_score(std::vector<wait_ctx> &ctx_vec, uint32_t event_mask)
{
   double result = 0.0;
   unsigned num_waits = 0;
   while (event_mask) {
      unsigned event_index = u_bit_scan(&event_mask);
      for (const wait_ctx &ctx : ctx_vec) {
         for (unsigned dist : ctx.wait_distances[event_index]) {
            double score = dist;
            /* for many events, excessive distances provide little benefit, so
             * decrease the score in that case. */
            double threshold = INFINITY;
            double inv_strength = 0.000001;
            switch (1 << event_index) {
            case event_smem:
               threshold = 70.0;
               inv_strength = 75.0;
               break;
            case event_vmem:
            case event_vmem_store:
            case event_flat:
               threshold = 230.0;
               inv_strength = 150.0;
               break;
            default:
               break;
            }
            if (score > threshold) {
               score -= threshold;
               score = threshold + score / (1.0 + score / inv_strength);
            }

            /* we don't want increases in high scores to hide decreases in low scores,
             * so raise to the power of 0.1 before averaging. */
            result += pow(score, 0.1);
            num_waits++;
         }
      }
   }
   return round(pow(result / num_waits, 10.0) * 10.0);
}
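/* Rough numeric sketch: a single vmem wait at distance 100 stays below the
 * 230.0 threshold and contributes pow(100, 0.1) ≈ 1.58 to "result"; with
 * num_waits == 1 the final score is round(pow(1.58, 10.0) * 10.0) ≈ 1000,
 * i.e. the averaging done in the 0.1-power domain is scaled back up at the
 * end. */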
void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   std::vector<wait_ctx> in_ctx(program->blocks.size(), wait_ctx(program));
   std::vector<wait_ctx> out_ctx(program->blocks.size(), wait_ctx(program));

   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      if (done[current.index] && !changed) {
         in_ctx[current.index] = std::move(ctx);
         continue;
      } else {
         in_ctx[current.index] = ctx;
      }

      if (current.instructions.empty()) {
         out_ctx[current.index] = std::move(ctx);
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = std::move(ctx);
   }

   if (program->collect_statistics) {
      program->statistics[statistic_vmem_score] =
         calculate_score(out_ctx, event_vmem | event_flat | event_vmem_store);
      program->statistics[statistic_smem_score] =
         calculate_score(out_ctx, event_smem);
   }
}

}