aco: fix waitcnts for barriers at block ends
mesa.git: src/amd/compiler/aco_insert_waitcnt.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>
#include <map>
#include <stack>

#include "aco_ir.h"
#include "vulkan/radv_shader.h"

namespace aco {

namespace {

/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward) and loops are processed
 * several times until no further progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the join of the predecessors' out-contexts.
 * The context contains a map gpr -> wait_entry which holds the counter
 * values that have to be waited for.
 * Note: after merge nodes, it can happen that multiple counter values
 * have to be waited for for the same register.
 *
 * The values are updated according to the encountered instructions:
 * - new events increment the wait counters of existing entries of the same type
 * - waits erase gprs whose counters are not lower than the value waited for.
 */

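/* Hypothetical example to illustrate the idea (not taken from a real shader):
 *
 *    buffer_load_dword v0, ...   ; vm_cnt becomes 1, v0 -> vm = 0
 *    buffer_load_dword v1, ...   ; vm_cnt becomes 2, v0 -> vm = 1, v1 -> vm = 0
 *    v_add_f32 v2, v0, v0        ; reads v0
 *
 * Before the v_add_f32, the pass inserts "s_waitcnt vmcnt(1)": only the first
 * load has to have completed for v0 to be valid, the load of v1 may still be
 * in flight.
 */
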
// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
};

enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
};

static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;

uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}

struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}

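   /* Unpack an s_waitcnt immediate. The assumed bit layout (consistent with
    * pack() below) is: vmcnt in bits [3:0] (plus [15:14] on GFX9+),
    * expcnt in bits [6:4], lgkmcnt in bits [11:8] ([13:8] on GFX10). */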
   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }

   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      switch (chip) {
      case GFX10:
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      case GFX9:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      default:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9 and now we won't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10 and now we won't have to worry about the architecture when interpreting the immediate */
      return imm;
   }

   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};

struct wait_entry {
   wait_imm imm;
   uint16_t events; /* use wait_event notion */
   uint8_t counters; /* use counter_type notion */
   bool wait_on_read:1;
   bool logical:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
      : imm(imm), events(event), counters(get_counters_for_event(event)),
        wait_on_read(wait_on_read), logical(logical) {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read = wait_on_read || other.wait_on_read;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};

struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[barrier_count];

   std::map<PhysReg,wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program *program_)
      : program(program_),
        chip_class(program_->chip_class),
        max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
        max_exp_cnt(6),
        max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
        max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
        unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
      {
         std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
         if (entry.second.logical != logical)
            continue;

         if (it != gpr_map.end()) {
            changed |= it->second.join(entry.second);
         } else {
            gpr_map.insert(entry);
            changed = true;
         }
      }

      for (unsigned i = 0; i < barrier_count; i++)
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);

      return changed;
   }
};

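/* Returns the counter values that have to be waited for before this
 * instruction can read its operands and overwrite its definitions. */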
wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
            it->second.remove_counter(counter_vm);
            if (!it->second.counters)
               it = ctx.gpr_map.erase(it);
            continue;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
               it->second.remove_counter(counter_lgkm);
               if (!it->second.counters)
                  it = ctx.gpr_map.erase(it);
               continue;
            }
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}

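/* If the instruction is an existing s_waitcnt or s_waitcnt_vscnt, return the
 * counter values it already waits for; otherwise return an empty wait_imm. */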
wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}

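/* Computes the wait_imm needed before instr (from read/written registers,
 * existing waitcnts, hazard workarounds and memory barriers) and updates the
 * context accordingly. */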
wait_imm kill(Instruction* instr, wait_ctx& ctx)
{
   wait_imm imm;
   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      imm.combine(check_instr(instr, ctx));

   imm.combine(parse_wait_instr(ctx, instr));

   if (ctx.chip_class >= GFX10) {
      /* Seems to be required on GFX10 to achieve correct behaviour.
       * It shouldn't cost anything anyways since we're about to do s_endpgm.
       */
      if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb)
         imm.lgkm = 0;

      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
      if (ctx.pending_s_buffer_store &&
          !smem->definitions.empty() &&
          !smem->can_reorder && smem->barrier == barrier_buffer) {
         imm.lgkm = 0;
      }
   }

   if (instr->format == Format::PSEUDO_BARRIER) {
      unsigned* bsize = ctx.program->info->cs.block_size;
      unsigned workgroup_size = bsize[0] * bsize[1] * bsize[2];
      switch (instr->opcode) {
      case aco_opcode::p_memory_barrier_all:
         for (unsigned i = 0; i < barrier_count; i++) {
            if ((1 << i) == barrier_shared && workgroup_size <= 64)
               continue;
            imm.combine(ctx.barrier_imm[i]);
         }
         break;
      case aco_opcode::p_memory_barrier_atomic:
         imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
         break;
      /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
      case aco_opcode::p_memory_barrier_buffer:
      case aco_opcode::p_memory_barrier_image:
         imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
         break;
      case aco_opcode::p_memory_barrier_shared:
         if (workgroup_size > 64)
            imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
         break;
      default:
         assert(false);
         break;
      }
   }

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < barrier_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp)
            bar.exp = wait_imm::unset_counter;
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm)
            bar.vm = wait_imm::unset_counter;
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm)
            bar.lgkm = wait_imm::unset_counter;
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs)
            bar.vs = wait_imm::unset_counter;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end())
      {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            it->second.remove_counter(counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            it->second.remove_counter(counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            it->second.remove_counter(counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            it->second.remove_counter(counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }

   return imm;
}

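/* Update the per-barrier wait values after a new memory event of the given
 * counter types: the barrier the event belongs to now has to wait down to 0,
 * while every other barrier's recorded wait value is bumped by one because
 * one more event is outstanding in front of it. */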
void update_barrier_imm(wait_ctx& ctx, uint8_t counters, barrier_interaction barrier)
{
   unsigned barrier_index = ffs(barrier) - 1;
   for (unsigned i = 0; i < barrier_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      if (i == barrier_index) {
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else {
         if (counters & counter_lgkm && bar.lgkm != wait_imm::unset_counter && bar.lgkm < ctx.max_lgkm_cnt)
            bar.lgkm++;
         if (counters & counter_vm && bar.vm != wait_imm::unset_counter && bar.vm < ctx.max_vm_cnt)
            bar.vm++;
         if (counters & counter_exp && bar.exp != wait_imm::unset_counter && bar.exp < ctx.max_exp_cnt)
            bar.exp++;
         if (counters & counter_vs && bar.vs != wait_imm::unset_counter && bar.vs < ctx.max_vs_cnt)
            bar.vs++;
      }
   }
}

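/* Account for a newly issued event: bump the hardware counters and the wait
 * values of already-tracked gpr entries whose events are ordered with respect
 * to the new one. */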
void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, barrier);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}

void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, barrier);

   /* iterate by reference so the entries in the map are actually updated */
   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

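/* Record that the given register range was just touched by an event and
 * therefore has to be waited for before it can be read (wait_on_read) or
 * overwritten. */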
void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
}

void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
}

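/* Register the events and gpr wait entries generated by this instruction. */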
void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, barrier_buffer);
      else
         update_counters(ctx, event_flat, barrier_buffer);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      update_counters(ctx, event_smem, smem->barrier);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->can_reorder &&
               smem->barrier == barrier_buffer)
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      bool gds = static_cast<DS_instruction*>(instr)->gds;
      update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
      if (gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);

      if (gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_barrier_interaction(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev);

      if (instr->operands.size() == 4 && ctx.chip_class == GFX6) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      }
      break;
   }
   default:
      break;
   }
}

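/* Emit the s_waitcnt (and, on GFX10, s_waitcnt_vscnt) instructions encoding
 * the given wait_imm. */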
void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      waitcnt->block = -1;
      instructions.emplace_back(waitcnt);
   }
}

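/* Rewrite one block: existing waitcnts and pseudo barrier instructions are
 * folded into queued_imm and re-emitted (possibly combined) before the next
 * kept instruction, or at the end of the block if nothing follows. */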
void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;
   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      queued_imm.combine(kill(instr.get(), ctx));

      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

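/* Entry point: walk the blocks in reverse postorder, join the predecessors'
 * out-contexts into each block's in-context and re-run loop bodies until the
 * contexts stop changing. */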
void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   wait_ctx in_ctx[program->blocks.size()];
   wait_ctx out_ctx[program->blocks.size()];
   for (unsigned i = 0; i < program->blocks.size(); i++)
      in_ctx[i] = wait_ctx(program);
   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      in_ctx[current.index] = ctx;

      if (done[current.index] && !changed)
         continue;

      if (current.instructions.empty()) {
         out_ctx[current.index] = ctx;
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = ctx;
   }
}

}
