aco: fix uninitialized data error in waitcnt pass
[mesa.git] src/amd/compiler/aco_insert_waitcnt.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <algorithm>
#include <map>
#include <stack>

#include "aco_ir.h"
#include "vulkan/radv_shader.h"

namespace aco {

namespace {

/**
 * The general idea of this pass is:
 * The CFG is traversed in reverse postorder (forward), and loops are processed
 * several times until no progress is made.
 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
 * The in-context is the joined out-contexts of the predecessors.
 * The context contains a map: gpr -> wait_entry
 * consisting of the information about the cnt values to be waited for.
 * Note: after merge nodes, it can happen that multiple cnt values have to be
 * waited for for the same register.
 *
 * The values are updated according to the encountered instructions:
 * - additional events increment the counter of waits of the same type
 * - waits erase gprs whose counter values are at least as high as the value waited for.
 */
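
/* A small, hypothetical example of what the pass produces:
 *    buffer_load_dword v0, ...   ; increments vmcnt, adds a wait_entry for v0
 *    v_add_f32 v1, v0, v2        ; reads v0
 * Before the v_add_f32, the pass inserts "s_waitcnt vmcnt(0)", because the
 * wait_entry for v0 requires vmcnt to reach 0 before v0 may be read.
 */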

// TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load

/* Instructions of the same event will finish in-order except for smem
 * and maybe flat. Instructions of different events may not finish in-order. */
enum wait_event : uint16_t {
   event_smem = 1 << 0,
   event_lds = 1 << 1,
   event_gds = 1 << 2,
   event_vmem = 1 << 3,
   event_vmem_store = 1 << 4, /* GFX10+ */
   event_flat = 1 << 5,
   event_exp_pos = 1 << 6,
   event_exp_param = 1 << 7,
   event_exp_mrt_null = 1 << 8,
   event_gds_gpr_lock = 1 << 9,
   event_vmem_gpr_lock = 1 << 10,
   event_sendmsg = 1 << 11,
};

enum counter_type : uint8_t {
   counter_exp = 1 << 0,
   counter_lgkm = 1 << 1,
   counter_vm = 1 << 2,
   counter_vs = 1 << 3,
};

static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
static const uint16_t vm_events = event_vmem | event_flat;
static const uint16_t vs_events = event_vmem_store;

uint8_t get_counters_for_event(wait_event ev)
{
   switch (ev) {
   case event_smem:
   case event_lds:
   case event_gds:
   case event_sendmsg:
      return counter_lgkm;
   case event_vmem:
      return counter_vm;
   case event_vmem_store:
      return counter_vs;
   case event_flat:
      return counter_vm | counter_lgkm;
   case event_exp_pos:
   case event_exp_param:
   case event_exp_mrt_null:
   case event_gds_gpr_lock:
   case event_vmem_gpr_lock:
      return counter_exp;
   default:
      return 0;
   }
}

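/* A set of s_waitcnt counter values; unset_counter (0xff) means the respective
 * counter is not affected or no wait is required. */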
struct wait_imm {
   static const uint8_t unset_counter = 0xff;

   uint8_t vm;
   uint8_t exp;
   uint8_t lgkm;
   uint8_t vs;

   wait_imm() :
      vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
   wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
      vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}

   wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
   {
      vm = packed & 0xf;
      if (chip >= GFX9)
         vm |= (packed >> 10) & 0x30;

      exp = (packed >> 4) & 0x7;

      lgkm = (packed >> 8) & 0xf;
      if (chip >= GFX10)
         lgkm |= (packed >> 8) & 0x30;
   }

   uint16_t pack(enum chip_class chip) const
   {
      uint16_t imm = 0;
      assert(exp == unset_counter || exp <= 0x7);
      switch (chip) {
      case GFX10:
         assert(lgkm == unset_counter || lgkm <= 0x3f);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      case GFX9:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0x3f);
         imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      default:
         assert(lgkm == unset_counter || lgkm <= 0xf);
         assert(vm == unset_counter || vm <= 0xf);
         imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
         break;
      }
      if (chip < GFX9 && vm == wait_imm::unset_counter)
         imm |= 0xc000; /* should have no effect on pre-GFX9 and now we won't have to worry about the architecture when interpreting the immediate */
      if (chip < GFX10 && lgkm == wait_imm::unset_counter)
         imm |= 0x3000; /* should have no effect on pre-GFX10 and now we won't have to worry about the architecture when interpreting the immediate */
      return imm;
   }

   bool combine(const wait_imm& other)
   {
      bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
      vm = std::min(vm, other.vm);
      exp = std::min(exp, other.exp);
      lgkm = std::min(lgkm, other.lgkm);
      vs = std::min(vs, other.vs);
      return changed;
   }

   bool empty() const
   {
      return vm == unset_counter && exp == unset_counter &&
             lgkm == unset_counter && vs == unset_counter;
   }
};

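/* Per-register bookkeeping: which events are still outstanding for this gpr,
 * which counters they map to, and the counter values an access has to wait for. */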
struct wait_entry {
   wait_imm imm;
   uint16_t events; /* bitmask of wait_event */
   uint8_t counters; /* bitmask of counter_type */
   bool wait_on_read:1;
   bool logical:1;

   wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
           : imm(imm), events(event), counters(get_counters_for_event(event)),
             wait_on_read(wait_on_read), logical(logical) {}

   bool join(const wait_entry& other)
   {
      bool changed = (other.events & ~events) ||
                     (other.counters & ~counters) ||
                     (other.wait_on_read && !wait_on_read);
      events |= other.events;
      counters |= other.counters;
      changed |= imm.combine(other.imm);
      wait_on_read = wait_on_read || other.wait_on_read;
      assert(logical == other.logical);
      return changed;
   }

   void remove_counter(counter_type counter)
   {
      counters &= ~counter;

      if (counter == counter_lgkm) {
         imm.lgkm = wait_imm::unset_counter;
         events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
      }

      if (counter == counter_vm) {
         imm.vm = wait_imm::unset_counter;
         events &= ~event_vmem;
      }

      if (counter == counter_exp) {
         imm.exp = wait_imm::unset_counter;
         events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
      }

      if (counter == counter_vs) {
         imm.vs = wait_imm::unset_counter;
         events &= ~event_vmem_store;
      }

      if (!(counters & counter_lgkm) && !(counters & counter_vm))
         events &= ~event_flat;
   }
};

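/* The analysis state at a program point: how many events of each kind are
 * outstanding, the per-register wait entries, and the waits that memory
 * barriers would require. Contexts of predecessors are joined at merge blocks. */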
struct wait_ctx {
   Program *program;
   enum chip_class chip_class;
   uint16_t max_vm_cnt;
   uint16_t max_exp_cnt;
   uint16_t max_lgkm_cnt;
   uint16_t max_vs_cnt;
   uint16_t unordered_events = event_smem | event_flat;

   uint8_t vm_cnt = 0;
   uint8_t exp_cnt = 0;
   uint8_t lgkm_cnt = 0;
   uint8_t vs_cnt = 0;
   bool pending_flat_lgkm = false;
   bool pending_flat_vm = false;
   bool pending_s_buffer_store = false; /* GFX10 workaround */

   wait_imm barrier_imm[barrier_count];
   uint16_t barrier_events[barrier_count] = {}; /* bitmask of wait_event */

   std::map<PhysReg,wait_entry> gpr_map;

   wait_ctx() {}
   wait_ctx(Program *program_)
           : program(program_),
             chip_class(program_->chip_class),
             max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
             max_exp_cnt(6),
             max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
             max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
             unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}

   bool join(const wait_ctx* other, bool logical)
   {
      bool changed = other->exp_cnt > exp_cnt ||
                     other->vm_cnt > vm_cnt ||
                     other->lgkm_cnt > lgkm_cnt ||
                     other->vs_cnt > vs_cnt ||
                     (other->pending_flat_lgkm && !pending_flat_lgkm) ||
                     (other->pending_flat_vm && !pending_flat_vm);

      exp_cnt = std::max(exp_cnt, other->exp_cnt);
      vm_cnt = std::max(vm_cnt, other->vm_cnt);
      lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
      vs_cnt = std::max(vs_cnt, other->vs_cnt);
      pending_flat_lgkm |= other->pending_flat_lgkm;
      pending_flat_vm |= other->pending_flat_vm;
      pending_s_buffer_store |= other->pending_s_buffer_store;

      for (std::pair<PhysReg,wait_entry> entry : other->gpr_map)
      {
         std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
         if (entry.second.logical != logical)
            continue;

         if (it != gpr_map.end()) {
            changed |= it->second.join(entry.second);
         } else {
            gpr_map.insert(entry);
            changed = true;
         }
      }

      for (unsigned i = 0; i < barrier_count; i++) {
         changed |= barrier_imm[i].combine(other->barrier_imm[i]);
         changed |= other->barrier_events[i] & ~barrier_events[i];
         barrier_events[i] |= other->barrier_events[i];
      }

      return changed;
   }
};

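/* Determine the counter values that have to be waited for before instr may
 * execute, based on the registers it reads and writes. */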
wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
{
   wait_imm wait;

   for (const Operand op : instr->operands) {
      if (op.isConstant() || op.isUndefined())
         continue;

      /* check consecutively read gprs */
      for (unsigned j = 0; j < op.size(); j++) {
         PhysReg reg{op.physReg() + j};
         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
            continue;

         wait.combine(it->second.imm);
      }
   }

   for (const Definition& def : instr->definitions) {
      /* check consecutively written gprs */
      for (unsigned j = 0; j < def.getTemp().size(); j++)
      {
         PhysReg reg{def.physReg() + j};

         std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
         if (it == ctx.gpr_map.end())
            continue;

         /* Vector Memory reads and writes return in the order they were issued */
         if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
            it->second.remove_counter(counter_vm);
            if (!it->second.counters)
               it = ctx.gpr_map.erase(it);
            continue;
         }

         /* LDS reads and writes return in the order they were issued. same for GDS */
         if (instr->format == Format::DS) {
            bool gds = static_cast<DS_instruction*>(instr)->gds;
            if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
               it->second.remove_counter(counter_lgkm);
               if (!it->second.counters)
                  it = ctx.gpr_map.erase(it);
               continue;
            }
         }

         wait.combine(it->second.imm);
      }
   }

   return wait;
}

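/* If instr already is an s_waitcnt/s_waitcnt_vscnt, return the counter values
 * it waits for, so they can be folded into the wait we are about to emit. */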
wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
{
   if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
       instr->definitions[0].physReg() == sgpr_null) {
      wait_imm imm;
      imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
      return imm;
   } else if (instr->opcode == aco_opcode::s_waitcnt) {
      return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
   }
   return wait_imm();
}

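/* Compute the wait required before instr and update the context accordingly:
 * satisfied wait entries are removed and the counters are clamped. */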
wait_imm kill(Instruction* instr, wait_ctx& ctx)
{
   wait_imm imm;
   if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
      imm.combine(check_instr(instr, ctx));

   imm.combine(parse_wait_instr(ctx, instr));


   /* It's required to wait for scalar stores before "writing back" data.
    * It shouldn't cost anything anyways since we're about to do s_endpgm.
    */
   if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
      assert(ctx.chip_class >= GFX8);
      imm.lgkm = 0;
   }

   if (ctx.chip_class >= GFX10 && instr->format == Format::SMEM) {
      /* GFX10: A store followed by a load at the same address causes a problem because
       * the load doesn't load the correct values unless we wait for the store first.
       * This is NOT mitigated by an s_nop.
       *
       * TODO: Refine this when we have proper alias analysis.
       */
      SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
      if (ctx.pending_s_buffer_store &&
          !smem->definitions.empty() &&
          !smem->can_reorder && smem->barrier == barrier_buffer) {
         imm.lgkm = 0;
      }
   }

   if (instr->format == Format::PSEUDO_BARRIER) {
      uint32_t workgroup_size = UINT32_MAX;
      if (ctx.program->stage & sw_cs) {
         unsigned* bsize = ctx.program->info->cs.block_size;
         workgroup_size = bsize[0] * bsize[1] * bsize[2];
      }
      switch (instr->opcode) {
      case aco_opcode::p_memory_barrier_common:
         imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
         if (workgroup_size > ctx.program->wave_size)
            imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
         break;
      case aco_opcode::p_memory_barrier_atomic:
         imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
         break;
      /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
      case aco_opcode::p_memory_barrier_buffer:
      case aco_opcode::p_memory_barrier_image:
         imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
         imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
         break;
      case aco_opcode::p_memory_barrier_shared:
         if (workgroup_size > ctx.program->wave_size)
            imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
         break;
      case aco_opcode::p_memory_barrier_gs_data:
         imm.combine(ctx.barrier_imm[ffs(barrier_gs_data) - 1]);
         break;
      case aco_opcode::p_memory_barrier_gs_sendmsg:
         imm.combine(ctx.barrier_imm[ffs(barrier_gs_sendmsg) - 1]);
         break;
      default:
         assert(false);
         break;
      }
   }

   if (!imm.empty()) {
      if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
         imm.vm = 0;
      if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
         imm.lgkm = 0;

      /* reset counters */
      ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
      ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
      ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
      ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);

      /* update barrier wait imms */
      for (unsigned i = 0; i < barrier_count; i++) {
         wait_imm& bar = ctx.barrier_imm[i];
         uint16_t& bar_ev = ctx.barrier_events[i];
         if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
            bar.exp = wait_imm::unset_counter;
            bar_ev &= ~exp_events;
         }
         if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
            bar.vm = wait_imm::unset_counter;
            bar_ev &= ~(vm_events & ~event_flat);
         }
         if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
            bar.lgkm = wait_imm::unset_counter;
            bar_ev &= ~(lgkm_events & ~event_flat);
         }
         if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
            bar.vs = wait_imm::unset_counter;
            bar_ev &= ~vs_events;
         }
         if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
            bar_ev &= ~event_flat;
      }

      /* remove all gprs with higher counter from map */
      std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
      while (it != ctx.gpr_map.end())
      {
         if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
            it->second.remove_counter(counter_exp);
         if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
            it->second.remove_counter(counter_vm);
         if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
            it->second.remove_counter(counter_lgkm);
         if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
            it->second.remove_counter(counter_vs);
         if (!it->second.counters)
            it = ctx.gpr_map.erase(it);
         else
            it++;
      }
   }

   if (imm.vm == 0)
      ctx.pending_flat_vm = false;
   if (imm.lgkm == 0) {
      ctx.pending_flat_lgkm = false;
      ctx.pending_s_buffer_store = false;
   }

   return imm;
}

void update_barrier_counter(uint8_t *ctr, unsigned max)
{
   if (*ctr != wait_imm::unset_counter && *ctr < max)
      (*ctr)++;
}

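/* Track, for each barrier_interaction bit, the counter values a matching
 * memory barrier would have to wait for, aging them as further events of the
 * same, ordered type are issued. */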
void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, barrier_interaction barrier)
{
   for (unsigned i = 0; i < barrier_count; i++) {
      wait_imm& bar = ctx.barrier_imm[i];
      uint16_t& bar_ev = ctx.barrier_events[i];
      if (barrier & (1 << i)) {
         bar_ev |= event;
         if (counters & counter_lgkm)
            bar.lgkm = 0;
         if (counters & counter_vm)
            bar.vm = 0;
         if (counters & counter_exp)
            bar.exp = 0;
         if (counters & counter_vs)
            bar.vs = 0;
      } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
         if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
            update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
         if (counters & counter_vm && (bar_ev & vm_events) == event)
            update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
         if (counters & counter_exp && (bar_ev & exp_events) == event)
            update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
         if (counters & counter_vs && (bar_ev & vs_events) == event)
            update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
      }
   }
}

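/* Account for a newly issued event: bump the affected hardware counters
 * (saturating at their maximum) and increment the wait values of entries whose
 * outstanding events of that counter are of the same, in-order event. */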
void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
{
   uint8_t counters = get_counters_for_event(event);

   if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;
   if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
      ctx.exp_cnt++;
   if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
      ctx.vs_cnt++;

   update_barrier_imm(ctx, counters, event, barrier);

   if (ctx.unordered_events & event)
      return;

   if (ctx.pending_flat_lgkm)
      counters &= ~counter_lgkm;
   if (ctx.pending_flat_vm)
      counters &= ~counter_vm;

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
      wait_entry& entry = e.second;

      if (entry.events & ctx.unordered_events)
         continue;

      assert(entry.events);

      if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
         entry.imm.exp++;
      if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
         entry.imm.lgkm++;
      if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
         entry.imm.vm++;
      if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
         entry.imm.vs++;
   }
}

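/* FLAT loads use both vmcnt and lgkmcnt and, before GFX10, may complete out of
 * order with other memory events, so every tracked register has to assume the
 * worst case and wait for the counter to reach 0. */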
void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
{
   assert(ctx.chip_class < GFX10);

   if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
      ctx.lgkm_cnt++;
   if (ctx.vm_cnt <= ctx.max_vm_cnt)
      ctx.vm_cnt++;

   update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, barrier);

   for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
   {
      if (e.second.counters & counter_vm)
         e.second.imm.vm = 0;
      if (e.second.counters & counter_lgkm)
         e.second.imm.lgkm = 0;
   }
   ctx.pending_flat_lgkm = true;
   ctx.pending_flat_vm = true;
}

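/* Add a wait_entry for each register covered by reg/rc. wait_on_read is set
 * for definitions (later reads must wait) and unset for sources that only have
 * to be protected against being overwritten. */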
void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
{
   uint16_t counters = get_counters_for_event(event);
   wait_imm imm;
   if (counters & counter_lgkm)
      imm.lgkm = 0;
   if (counters & counter_vm)
      imm.vm = 0;
   if (counters & counter_exp)
      imm.exp = 0;
   if (counters & counter_vs)
      imm.vs = 0;

   wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);

   for (unsigned i = 0; i < rc.size(); i++) {
      auto it = ctx.gpr_map.emplace(PhysReg{reg.reg+i}, new_entry);
      if (!it.second)
         it.first->second.join(new_entry);
   }
}

void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
{
   if (!op.isConstant() && !op.isUndefined())
      insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
}

void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
{
   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
}

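/* Update the context for the events instr generates: bump the counters and
 * create wait entries for the registers it leaves in flight. */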
void gen(Instruction* instr, wait_ctx& ctx)
{
   switch (instr->format) {
   case Format::EXP: {
      Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);

      wait_event ev;
      if (exp_instr->dest <= 9)
         ev = event_exp_mrt_null;
      else if (exp_instr->dest <= 15)
         ev = event_exp_pos;
      else
         ev = event_exp_param;
      update_counters(ctx, ev);

      /* insert new entries for exported vgprs */
      for (unsigned i = 0; i < 4; i++)
      {
         if (exp_instr->enabled_mask & (1 << i)) {
            unsigned idx = exp_instr->compressed ? i >> 1 : i;
            assert(idx < exp_instr->operands.size());
            insert_wait_entry(ctx, exp_instr->operands[idx], ev);
         }
      }
      insert_wait_entry(ctx, exec, s2, ev, false);
      break;
   }
   case Format::FLAT: {
      if (ctx.chip_class < GFX10 && !instr->definitions.empty())
         update_counters_for_flat_load(ctx, barrier_buffer);
      else
         update_counters(ctx, event_flat, barrier_buffer);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_flat);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
      update_counters(ctx, event_smem, smem->barrier);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], event_smem);
      else if (ctx.chip_class >= GFX10 &&
               !smem->can_reorder &&
               smem->barrier == barrier_buffer)
         ctx.pending_s_buffer_store = true;

      break;
   }
   case Format::DS: {
      bool gds = static_cast<DS_instruction*>(instr)->gds;
      update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
      if (gds)
         update_counters(ctx, event_gds_gpr_lock);

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);

      if (gds) {
         for (const Operand& op : instr->operands)
            insert_wait_entry(ctx, op, event_gds_gpr_lock);
         insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
      }
      break;
   }
   case Format::MUBUF:
   case Format::MTBUF:
   case Format::MIMG:
   case Format::GLOBAL: {
      wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
      update_counters(ctx, ev, get_barrier_interaction(instr));

      if (!instr->definitions.empty())
         insert_wait_entry(ctx, instr->definitions[0], ev);

      if (ctx.chip_class == GFX6 &&
          instr->format != Format::MIMG &&
          instr->operands.size() == 4) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
      } else if (ctx.chip_class == GFX6 &&
                 instr->format == Format::MIMG &&
                 instr->operands[1].regClass().type() == RegType::vgpr) {
         ctx.exp_cnt++;
         update_counters(ctx, event_vmem_gpr_lock);
         insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
      }

      break;
   }
   case Format::SOPP: {
      if (instr->opcode == aco_opcode::s_sendmsg ||
          instr->opcode == aco_opcode::s_sendmsghalt)
         update_counters(ctx, event_sendmsg, get_barrier_interaction(instr));
   }
   default:
      break;
   }
}

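/* Materialize the queued wait: an s_waitcnt_vscnt for the vs counter (GFX10+)
 * and/or a regular s_waitcnt for the remaining counters. */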
void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
{
   if (imm.vs != wait_imm::unset_counter) {
      assert(ctx.chip_class >= GFX10);
      SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
      waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
      waitcnt_vs->imm = imm.vs;
      instructions.emplace_back(waitcnt_vs);
      imm.vs = wait_imm::unset_counter;
   }
   if (!imm.empty()) {
      SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
      waitcnt->imm = imm.pack(ctx.chip_class);
      waitcnt->block = -1;
      instructions.emplace_back(waitcnt);
   }
}

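/* Rebuild the instruction list of a block: the wait needed by each instruction
 * is collected in queued_imm and emitted right before the next instruction
 * that is kept (existing waits and pseudo barriers are dropped and folded in). */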
void handle_block(Program *program, Block& block, wait_ctx& ctx)
{
   std::vector<aco_ptr<Instruction>> new_instructions;

   wait_imm queued_imm;
   for (aco_ptr<Instruction>& instr : block.instructions) {
      bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();

      queued_imm.combine(kill(instr.get(), ctx));

      gen(instr.get(), ctx);

      if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
         if (!queued_imm.empty()) {
            emit_waitcnt(ctx, new_instructions, queued_imm);
            queued_imm = wait_imm();
         }
         new_instructions.emplace_back(std::move(instr));
      }
   }

   if (!queued_imm.empty())
      emit_waitcnt(ctx, new_instructions, queued_imm);

   block.instructions.swap(new_instructions);
}

} /* end namespace */

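/* Entry point of the pass: walk the blocks in program order, join the contexts
 * of all predecessors, and re-walk loops from their header until the contexts
 * stop changing, so that state from back-edges is taken into account. */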
void insert_wait_states(Program* program)
{
   /* per BB ctx */
   std::vector<bool> done(program->blocks.size());
   wait_ctx in_ctx[program->blocks.size()];
   wait_ctx out_ctx[program->blocks.size()];
   for (unsigned i = 0; i < program->blocks.size(); i++)
      in_ctx[i] = wait_ctx(program);
   std::stack<unsigned> loop_header_indices;
   unsigned loop_progress = 0;

   for (unsigned i = 0; i < program->blocks.size();) {
      Block& current = program->blocks[i++];
      wait_ctx ctx = in_ctx[current.index];

      if (current.kind & block_kind_loop_header) {
         loop_header_indices.push(current.index);
      } else if (current.kind & block_kind_loop_exit) {
         bool repeat = false;
         if (loop_progress == loop_header_indices.size()) {
            i = loop_header_indices.top();
            repeat = true;
         }
         loop_header_indices.pop();
         loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
         if (repeat)
            continue;
      }

      bool changed = false;
      for (unsigned b : current.linear_preds)
         changed |= ctx.join(&out_ctx[b], false);
      for (unsigned b : current.logical_preds)
         changed |= ctx.join(&out_ctx[b], true);

      in_ctx[current.index] = ctx;

      if (done[current.index] && !changed)
         continue;

      if (current.instructions.empty()) {
         out_ctx[current.index] = ctx;
         continue;
      }

      loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
      done[current.index] = true;

      handle_block(program, current, ctx);

      out_ctx[current.index] = ctx;
   }
}

}