aco: make PhysReg in units of bytes
[mesa.git] / src / amd / compiler / aco_insert_waitcnt.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include <algorithm>
26 #include <map>
27 #include <stack>
28 #include <math.h>
29
30 #include "aco_ir.h"
31 #include "vulkan/radv_shader.h"
32
33 namespace aco {
34
35 namespace {
36
37 /**
38 * The general idea of this pass is:
39 * The CFG is traversed in reverse postorder (forward) and loops are processed
40 * several times until no progress is made.
41 * Per BB, two wait_ctx are maintained: an in-context and an out-context.
42 * The in-context is the join of the out-contexts of the predecessors.
43 * The context contains a map: gpr -> wait_entry
44 * holding the information about the counter values that have to be waited for.
45 * Note: after merge nodes, the same register can end up with multiple
46 * counter values that have to be waited for.
47 *
48 * The values are updated according to the encountered instructions:
49 * - new events increment the counters of waits of the same type
50 * - waits remove gprs whose counter values the emitted s_waitcnt already covers.
51 */
52
53 // TODO: do a more clever insertion of wait_cnt (lgkm_cnt) when there is a load followed by a use of a previous load
54
55 /* Instructions of the same event will finish in-order except for smem
56 * and maybe flat. Instructions of different events may not finish in-order. */
57 enum wait_event : uint16_t {
58 event_smem = 1 << 0,
59 event_lds = 1 << 1,
60 event_gds = 1 << 2,
61 event_vmem = 1 << 3,
62 event_vmem_store = 1 << 4, /* GFX10+ */
63 event_flat = 1 << 5,
64 event_exp_pos = 1 << 6,
65 event_exp_param = 1 << 7,
66 event_exp_mrt_null = 1 << 8,
67 event_gds_gpr_lock = 1 << 9,
68 event_vmem_gpr_lock = 1 << 10,
69 event_sendmsg = 1 << 11,
70 num_events = 12,
71 };
72
73 enum counter_type : uint8_t {
74 counter_exp = 1 << 0,
75 counter_lgkm = 1 << 1,
76 counter_vm = 1 << 2,
77 counter_vs = 1 << 3,
78 num_counters = 4,
79 };
80
81 static const uint16_t exp_events = event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock;
82 static const uint16_t lgkm_events = event_smem | event_lds | event_gds | event_flat | event_sendmsg;
83 static const uint16_t vm_events = event_vmem | event_flat;
84 static const uint16_t vs_events = event_vmem_store;
85
86 uint8_t get_counters_for_event(wait_event ev)
87 {
88 switch (ev) {
89 case event_smem:
90 case event_lds:
91 case event_gds:
92 case event_sendmsg:
93 return counter_lgkm;
94 case event_vmem:
95 return counter_vm;
96 case event_vmem_store:
97 return counter_vs;
98 case event_flat:
99 return counter_vm | counter_lgkm;
100 case event_exp_pos:
101 case event_exp_param:
102 case event_exp_mrt_null:
103 case event_gds_gpr_lock:
104 case event_vmem_gpr_lock:
105 return counter_exp;
106 default:
107 return 0;
108 }
109 }
110
111 uint16_t get_events_for_counter(counter_type ctr)
112 {
113 switch (ctr) {
114 case counter_exp:
115 return exp_events;
116 case counter_lgkm:
117 return lgkm_events;
118 case counter_vm:
119 return vm_events;
120 case counter_vs:
121 return vs_events;
122 }
123 return 0;
124 }
125
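/* A wait_imm represents the counters of one s_waitcnt: vm, exp, lgkm and, on
 * GFX10+, vs (emitted as a separate s_waitcnt_vscnt). A value of unset_counter
 * means no wait is needed on that counter. */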
126 struct wait_imm {
127 static const uint8_t unset_counter = 0xff;
128
129 uint8_t vm;
130 uint8_t exp;
131 uint8_t lgkm;
132 uint8_t vs;
133
134 wait_imm() :
135 vm(unset_counter), exp(unset_counter), lgkm(unset_counter), vs(unset_counter) {}
136 wait_imm(uint16_t vm_, uint16_t exp_, uint16_t lgkm_, uint16_t vs_) :
137 vm(vm_), exp(exp_), lgkm(lgkm_), vs(vs_) {}
138
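/* Unpack a hardware s_waitcnt immediate: vm_cnt lives in bits 3:0 (GFX9+ also
 * bits 15:14), exp_cnt in bits 6:4 and lgkm_cnt in bits 11:8 (GFX10+ also
 * bits 13:12). pack() below is the inverse. */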
139 wait_imm(enum chip_class chip, uint16_t packed) : vs(unset_counter)
140 {
141 vm = packed & 0xf;
142 if (chip >= GFX9)
143 vm |= (packed >> 10) & 0x30;
144
145 exp = (packed >> 4) & 0x7;
146
147 lgkm = (packed >> 8) & 0xf;
148 if (chip >= GFX10)
149 lgkm |= (packed >> 8) & 0x30;
150 }
151
152 uint16_t pack(enum chip_class chip) const
153 {
154 uint16_t imm = 0;
155 assert(exp == unset_counter || exp <= 0x7);
156 switch (chip) {
157 case GFX10:
158 assert(lgkm == unset_counter || lgkm <= 0x3f);
159 assert(vm == unset_counter || vm <= 0x3f);
160 imm = ((vm & 0x30) << 10) | ((lgkm & 0x3f) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
161 break;
162 case GFX9:
163 assert(lgkm == unset_counter || lgkm <= 0xf);
164 assert(vm == unset_counter || vm <= 0x3f);
165 imm = ((vm & 0x30) << 10) | ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
166 break;
167 default:
168 assert(lgkm == unset_counter || lgkm <= 0xf);
169 assert(vm == unset_counter || vm <= 0xf);
170 imm = ((lgkm & 0xf) << 8) | ((exp & 0x7) << 4) | (vm & 0xf);
171 break;
172 }
173 if (chip < GFX9 && vm == wait_imm::unset_counter)
174 imm |= 0xc000; /* should have no effect on pre-GFX9 and now we won't have to worry about the architecture when interpreting the immediate */
175 if (chip < GFX10 && lgkm == wait_imm::unset_counter)
176 imm |= 0x3000; /* should have no effect on pre-GFX10 and now we won't have to worry about the architecture when interpreting the immediate */
177 return imm;
178 }
179
180 bool combine(const wait_imm& other)
181 {
182 bool changed = other.vm < vm || other.exp < exp || other.lgkm < lgkm || other.vs < vs;
183 vm = std::min(vm, other.vm);
184 exp = std::min(exp, other.exp);
185 lgkm = std::min(lgkm, other.lgkm);
186 vs = std::min(vs, other.vs);
187 return changed;
188 }
189
190 bool empty() const
191 {
192 return vm == unset_counter && exp == unset_counter &&
193 lgkm == unset_counter && vs == unset_counter;
194 }
195 };
196
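/* Per-register state: the events still outstanding for this register and the
 * counter values an s_waitcnt needs before the register may be overwritten
 * (and, if wait_on_read is set, before it may be read). The logical flag
 * records whether the entry is for a logical (non-linear) register, so joins
 * only merge it along the matching kind of predecessor. */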
197 struct wait_entry {
198 wait_imm imm;
199 uint16_t events; /* bitmask of wait_event */
200 uint8_t counters; /* bitmask of counter_type */
201 bool wait_on_read:1;
202 bool logical:1;
203
204 wait_entry(wait_event event, wait_imm imm, bool logical, bool wait_on_read)
205 : imm(imm), events(event), counters(get_counters_for_event(event)),
206 wait_on_read(wait_on_read), logical(logical) {}
207
208 bool join(const wait_entry& other)
209 {
210 bool changed = (other.events & ~events) ||
211 (other.counters & ~counters) ||
212 (other.wait_on_read && !wait_on_read);
213 events |= other.events;
214 counters |= other.counters;
215 changed |= imm.combine(other.imm);
216 wait_on_read = wait_on_read || other.wait_on_read;
217 assert(logical == other.logical);
218 return changed;
219 }
220
221 void remove_counter(counter_type counter)
222 {
223 counters &= ~counter;
224
225 if (counter == counter_lgkm) {
226 imm.lgkm = wait_imm::unset_counter;
227 events &= ~(event_smem | event_lds | event_gds | event_sendmsg);
228 }
229
230 if (counter == counter_vm) {
231 imm.vm = wait_imm::unset_counter;
232 events &= ~event_vmem;
233 }
234
235 if (counter == counter_exp) {
236 imm.exp = wait_imm::unset_counter;
237 events &= ~(event_exp_pos | event_exp_param | event_exp_mrt_null | event_gds_gpr_lock | event_vmem_gpr_lock);
238 }
239
240 if (counter == counter_vs) {
241 imm.vs = wait_imm::unset_counter;
242 events &= ~event_vmem_store;
243 }
244
245 if (!(counters & counter_lgkm) && !(counters & counter_vm))
246 events &= ~event_flat;
247 }
248 };
249
250 struct wait_ctx {
251 Program *program;
252 enum chip_class chip_class;
253 uint16_t max_vm_cnt;
254 uint16_t max_exp_cnt;
255 uint16_t max_lgkm_cnt;
256 uint16_t max_vs_cnt;
257 uint16_t unordered_events = event_smem | event_flat;
258
259 uint8_t vm_cnt = 0;
260 uint8_t exp_cnt = 0;
261 uint8_t lgkm_cnt = 0;
262 uint8_t vs_cnt = 0;
263 bool pending_flat_lgkm = false;
264 bool pending_flat_vm = false;
265 bool pending_s_buffer_store = false; /* GFX10 workaround */
266
267 wait_imm barrier_imm[barrier_count];
268 uint16_t barrier_events[barrier_count] = {}; /* bitmask of wait_event */
269
270 std::map<PhysReg,wait_entry> gpr_map;
271
272 /* used for vmem/smem scores */
273 bool collect_statistics;
274 Instruction *gen_instr;
275 std::map<Instruction *, unsigned> unwaited_instrs[num_counters];
276 std::map<PhysReg,std::set<Instruction *>> reg_instrs[num_counters];
277 std::vector<unsigned> wait_distances[num_events];
278
279 wait_ctx() {}
280 wait_ctx(Program *program_)
281 : program(program_),
282 chip_class(program_->chip_class),
283 max_vm_cnt(program_->chip_class >= GFX9 ? 62 : 14),
284 max_exp_cnt(6),
285 max_lgkm_cnt(program_->chip_class >= GFX10 ? 62 : 14),
286 max_vs_cnt(program_->chip_class >= GFX10 ? 62 : 0),
287 unordered_events(event_smem | (program_->chip_class < GFX10 ? event_flat : 0)) {}
288
289 bool join(const wait_ctx* other, bool logical)
290 {
291 bool changed = other->exp_cnt > exp_cnt ||
292 other->vm_cnt > vm_cnt ||
293 other->lgkm_cnt > lgkm_cnt ||
294 other->vs_cnt > vs_cnt ||
295 (other->pending_flat_lgkm && !pending_flat_lgkm) ||
296 (other->pending_flat_vm && !pending_flat_vm);
297
298 exp_cnt = std::max(exp_cnt, other->exp_cnt);
299 vm_cnt = std::max(vm_cnt, other->vm_cnt);
300 lgkm_cnt = std::max(lgkm_cnt, other->lgkm_cnt);
301 vs_cnt = std::max(vs_cnt, other->vs_cnt);
302 pending_flat_lgkm |= other->pending_flat_lgkm;
303 pending_flat_vm |= other->pending_flat_vm;
304 pending_s_buffer_store |= other->pending_s_buffer_store;
305
306 for (const std::pair<const PhysReg,wait_entry>& entry : other->gpr_map)
307 {
308 std::map<PhysReg,wait_entry>::iterator it = gpr_map.find(entry.first);
309 if (entry.second.logical != logical)
310 continue;
311
312 if (it != gpr_map.end()) {
313 changed |= it->second.join(entry.second);
314 } else {
315 gpr_map.insert(entry);
316 changed = true;
317 }
318 }
319
320 for (unsigned i = 0; i < barrier_count; i++) {
321 changed |= barrier_imm[i].combine(other->barrier_imm[i]);
322 changed |= other->barrier_events[i] & ~barrier_events[i];
323 barrier_events[i] |= other->barrier_events[i];
324 }
325
326 /* these are used for statistics, so don't update "changed" */
327 for (unsigned i = 0; i < num_counters; i++) {
328 for (std::pair<Instruction *, unsigned> instr : other->unwaited_instrs[i]) {
329 auto pos = unwaited_instrs[i].find(instr.first);
330 if (pos == unwaited_instrs[i].end())
331 unwaited_instrs[i].insert(instr);
332 else
333 pos->second = std::min(pos->second, instr.second);
334 }
335 /* don't use a foreach loop to avoid copies */
336 for (auto it = other->reg_instrs[i].begin(); it != other->reg_instrs[i].end(); ++it)
337 reg_instrs[i][it->first].insert(it->second.begin(), it->second.end());
338 }
339
340 return changed;
341 }
342
343 void wait_and_remove_from_entry(PhysReg reg, wait_entry& entry, counter_type counter) {
344 if (collect_statistics && (entry.counters & counter)) {
345 unsigned counter_idx = ffs(counter) - 1;
346 for (Instruction *instr : reg_instrs[counter_idx][reg]) {
347 auto pos = unwaited_instrs[counter_idx].find(instr);
348 if (pos == unwaited_instrs[counter_idx].end())
349 continue;
350
351 unsigned distance = pos->second;
352 unsigned events = entry.events & get_events_for_counter(counter);
353 while (events) {
354 unsigned event_idx = u_bit_scan(&events);
355 wait_distances[event_idx].push_back(distance);
356 }
357
358 unwaited_instrs[counter_idx].erase(instr);
359 }
360 reg_instrs[counter_idx][reg].clear();
361 }
362
363 entry.remove_counter(counter);
364 }
365
366 void advance_unwaited_instrs()
367 {
368 for (unsigned i = 0; i < num_counters; i++) {
369 for (auto it = unwaited_instrs[i].begin(); it != unwaited_instrs[i].end(); ++it)
370 it->second++;
371 }
372 }
373 };
374
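/* Determine the wait needed before instr: combine the imms of all map entries
 * for registers it reads (if their wait_on_read is set) or writes. Registers
 * that are overwritten by a VMEM or DS instruction and are only pending on the
 * same in-order event don't need a wait and are dropped from the map instead. */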
375 wait_imm check_instr(Instruction* instr, wait_ctx& ctx)
376 {
377 wait_imm wait;
378
379 for (const Operand op : instr->operands) {
380 if (op.isConstant() || op.isUndefined())
381 continue;
382
383 /* check consecutively read gprs */
384 for (unsigned j = 0; j < op.size(); j++) {
385 PhysReg reg{op.physReg() + j};
386 std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
387 if (it == ctx.gpr_map.end() || !it->second.wait_on_read)
388 continue;
389
390 wait.combine(it->second.imm);
391 }
392 }
393
394 for (const Definition& def : instr->definitions) {
395 /* check consecutively written gprs */
396 for (unsigned j = 0; j < def.getTemp().size(); j++)
397 {
398 PhysReg reg{def.physReg() + j};
399
400 std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.find(reg);
401 if (it == ctx.gpr_map.end())
402 continue;
403
404 /* Vector Memory reads and writes return in the order they were issued */
405 if (instr->isVMEM() && ((it->second.events & vm_events) == event_vmem)) {
406 it->second.remove_counter(counter_vm);
407 if (!it->second.counters)
408 it = ctx.gpr_map.erase(it);
409 continue;
410 }
411
412 /* LDS reads and writes return in the order they were issued. same for GDS */
413 if (instr->format == Format::DS) {
414 bool gds = static_cast<DS_instruction*>(instr)->gds;
415 if ((it->second.events & lgkm_events) == (gds ? event_gds : event_lds)) {
416 it->second.remove_counter(counter_lgkm);
417 if (!it->second.counters)
418 it = ctx.gpr_map.erase(it);
419 continue;
420 }
421 }
422
423 wait.combine(it->second.imm);
424 }
425 }
426
427 return wait;
428 }
429
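/* If instr is an s_waitcnt, or an s_waitcnt_vscnt writing the null SGPR,
 * return the wait it performs; otherwise return an empty wait_imm. */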
430 wait_imm parse_wait_instr(wait_ctx& ctx, Instruction *instr)
431 {
432 if (instr->opcode == aco_opcode::s_waitcnt_vscnt &&
433 instr->definitions[0].physReg() == sgpr_null) {
434 wait_imm imm;
435 imm.vs = std::min<uint8_t>(imm.vs, static_cast<SOPK_instruction*>(instr)->imm);
436 return imm;
437 } else if (instr->opcode == aco_opcode::s_waitcnt) {
438 return wait_imm(ctx.chip_class, static_cast<SOPP_instruction*>(instr)->imm);
439 }
440 return wait_imm();
441 }
442
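/* Compute the wait that has to be emitted before instr and update the context
 * accordingly: lower the pending counters, clear satisfied barrier imms and
 * remove gpr entries that the wait covers. */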
443 wait_imm kill(Instruction* instr, wait_ctx& ctx)
444 {
445 wait_imm imm;
446 if (ctx.exp_cnt || ctx.vm_cnt || ctx.lgkm_cnt)
447 imm.combine(check_instr(instr, ctx));
448
449 imm.combine(parse_wait_instr(ctx, instr));
450
451
452 /* It's required to wait for scalar stores before "writing back" data.
453 * It shouldn't cost anything anyway since we're about to do s_endpgm.
454 */
455 if (ctx.lgkm_cnt && instr->opcode == aco_opcode::s_dcache_wb) {
456 assert(ctx.chip_class >= GFX8);
457 imm.lgkm = 0;
458 }
459
460 if (ctx.chip_class >= GFX10) {
461 /* GFX10: A store followed by a load at the same address causes a problem because
462 * the load doesn't load the correct values unless we wait for the store first.
463 * This is NOT mitigated by an s_nop.
464 *
465 * TODO: Refine this when we have proper alias analysis.
466 */
467 SMEM_instruction *smem = static_cast<SMEM_instruction *>(instr);
468 if (ctx.pending_s_buffer_store &&
469 !smem->definitions.empty() &&
470 !smem->can_reorder && smem->barrier == barrier_buffer) {
471 imm.lgkm = 0;
472 }
473 }
474
475 if (instr->format == Format::PSEUDO_BARRIER) {
476 switch (instr->opcode) {
477 case aco_opcode::p_memory_barrier_common:
478 imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
479 imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
480 imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
481 if (ctx.program->workgroup_size > ctx.program->wave_size)
482 imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
483 break;
484 case aco_opcode::p_memory_barrier_atomic:
485 imm.combine(ctx.barrier_imm[ffs(barrier_atomic) - 1]);
486 break;
487 /* see comment in aco_scheduler.cpp's can_move_instr() on why these barriers are merged */
488 case aco_opcode::p_memory_barrier_buffer:
489 case aco_opcode::p_memory_barrier_image:
490 imm.combine(ctx.barrier_imm[ffs(barrier_buffer) - 1]);
491 imm.combine(ctx.barrier_imm[ffs(barrier_image) - 1]);
492 break;
493 case aco_opcode::p_memory_barrier_shared:
494 if (ctx.program->workgroup_size > ctx.program->wave_size)
495 imm.combine(ctx.barrier_imm[ffs(barrier_shared) - 1]);
496 break;
497 case aco_opcode::p_memory_barrier_gs_data:
498 imm.combine(ctx.barrier_imm[ffs(barrier_gs_data) - 1]);
499 break;
500 case aco_opcode::p_memory_barrier_gs_sendmsg:
501 imm.combine(ctx.barrier_imm[ffs(barrier_gs_sendmsg) - 1]);
502 break;
503 default:
504 assert(false);
505 break;
506 }
507 }
508
509 if (!imm.empty()) {
510 if (ctx.pending_flat_vm && imm.vm != wait_imm::unset_counter)
511 imm.vm = 0;
512 if (ctx.pending_flat_lgkm && imm.lgkm != wait_imm::unset_counter)
513 imm.lgkm = 0;
514
515 /* reset counters */
516 ctx.exp_cnt = std::min(ctx.exp_cnt, imm.exp);
517 ctx.vm_cnt = std::min(ctx.vm_cnt, imm.vm);
518 ctx.lgkm_cnt = std::min(ctx.lgkm_cnt, imm.lgkm);
519 ctx.vs_cnt = std::min(ctx.vs_cnt, imm.vs);
520
521 /* update barrier wait imms */
522 for (unsigned i = 0; i < barrier_count; i++) {
523 wait_imm& bar = ctx.barrier_imm[i];
524 uint16_t& bar_ev = ctx.barrier_events[i];
525 if (bar.exp != wait_imm::unset_counter && imm.exp <= bar.exp) {
526 bar.exp = wait_imm::unset_counter;
527 bar_ev &= ~exp_events;
528 }
529 if (bar.vm != wait_imm::unset_counter && imm.vm <= bar.vm) {
530 bar.vm = wait_imm::unset_counter;
531 bar_ev &= ~(vm_events & ~event_flat);
532 }
533 if (bar.lgkm != wait_imm::unset_counter && imm.lgkm <= bar.lgkm) {
534 bar.lgkm = wait_imm::unset_counter;
535 bar_ev &= ~(lgkm_events & ~event_flat);
536 }
537 if (bar.vs != wait_imm::unset_counter && imm.vs <= bar.vs) {
538 bar.vs = wait_imm::unset_counter;
539 bar_ev &= ~vs_events;
540 }
541 if (bar.vm == wait_imm::unset_counter && bar.lgkm == wait_imm::unset_counter)
542 bar_ev &= ~event_flat;
543 }
544
545 /* remove from the map all gprs whose required waits are satisfied by this imm */
546 std::map<PhysReg,wait_entry>::iterator it = ctx.gpr_map.begin();
547 while (it != ctx.gpr_map.end())
548 {
549 if (imm.exp != wait_imm::unset_counter && imm.exp <= it->second.imm.exp)
550 ctx.wait_and_remove_from_entry(it->first, it->second, counter_exp);
551 if (imm.vm != wait_imm::unset_counter && imm.vm <= it->second.imm.vm)
552 ctx.wait_and_remove_from_entry(it->first, it->second, counter_vm);
553 if (imm.lgkm != wait_imm::unset_counter && imm.lgkm <= it->second.imm.lgkm)
554 ctx.wait_and_remove_from_entry(it->first, it->second, counter_lgkm);
555 if (imm.vs != wait_imm::unset_counter && imm.vs <= it->second.imm.vs)
556 ctx.wait_and_remove_from_entry(it->first, it->second, counter_vs);
557 if (!it->second.counters)
558 it = ctx.gpr_map.erase(it);
559 else
560 it++;
561 }
562 }
563
564 if (imm.vm == 0)
565 ctx.pending_flat_vm = false;
566 if (imm.lgkm == 0) {
567 ctx.pending_flat_lgkm = false;
568 ctx.pending_s_buffer_store = false;
569 }
570
571 return imm;
572 }
573
574 void update_barrier_counter(uint8_t *ctr, unsigned max)
575 {
576 if (*ctr != wait_imm::unset_counter && *ctr < max)
577 (*ctr)++;
578 }
579
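/* barrier_imm[i] tracks, per barrier_interaction bit, the strictest wait a
 * memory barrier of that kind would currently need. Events that interact with
 * the barrier reset the relevant counters to 0; unrelated in-order events of
 * the same type let the required wait be relaxed by one. */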
580 void update_barrier_imm(wait_ctx& ctx, uint8_t counters, wait_event event, barrier_interaction barrier)
581 {
582 for (unsigned i = 0; i < barrier_count; i++) {
583 wait_imm& bar = ctx.barrier_imm[i];
584 uint16_t& bar_ev = ctx.barrier_events[i];
585 if (barrier & (1 << i)) {
586 bar_ev |= event;
587 if (counters & counter_lgkm)
588 bar.lgkm = 0;
589 if (counters & counter_vm)
590 bar.vm = 0;
591 if (counters & counter_exp)
592 bar.exp = 0;
593 if (counters & counter_vs)
594 bar.vs = 0;
595 } else if (!(bar_ev & ctx.unordered_events) && !(ctx.unordered_events & event)) {
596 if (counters & counter_lgkm && (bar_ev & lgkm_events) == event)
597 update_barrier_counter(&bar.lgkm, ctx.max_lgkm_cnt);
598 if (counters & counter_vm && (bar_ev & vm_events) == event)
599 update_barrier_counter(&bar.vm, ctx.max_vm_cnt);
600 if (counters & counter_exp && (bar_ev & exp_events) == event)
601 update_barrier_counter(&bar.exp, ctx.max_exp_cnt);
602 if (counters & counter_vs && (bar_ev & vs_events) == event)
603 update_barrier_counter(&bar.vs, ctx.max_vs_cnt);
604 }
605 }
606 }
607
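/* Account for a new event: bump the pending hardware counters and, for map
 * entries whose outstanding events of the same counter class are exactly this
 * in-order event, increment their imm, since one more newer event may remain
 * outstanding without delaying them. */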
608 void update_counters(wait_ctx& ctx, wait_event event, barrier_interaction barrier=barrier_none)
609 {
610 uint8_t counters = get_counters_for_event(event);
611
612 if (counters & counter_lgkm && ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
613 ctx.lgkm_cnt++;
614 if (counters & counter_vm && ctx.vm_cnt <= ctx.max_vm_cnt)
615 ctx.vm_cnt++;
616 if (counters & counter_exp && ctx.exp_cnt <= ctx.max_exp_cnt)
617 ctx.exp_cnt++;
618 if (counters & counter_vs && ctx.vs_cnt <= ctx.max_vs_cnt)
619 ctx.vs_cnt++;
620
621 update_barrier_imm(ctx, counters, event, barrier);
622
623 if (ctx.unordered_events & event)
624 return;
625
626 if (ctx.pending_flat_lgkm)
627 counters &= ~counter_lgkm;
628 if (ctx.pending_flat_vm)
629 counters &= ~counter_vm;
630
631 for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map) {
632 wait_entry& entry = e.second;
633
634 if (entry.events & ctx.unordered_events)
635 continue;
636
637 assert(entry.events);
638
639 if ((counters & counter_exp) && (entry.events & exp_events) == event && entry.imm.exp < ctx.max_exp_cnt)
640 entry.imm.exp++;
641 if ((counters & counter_lgkm) && (entry.events & lgkm_events) == event && entry.imm.lgkm < ctx.max_lgkm_cnt)
642 entry.imm.lgkm++;
643 if ((counters & counter_vm) && (entry.events & vm_events) == event && entry.imm.vm < ctx.max_vm_cnt)
644 entry.imm.vm++;
645 if ((counters & counter_vs) && (entry.events & vs_events) == event && entry.imm.vs < ctx.max_vs_cnt)
646 entry.imm.vs++;
647 }
648 }
649
650 void update_counters_for_flat_load(wait_ctx& ctx, barrier_interaction barrier=barrier_none)
651 {
652 assert(ctx.chip_class < GFX10);
653
654 if (ctx.lgkm_cnt <= ctx.max_lgkm_cnt)
655 ctx.lgkm_cnt++;
656 if (ctx.vm_cnt <= ctx.max_vm_cnt)
657 ctx.vm_cnt++;
658
659 update_barrier_imm(ctx, counter_vm | counter_lgkm, event_flat, barrier);
660
661 for (std::pair<const PhysReg,wait_entry>& e : ctx.gpr_map)
662 {
663 if (e.second.counters & counter_vm)
664 e.second.imm.vm = 0;
665 if (e.second.counters & counter_lgkm)
666 e.second.imm.lgkm = 0;
667 }
668 ctx.pending_flat_lgkm = true;
669 ctx.pending_flat_vm = true;
670 }
671
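/* Record that the registers reg..reg+rc.size()-1 depend on the given event by
 * merging a new wait_entry (relevant counters set to 0, i.e. a full wait) into
 * the map for each of them. */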
672 void insert_wait_entry(wait_ctx& ctx, PhysReg reg, RegClass rc, wait_event event, bool wait_on_read)
673 {
674 uint16_t counters = get_counters_for_event(event);
675 wait_imm imm;
676 if (counters & counter_lgkm)
677 imm.lgkm = 0;
678 if (counters & counter_vm)
679 imm.vm = 0;
680 if (counters & counter_exp)
681 imm.exp = 0;
682 if (counters & counter_vs)
683 imm.vs = 0;
684
685 wait_entry new_entry(event, imm, !rc.is_linear(), wait_on_read);
686
687 for (unsigned i = 0; i < rc.size(); i++) {
688 auto it = ctx.gpr_map.emplace(PhysReg{reg.reg()+i}, new_entry);
689 if (!it.second)
690 it.first->second.join(new_entry);
691 }
692
693 if (ctx.collect_statistics) {
694 unsigned counters_todo = counters;
695 while (counters_todo) {
696 unsigned i = u_bit_scan(&counters_todo);
697 ctx.unwaited_instrs[i].insert(std::make_pair(ctx.gen_instr, 0u));
698 for (unsigned j = 0; j < rc.size(); j++)
699 ctx.reg_instrs[i][PhysReg{reg.reg()+j}].insert(ctx.gen_instr);
700 }
701 }
702 }
703
704 void insert_wait_entry(wait_ctx& ctx, Operand op, wait_event event)
705 {
706 if (!op.isConstant() && !op.isUndefined())
707 insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false);
708 }
709
710 void insert_wait_entry(wait_ctx& ctx, Definition def, wait_event event)
711 {
712 insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true);
713 }
714
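/* Update the context with the events generated by instr and create wait
 * entries for the registers it defines and, where needed, for the registers it
 * reads (export sources, GDS operands, GFX6 VMEM data). */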
715 void gen(Instruction* instr, wait_ctx& ctx)
716 {
717 switch (instr->format) {
718 case Format::EXP: {
719 Export_instruction* exp_instr = static_cast<Export_instruction*>(instr);
720
721 wait_event ev;
722 if (exp_instr->dest <= 9)
723 ev = event_exp_mrt_null;
724 else if (exp_instr->dest <= 15)
725 ev = event_exp_pos;
726 else
727 ev = event_exp_param;
728 update_counters(ctx, ev);
729
730 /* insert new entries for exported vgprs */
731 for (unsigned i = 0; i < 4; i++)
732 {
733 if (exp_instr->enabled_mask & (1 << i)) {
734 unsigned idx = exp_instr->compressed ? i >> 1 : i;
735 assert(idx < exp_instr->operands.size());
736 insert_wait_entry(ctx, exp_instr->operands[idx], ev);
737
738 }
739 }
740 insert_wait_entry(ctx, exec, s2, ev, false);
741 break;
742 }
743 case Format::FLAT: {
744 if (ctx.chip_class < GFX10 && !instr->definitions.empty())
745 update_counters_for_flat_load(ctx, barrier_buffer);
746 else
747 update_counters(ctx, event_flat, barrier_buffer);
748
749 if (!instr->definitions.empty())
750 insert_wait_entry(ctx, instr->definitions[0], event_flat);
751 break;
752 }
753 case Format::SMEM: {
754 SMEM_instruction *smem = static_cast<SMEM_instruction*>(instr);
755 update_counters(ctx, event_smem, static_cast<SMEM_instruction*>(instr)->barrier);
756
757 if (!instr->definitions.empty())
758 insert_wait_entry(ctx, instr->definitions[0], event_smem);
759 else if (ctx.chip_class >= GFX10 &&
760 !smem->can_reorder &&
761 smem->barrier == barrier_buffer)
762 ctx.pending_s_buffer_store = true;
763
764 break;
765 }
766 case Format::DS: {
767 bool gds = static_cast<DS_instruction*>(instr)->gds;
768 update_counters(ctx, gds ? event_gds : event_lds, gds ? barrier_none : barrier_shared);
769 if (gds)
770 update_counters(ctx, event_gds_gpr_lock);
771
772 if (!instr->definitions.empty())
773 insert_wait_entry(ctx, instr->definitions[0], gds ? event_gds : event_lds);
774
775 if (gds) {
776 for (const Operand& op : instr->operands)
777 insert_wait_entry(ctx, op, event_gds_gpr_lock);
778 insert_wait_entry(ctx, exec, s2, event_gds_gpr_lock, false);
779 }
780 break;
781 }
782 case Format::MUBUF:
783 case Format::MTBUF:
784 case Format::MIMG:
785 case Format::GLOBAL: {
786 wait_event ev = !instr->definitions.empty() || ctx.chip_class < GFX10 ? event_vmem : event_vmem_store;
787 update_counters(ctx, ev, get_barrier_interaction(instr));
788
789 if (!instr->definitions.empty())
790 insert_wait_entry(ctx, instr->definitions[0], ev);
791
792 if (ctx.chip_class == GFX6 &&
793 instr->format != Format::MIMG &&
794 instr->operands.size() == 4) {
795 ctx.exp_cnt++;
796 update_counters(ctx, event_vmem_gpr_lock);
797 insert_wait_entry(ctx, instr->operands[3], event_vmem_gpr_lock);
798 } else if (ctx.chip_class == GFX6 &&
799 instr->format == Format::MIMG &&
800 instr->operands[1].regClass().type() == RegType::vgpr) {
801 ctx.exp_cnt++;
802 update_counters(ctx, event_vmem_gpr_lock);
803 insert_wait_entry(ctx, instr->operands[1], event_vmem_gpr_lock);
804 }
805
806 break;
807 }
808 case Format::SOPP: {
809 if (instr->opcode == aco_opcode::s_sendmsg ||
810 instr->opcode == aco_opcode::s_sendmsghalt)
811 update_counters(ctx, event_sendmsg, get_barrier_interaction(instr));
812 }
813 default:
814 break;
815 }
816 }
817
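/* Emit the queued wait: an s_waitcnt_vscnt (GFX10+) for the vs counter and a
 * regular s_waitcnt for the remaining counters. */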
818 void emit_waitcnt(wait_ctx& ctx, std::vector<aco_ptr<Instruction>>& instructions, wait_imm imm)
819 {
820 if (imm.vs != wait_imm::unset_counter) {
821 assert(ctx.chip_class >= GFX10);
822 SOPK_instruction* waitcnt_vs = create_instruction<SOPK_instruction>(aco_opcode::s_waitcnt_vscnt, Format::SOPK, 0, 1);
823 waitcnt_vs->definitions[0] = Definition(sgpr_null, s1);
824 waitcnt_vs->imm = imm.vs;
825 instructions.emplace_back(waitcnt_vs);
826 imm.vs = wait_imm::unset_counter;
827 }
828 if (!imm.empty()) {
829 SOPP_instruction* waitcnt = create_instruction<SOPP_instruction>(aco_opcode::s_waitcnt, Format::SOPP, 0, 0);
830 waitcnt->imm = imm.pack(ctx.chip_class);
831 waitcnt->block = -1;
832 instructions.emplace_back(waitcnt);
833 }
834 }
835
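/* Rebuild the block's instruction list, emitting the waits gathered by kill()
 * right before the first instruction that needs them. Existing s_waitcnt and
 * barrier pseudo-instructions are folded into the queued imm instead of being
 * kept as separate instructions. */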
836 void handle_block(Program *program, Block& block, wait_ctx& ctx)
837 {
838 std::vector<aco_ptr<Instruction>> new_instructions;
839
840 wait_imm queued_imm;
841
842 ctx.collect_statistics = program->collect_statistics;
843
844 for (aco_ptr<Instruction>& instr : block.instructions) {
845 bool is_wait = !parse_wait_instr(ctx, instr.get()).empty();
846
847 queued_imm.combine(kill(instr.get(), ctx));
848
849 ctx.gen_instr = instr.get();
850 gen(instr.get(), ctx);
851
852 if (instr->format != Format::PSEUDO_BARRIER && !is_wait) {
853 if (!queued_imm.empty()) {
854 emit_waitcnt(ctx, new_instructions, queued_imm);
855 queued_imm = wait_imm();
856 }
857 new_instructions.emplace_back(std::move(instr));
858
859 if (ctx.collect_statistics)
860 ctx.advance_unwaited_instrs();
861 }
862 }
863
864 if (!queued_imm.empty())
865 emit_waitcnt(ctx, new_instructions, queued_imm);
866
867 block.instructions.swap(new_instructions);
868 }
869
870 } /* end namespace */
871
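/* Turn the wait distances collected per event into a single statistics score:
 * larger distances between a memory instruction and the wait on its result
 * score higher, with diminishing returns beyond an event-specific threshold. */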
872 static uint32_t calculate_score(unsigned num_ctx, wait_ctx *ctx, uint32_t event_mask)
873 {
874 double result = 0.0;
875 unsigned num_waits = 0;
876 while (event_mask) {
877 unsigned event_index = u_bit_scan(&event_mask);
878 for (unsigned i = 0; i < num_ctx; i++) {
879 for (unsigned dist : ctx[i].wait_distances[event_index]) {
880 double score = dist;
881 /* for many events, excessive distances provide little benefit, so
882 * decrease the score in that case. */
883 double threshold = INFINITY;
884 double inv_strength = 0.000001;
885 switch (1 << event_index) {
886 case event_smem:
887 threshold = 70.0;
888 inv_strength = 75.0;
889 break;
890 case event_vmem:
891 case event_vmem_store:
892 case event_flat:
893 threshold = 230.0;
894 inv_strength = 150.0;
895 break;
896 case event_lds:
897 threshold = 16.0;
898 break;
899 default:
900 break;
901 }
902 if (score > threshold) {
903 score -= threshold;
904 score = threshold + score / (1.0 + score / inv_strength);
905 }
906
907 /* we don't want increases in high scores to hide decreases in low scores,
908 * so raise to the power of 0.1 before averaging. */
909 result += pow(score, 0.1);
910 num_waits++;
911 }
912 }
913 }
914 return round(pow(result / num_waits, 10.0) * 10.0);
915 }
916
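/* Pass entry point: walk the blocks in program order, join the predecessors'
 * out-contexts into each block's in-context and revisit loop bodies until the
 * contexts stop changing, then insert the waitcnt instructions per block. */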
917 void insert_wait_states(Program* program)
918 {
919 /* per BB ctx */
920 std::vector<bool> done(program->blocks.size());
921 wait_ctx in_ctx[program->blocks.size()];
922 wait_ctx out_ctx[program->blocks.size()];
923
924 for (unsigned i = 0; i < program->blocks.size(); i++)
925 in_ctx[i] = wait_ctx(program);
926 std::stack<unsigned> loop_header_indices;
927 unsigned loop_progress = 0;
928
929 for (unsigned i = 0; i < program->blocks.size();) {
930 Block& current = program->blocks[i++];
931 wait_ctx ctx = in_ctx[current.index];
932
933 if (current.kind & block_kind_loop_header) {
934 loop_header_indices.push(current.index);
935 } else if (current.kind & block_kind_loop_exit) {
936 bool repeat = false;
937 if (loop_progress == loop_header_indices.size()) {
938 i = loop_header_indices.top();
939 repeat = true;
940 }
941 loop_header_indices.pop();
942 loop_progress = std::min<unsigned>(loop_progress, loop_header_indices.size());
943 if (repeat)
944 continue;
945 }
946
947 bool changed = false;
948 for (unsigned b : current.linear_preds)
949 changed |= ctx.join(&out_ctx[b], false);
950 for (unsigned b : current.logical_preds)
951 changed |= ctx.join(&out_ctx[b], true);
952
953 if (done[current.index] && !changed) {
954 in_ctx[current.index] = std::move(ctx);
955 continue;
956 } else {
957 in_ctx[current.index] = ctx;
958 }
959
960 if (current.instructions.empty()) {
961 out_ctx[current.index] = std::move(ctx);
962 continue;
963 }
964
965 loop_progress = std::max<unsigned>(loop_progress, current.loop_nest_depth);
966 done[current.index] = true;
967
968 handle_block(program, current, ctx);
969
970 out_ctx[current.index] = std::move(ctx);
971 }
972
973 if (program->collect_statistics) {
974 program->statistics[statistic_vmem_score] =
975 calculate_score(program->blocks.size(), out_ctx, event_vmem | event_flat | event_vmem_store);
976 program->statistics[statistic_smem_score] =
977 calculate_score(program->blocks.size(), out_ctx, event_smem);
978 }
979 }
980
981 }
982