aco: allow barriers to be skipped during scheduling
[mesa.git] / src / amd / compiler / aco_scheduler.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include "aco_ir.h"
26 #include "aco_builder.h"
27 #include <unordered_set>
28 #include <algorithm>
29
30 #include "vulkan/radv_shader.h" // for radv_nir_compiler_options
31 #include "amdgfxregs.h"
32
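/* Tuning parameters: the window sizes and move budgets below shrink as
 * num_waves grows, presumably because additional resident waves already help
 * to hide memory latency. */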
33 #define SMEM_WINDOW_SIZE (350 - ctx.num_waves * 35)
34 #define VMEM_WINDOW_SIZE (1024 - ctx.num_waves * 64)
35 #define POS_EXP_WINDOW_SIZE 512
36 #define SMEM_MAX_MOVES (64 - ctx.num_waves * 4)
37 #define VMEM_MAX_MOVES (128 - ctx.num_waves * 8)
38 /* creating clauses decreases def-use distances, so be less aggressive when num_waves is low */
39 #define VMEM_CLAUSE_MAX_GRAB_DIST ((ctx.num_waves - 1) * 8)
40 #define POS_EXP_MAX_MOVES 512
41
42 namespace aco {
43
44 enum MoveResult {
45 move_success,
46 move_fail_ssa,
47 move_fail_rar,
48 move_fail_pressure,
49 };
50
51 struct MoveState {
52 RegisterDemand max_registers;
53
54 Block *block;
55 Instruction *current;
56 RegisterDemand *register_demand;
57 bool improved_rar;
58
59 std::vector<bool> depends_on;
60 /* Two are needed because, for downwards VMEM scheduling, one needs to
61 * exclude the instructions in the clause, since new instructions in the
62 * clause are not moved past any other instructions in the clause. */
63 std::vector<bool> RAR_dependencies;
64 std::vector<bool> RAR_dependencies_clause;
65
66 int source_idx;
67 int insert_idx, insert_idx_clause;
68 RegisterDemand total_demand, total_demand_clause;
69
70 /* for moving instructions before the current instruction to after it */
71 void downwards_init(int current_idx, bool improved_rar, bool may_form_clauses);
72 MoveResult downwards_move(bool clause);
73 void downwards_skip();
74
75 /* for moving instructions after the first use of the current instruction upwards */
76 void upwards_init(int source_idx, bool improved_rar);
77 bool upwards_check_deps();
78 void upwards_set_insert_idx(int before);
79 MoveResult upwards_move();
80 void upwards_skip();
81
82 private:
83 void downwards_advance_helper();
84 };
85
86 struct sched_ctx {
87 int16_t num_waves;
88 int16_t last_SMEM_stall;
89 int last_SMEM_dep_idx;
90 MoveState mv;
91 };
92
93 /* This scheduler is a simple bottom-up pass based on ideas from
94 * "A Novel Lightweight Instruction Scheduling Algorithm for Just-In-Time Compiler"
95 * by Xiaohua Shi and Peng Guo.
96 * The basic approach is to iterate over all instructions. When a memory instruction
97 * is encountered, it tries to move independent instructions from above and below
98 * between the memory instruction and its first user.
99 * The novelty is that this scheduler takes the current register pressure into account:
100 * instructions will only be moved if the register pressure won't exceed a certain bound.
101 */
102
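/* Moves the element at index idx (relative to begin_it) so that it ends up
 * directly in front of the element that was previously at index `before`,
 * shifting everything in between by one position. For example, with
 * {a,b,c,d}: move_element(begin, 0, 3) yields {b,c,a,d} and
 * move_element(begin, 3, 1) yields {a,d,b,c}. */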
103 template <typename T>
104 void move_element(T begin_it, size_t idx, size_t before) {
105 if (idx < before) {
106 auto begin = std::next(begin_it, idx);
107 auto end = std::next(begin_it, before);
108 std::rotate(begin, begin + 1, end);
109 } else if (idx > before) {
110 auto begin = std::next(begin_it, before);
111 auto end = std::next(begin_it, idx + 1);
112 std::rotate(begin, end - 1, end);
113 }
114 }
115
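/* Net change in register demand caused by instr: definitions which stay live
 * past the instruction are added, operands which are killed here are
 * subtracted. */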
116 static RegisterDemand getLiveChanges(aco_ptr<Instruction>& instr)
117 {
118 RegisterDemand changes;
119 for (const Definition& def : instr->definitions) {
120 if (!def.isTemp() || def.isKill())
121 continue;
122 changes += def.getTemp();
123 }
124
125 for (const Operand& op : instr->operands) {
126 if (!op.isTemp() || !op.isFirstKill())
127 continue;
128 changes -= op.getTemp();
129 }
130
131 return changes;
132 }
133
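/* Registers which are written by instr but already die at instr itself, i.e.
 * they are only needed for the duration of this instruction. */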
134 static RegisterDemand getTempRegisters(aco_ptr<Instruction>& instr)
135 {
136 RegisterDemand temp_registers;
137 for (const Definition& def : instr->definitions) {
138 if (!def.isTemp() || !def.isKill())
139 continue;
140 temp_registers += def.getTemp();
141 }
142 return temp_registers;
143 }
144
145 void MoveState::downwards_advance_helper()
146 {
147 source_idx--;
148 total_demand.update(register_demand[source_idx]);
149 }
150
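/* Downwards pass: the current (memory) instruction stays in place while
 * independent instructions found above it are moved below it. source_idx
 * walks backwards from current over the candidates; successful candidates are
 * inserted at insert_idx (just below current), or at insert_idx_clause (just
 * above current) when they join a memory clause with it. */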
151 void MoveState::downwards_init(int current_idx, bool improved_rar_, bool may_form_clauses)
152 {
153 improved_rar = improved_rar_;
154 source_idx = current_idx;
155
156 insert_idx = current_idx + 1;
157 insert_idx_clause = current_idx;
158
159 total_demand = total_demand_clause = register_demand[current_idx];
160
161 std::fill(depends_on.begin(), depends_on.end(), false);
162 if (improved_rar) {
163 std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
164 if (may_form_clauses)
165 std::fill(RAR_dependencies_clause.begin(), RAR_dependencies_clause.end(), false);
166 }
167
168 for (const Operand& op : current->operands) {
169 if (op.isTemp()) {
170 depends_on[op.tempId()] = true;
171 if (improved_rar && op.isFirstKill())
172 RAR_dependencies[op.tempId()] = true;
173 }
174 }
175
176 /* update total_demand/source_idx */
177 downwards_advance_helper();
178 }
179
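/* Tries to move the candidate at source_idx below the current instruction (or
 * into the clause directly in front of it). This fails if the candidate
 * defines a temporary that current or a skipped instruction uses, if one of
 * its operands is killed by such an instruction, or if the move would push
 * register demand above max_registers at either location. */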
180 MoveResult MoveState::downwards_move(bool clause)
181 {
182 aco_ptr<Instruction>& instr = block->instructions[source_idx];
183
184 for (const Definition& def : instr->definitions)
185 if (def.isTemp() && depends_on[def.tempId()])
186 return move_fail_ssa;
187
188 /* check if one of the candidate's operands is killed by a depending instruction */
189 std::vector<bool>& RAR_deps = improved_rar ? (clause ? RAR_dependencies_clause : RAR_dependencies) : depends_on;
190 for (const Operand& op : instr->operands) {
191 if (op.isTemp() && RAR_deps[op.tempId()]) {
192 // FIXME: account for difference in register pressure
193 return move_fail_rar;
194 }
195 }
196
197 if (clause) {
198 for (const Operand& op : instr->operands) {
199 if (op.isTemp()) {
200 depends_on[op.tempId()] = true;
201 if (op.isFirstKill())
202 RAR_dependencies[op.tempId()] = true;
203 }
204 }
205 }
206
207 int dest_insert_idx = clause ? insert_idx_clause : insert_idx;
208 RegisterDemand register_pressure = clause ? total_demand_clause : total_demand;
209
210 const RegisterDemand candidate_diff = getLiveChanges(instr);
211 const RegisterDemand temp = getTempRegisters(instr);
212 if (RegisterDemand(register_pressure - candidate_diff).exceeds(max_registers))
213 return move_fail_pressure;
214 const RegisterDemand temp2 = getTempRegisters(block->instructions[dest_insert_idx - 1]);
215 const RegisterDemand new_demand = register_demand[dest_insert_idx - 1] - temp2 + temp;
216 if (new_demand.exceeds(max_registers))
217 return move_fail_pressure;
218
219 /* move the candidate below the memory load */
220 move_element(block->instructions.begin(), source_idx, dest_insert_idx);
221
222 /* update register pressure */
223 move_element(register_demand, source_idx, dest_insert_idx);
224 for (int i = source_idx; i < dest_insert_idx - 1; i++)
225 register_demand[i] -= candidate_diff;
226 register_demand[dest_insert_idx - 1] = new_demand;
227 total_demand_clause -= candidate_diff;
228 insert_idx_clause--;
229 if (!clause) {
230 total_demand -= candidate_diff;
231 insert_idx--;
232 }
233
234 downwards_advance_helper();
235 return move_success;
236 }
237
238 void MoveState::downwards_skip()
239 {
240 aco_ptr<Instruction>& instr = block->instructions[source_idx];
241
242 for (const Operand& op : instr->operands) {
243 if (op.isTemp()) {
244 depends_on[op.tempId()] = true;
245 if (improved_rar && op.isFirstKill()) {
246 RAR_dependencies[op.tempId()] = true;
247 RAR_dependencies_clause[op.tempId()] = true;
248 }
249 }
250 }
251 total_demand_clause.update(register_demand[source_idx]);
252
253 downwards_advance_helper();
254 }
255
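/* Upwards pass: instructions located below the current instruction are moved
 * up to just above the first instruction that depends on current (insert_idx),
 * so that more independent work sits between the memory instruction and the
 * consumer of its result. source_idx walks forwards over the candidates. */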
256 void MoveState::upwards_init(int source_idx_, bool improved_rar_)
257 {
258 source_idx = source_idx_;
259 improved_rar = improved_rar_;
260
261 insert_idx = -1;
262
263 std::fill(depends_on.begin(), depends_on.end(), false);
264 std::fill(RAR_dependencies.begin(), RAR_dependencies.end(), false);
265
266 for (const Definition& def : current->definitions) {
267 if (def.isTemp())
268 depends_on[def.tempId()] = true;
269 }
270 }
271
272 bool MoveState::upwards_check_deps()
273 {
274 aco_ptr<Instruction>& instr = block->instructions[source_idx];
275 for (const Operand& op : instr->operands) {
276 if (op.isTemp() && depends_on[op.tempId()])
277 return false;
278 }
279 return true;
280 }
281
282 void MoveState::upwards_set_insert_idx(int before)
283 {
284 insert_idx = before;
285 total_demand = register_demand[before - 1];
286 }
287
288 MoveResult MoveState::upwards_move()
289 {
290 assert(insert_idx >= 0);
291
292 aco_ptr<Instruction>& instr = block->instructions[source_idx];
293 for (const Operand& op : instr->operands) {
294 if (op.isTemp() && depends_on[op.tempId()])
295 return move_fail_ssa;
296 }
297
298 /* check if candidate uses/kills an operand which is used by a dependency */
299 for (const Operand& op : instr->operands) {
300 if (op.isTemp() && (!improved_rar || op.isFirstKill()) && RAR_dependencies[op.tempId()])
301 return move_fail_rar;
302 }
303
304 /* check if register pressure is low enough: the diff is negative if register pressure is decreased */
305 const RegisterDemand candidate_diff = getLiveChanges(instr);
306 const RegisterDemand temp = getTempRegisters(instr);
307 if (RegisterDemand(total_demand + candidate_diff).exceeds(max_registers))
308 return move_fail_pressure;
309 const RegisterDemand temp2 = getTempRegisters(block->instructions[insert_idx - 1]);
310 const RegisterDemand new_demand = register_demand[insert_idx - 1] - temp2 + candidate_diff + temp;
311 if (new_demand.exceeds(max_registers))
312 return move_fail_pressure;
313
314 /* move the candidate above the insert_idx */
315 move_element(block->instructions.begin(), source_idx, insert_idx);
316
317 /* update register pressure */
318 move_element(register_demand, source_idx, insert_idx);
319 for (int i = insert_idx + 1; i <= source_idx; i++)
320 register_demand[i] += candidate_diff;
321 register_demand[insert_idx] = new_demand;
322 total_demand += candidate_diff;
323
324 insert_idx++;
325
326 total_demand.update(register_demand[source_idx]);
327 source_idx++;
328
329 return move_success;
330 }
331
332 void MoveState::upwards_skip()
333 {
334 if (insert_idx >= 0) {
335 aco_ptr<Instruction>& instr = block->instructions[source_idx];
336 for (const Definition& def : instr->definitions) {
337 if (def.isTemp())
338 depends_on[def.tempId()] = true;
339 }
340 for (const Operand& op : instr->operands) {
341 if (op.isTemp())
342 RAR_dependencies[op.tempId()] = true;
343 }
344 total_demand.update(register_demand[source_idx]);
345 }
346
347 source_idx++;
348 }
349
350 bool can_reorder(Instruction* candidate)
351 {
352 switch (candidate->format) {
353 case Format::SMEM:
354 return static_cast<SMEM_instruction*>(candidate)->can_reorder;
355 case Format::MUBUF:
356 return static_cast<MUBUF_instruction*>(candidate)->can_reorder;
357 case Format::MIMG:
358 return static_cast<MIMG_instruction*>(candidate)->can_reorder;
359 case Format::MTBUF:
360 return static_cast<MTBUF_instruction*>(candidate)->can_reorder;
361 case Format::FLAT:
362 case Format::GLOBAL:
363 case Format::SCRATCH:
364 return static_cast<FLAT_instruction*>(candidate)->can_reorder;
365 default:
366 return true;
367 }
368 }
369
370 bool is_gs_or_done_sendmsg(Instruction *instr)
371 {
372 if (instr->opcode == aco_opcode::s_sendmsg) {
373 uint16_t imm = static_cast<SOPP_instruction*>(instr)->imm;
374 return (imm & sendmsg_id_mask) == _sendmsg_gs ||
375 (imm & sendmsg_id_mask) == _sendmsg_gs_done;
376 }
377 return false;
378 }
379
380 bool is_done_sendmsg(Instruction *instr)
381 {
382 if (instr->opcode == aco_opcode::s_sendmsg) {
383 uint16_t imm = static_cast<SOPP_instruction*>(instr)->imm;
384 return (imm & sendmsg_id_mask) == _sendmsg_gs_done;
385 }
386 return false;
387 }
388
389 barrier_interaction get_barrier_interaction(Instruction* instr)
390 {
391 switch (instr->format) {
392 case Format::SMEM:
393 return static_cast<SMEM_instruction*>(instr)->barrier;
394 case Format::MUBUF:
395 return static_cast<MUBUF_instruction*>(instr)->barrier;
396 case Format::MIMG:
397 return static_cast<MIMG_instruction*>(instr)->barrier;
398 case Format::MTBUF:
399 return static_cast<MTBUF_instruction*>(instr)->barrier;
400 case Format::FLAT:
401 case Format::GLOBAL:
402 case Format::SCRATCH:
403 return static_cast<FLAT_instruction*>(instr)->barrier;
404 case Format::DS:
405 return barrier_shared;
406 case Format::SOPP:
407 if (is_done_sendmsg(instr))
408 return (barrier_interaction)(barrier_gs_data | barrier_gs_sendmsg);
409 else if (is_gs_or_done_sendmsg(instr))
410 return barrier_gs_sendmsg;
411 else
412 return barrier_none;
413 case Format::PSEUDO_BARRIER:
414 return barrier_barrier;
415 default:
416 return barrier_none;
417 }
418 }
419
420 barrier_interaction parse_barrier(Instruction *instr)
421 {
422 if (instr->format == Format::PSEUDO_BARRIER) {
423 switch (instr->opcode) {
424 case aco_opcode::p_memory_barrier_atomic:
425 return barrier_atomic;
426 /* For now, buffer and image barriers are treated the same. This is because of
427 * dEQP-VK.memory_model.message_passing.core11.u32.coherent.fence_fence.atomicwrite.device.payload_nonlocal.buffer.guard_nonlocal.image.comp
428 * which seems to use an image load to determine if the result of a buffer load is valid. So the ordering of the two loads is important.
429 * I /think/ we should probably eventually expand the meaning of a buffer barrier so that all buffer operations before it must stay before it,
430 * and that both image and buffer operations after it must stay after it. We should also do the same for image barriers.
431 * Or perhaps the problem is that we don't have a combined barrier instruction for both buffers and images, but the CTS test expects us to?
432 * Either way, this solution should work. */
433 case aco_opcode::p_memory_barrier_buffer:
434 case aco_opcode::p_memory_barrier_image:
435 return (barrier_interaction)(barrier_image | barrier_buffer);
436 case aco_opcode::p_memory_barrier_shared:
437 return barrier_shared;
438 case aco_opcode::p_memory_barrier_common:
439 return (barrier_interaction)(barrier_image | barrier_buffer | barrier_shared | barrier_atomic);
440 case aco_opcode::p_memory_barrier_gs_data:
441 return barrier_gs_data;
442 case aco_opcode::p_memory_barrier_gs_sendmsg:
443 return barrier_gs_sendmsg;
444 default:
445 break;
446 }
447 } else if (instr->opcode == aco_opcode::s_barrier) {
448 return (barrier_interaction)(barrier_barrier | barrier_image | barrier_buffer | barrier_shared | barrier_atomic);
449 }
450 return barrier_none;
451 }
452
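/* Summary of the instructions a candidate would have to move across: which
 * barrier semantics they carry (barriers), which memory classes they access
 * (barrier_interaction), whether they contain spill/reload code and whether
 * all of their SMEM/VMEM accesses are reorderable. perform_hazard_query()
 * checks a candidate against this summary. */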
453 struct hazard_query {
454 bool contains_spill;
455 int barriers;
456 int barrier_interaction;
457 bool can_reorder_vmem;
458 bool can_reorder_smem;
459 };
460
461 void init_hazard_query(hazard_query *query) {
462 query->contains_spill = false;
463 query->barriers = 0;
464 query->barrier_interaction = 0;
465 query->can_reorder_vmem = true;
466 query->can_reorder_smem = true;
467 }
468
469 void add_to_hazard_query(hazard_query *query, Instruction *instr)
470 {
471 query->barriers |= parse_barrier(instr);
472 query->barrier_interaction |= get_barrier_interaction(instr);
473 if (instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload)
474 query->contains_spill = true;
475
476 bool can_reorder_instr = can_reorder(instr);
477 query->can_reorder_smem &= instr->format != Format::SMEM || can_reorder_instr;
478 query->can_reorder_vmem &= !(instr->isVMEM() || instr->isFlatOrGlobal()) || can_reorder_instr;
479 }
480
481 enum HazardResult {
482 hazard_success,
483 hazard_fail_reorder_vmem_smem,
484 hazard_fail_reorder_ds,
485 hazard_fail_reorder_sendmsg,
486 hazard_fail_spill,
487 hazard_fail_export,
488 hazard_fail_barrier,
489 /* We must stop at these failures: add_to_hazard_query() doesn't track them,
490 * so they cannot simply be skipped over. */
491 hazard_fail_exec,
492 hazard_fail_memtime,
493 };
494
495 HazardResult perform_hazard_query(hazard_query *query, Instruction *instr)
496 {
497 bool can_reorder_candidate = can_reorder(instr);
498
499 if (instr->opcode == aco_opcode::p_exit_early_if)
500 return hazard_fail_exec;
501 for (const Definition& def : instr->definitions) {
502 if (def.isFixed() && def.physReg() == exec)
503 return hazard_fail_exec;
504 }
505
506 /* don't move exports, so that they stay close together */
507 if (instr->format == Format::EXP)
508 return hazard_fail_export;
509
510 /* don't move s_memtime/s_memrealtime */
511 if (instr->opcode == aco_opcode::s_memtime || instr->opcode == aco_opcode::s_memrealtime)
512 return hazard_fail_memtime;
513
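/* A candidate may not be moved across a barrier in either direction: it must
 * neither synchronize a memory class that the summarized instructions access
 * (parse_barrier vs. barrier_interaction), nor access a memory class that a
 * summarized barrier synchronizes (get_barrier_interaction vs. barriers). */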
514 if (query->barrier_interaction && (query->barrier_interaction & parse_barrier(instr)))
515 return hazard_fail_barrier;
516 if (query->barriers && (query->barriers & get_barrier_interaction(instr)))
517 return hazard_fail_barrier;
518
519 if (!query->can_reorder_smem && instr->format == Format::SMEM && !can_reorder_candidate)
520 return hazard_fail_reorder_vmem_smem;
521 if (!query->can_reorder_vmem && (instr->isVMEM() || instr->isFlatOrGlobal()) && !can_reorder_candidate)
522 return hazard_fail_reorder_vmem_smem;
523 if ((query->barrier_interaction & barrier_shared) && instr->format == Format::DS)
524 return hazard_fail_reorder_ds;
525 if (is_gs_or_done_sendmsg(instr) && (query->barrier_interaction & get_barrier_interaction(instr)))
526 return hazard_fail_reorder_sendmsg;
527
528 if ((instr->opcode == aco_opcode::p_spill || instr->opcode == aco_opcode::p_reload) &&
529 query->contains_spill)
530 return hazard_fail_spill;
531
532 return hazard_success;
533 }
534
535 void schedule_SMEM(sched_ctx& ctx, Block* block,
536 std::vector<RegisterDemand>& register_demand,
537 Instruction* current, int idx)
538 {
539 assert(idx != 0);
540 int window_size = SMEM_WINDOW_SIZE;
541 int max_moves = SMEM_MAX_MOVES;
542 int16_t k = 0;
543
544 /* don't move s_memtime/s_memrealtime */
545 if (current->opcode == aco_opcode::s_memtime || current->opcode == aco_opcode::s_memrealtime)
546 return;
547
548 /* first, check if we have instructions before current to move down */
549 hazard_query hq;
550 init_hazard_query(&hq);
551 add_to_hazard_query(&hq, current);
552
553 ctx.mv.downwards_init(idx, false, false);
554
555 for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
556 assert(candidate_idx >= 0);
557 assert(candidate_idx == ctx.mv.source_idx);
558 aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
559
560 /* break if we'd make the previous SMEM instruction stall */
561 bool can_stall_prev_smem = idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
562 if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
563 break;
564
565 /* break when encountering a VMEM instruction or logical_start; barriers are handled via the hazard query */
566 if (candidate->opcode == aco_opcode::p_logical_start)
567 break;
568 if (candidate->isVMEM())
569 break;
570
571 bool can_move_down = true;
572
573 HazardResult haz = perform_hazard_query(&hq, candidate.get());
574 if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill || haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier)
575 can_move_down = false;
576 else if (haz != hazard_success)
577 break;
578
579 /* don't use LDS/GDS instructions to hide latency since it can
580 * significantly worsen LDS scheduling */
581 if (candidate->format == Format::DS || !can_move_down) {
582 add_to_hazard_query(&hq, candidate.get());
583 ctx.mv.downwards_skip();
584 continue;
585 }
586
587 MoveResult res = ctx.mv.downwards_move(false);
588 if (res == move_fail_ssa || res == move_fail_rar) {
589 add_to_hazard_query(&hq, candidate.get());
590 ctx.mv.downwards_skip();
591 continue;
592 } else if (res == move_fail_pressure) {
593 break;
594 }
595
596 if (candidate_idx < ctx.last_SMEM_dep_idx)
597 ctx.last_SMEM_stall++;
598 k++;
599 }
600
601 /* find the first instruction depending on current or find another MEM */
602 ctx.mv.upwards_init(idx + 1, false);
603
604 bool found_dependency = false;
605 /* second, check if we have instructions after current to move up */
606 for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int) idx + window_size; candidate_idx++) {
607 assert(candidate_idx == ctx.mv.source_idx);
608 assert(candidate_idx < (int) block->instructions.size());
609 aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
610
611 if (candidate->opcode == aco_opcode::p_logical_end)
612 break;
613
614 /* check if candidate depends on current */
615 bool is_dependency = !found_dependency && !ctx.mv.upwards_check_deps();
616 /* no need to steal from following VMEM instructions */
617 if (is_dependency && candidate->isVMEM())
618 break;
619
620 if (found_dependency) {
621 HazardResult haz = perform_hazard_query(&hq, candidate.get());
622 if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
623 haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier)
624 is_dependency = true;
625 else if (haz != hazard_success)
626 break;
627 }
628
629 if (is_dependency) {
630 if (!found_dependency) {
631 ctx.mv.upwards_set_insert_idx(candidate_idx);
632 init_hazard_query(&hq);
633 found_dependency = true;
634 }
635 }
636
637 if (is_dependency || !found_dependency) {
638 if (found_dependency)
639 add_to_hazard_query(&hq, candidate.get());
640 else
641 k++;
642 ctx.mv.upwards_skip();
643 continue;
644 }
645
646 MoveResult res = ctx.mv.upwards_move();
647 if (res == move_fail_ssa || res == move_fail_rar) {
648 /* no need to steal from following VMEM instructions */
649 if (res == move_fail_ssa && candidate->isVMEM())
650 break;
651 add_to_hazard_query(&hq, candidate.get());
652 ctx.mv.upwards_skip();
653 continue;
654 } else if (res == move_fail_pressure) {
655 break;
656 }
657 k++;
658 }
659
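/* Remember where the first instruction depending on this SMEM load ended up,
 * together with a rough estimate of how much of the load's latency is still
 * unhidden, so that scheduling of the surrounding code avoids making it
 * stall. */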
660 ctx.last_SMEM_dep_idx = found_dependency ? ctx.mv.insert_idx : 0;
661 ctx.last_SMEM_stall = 10 - ctx.num_waves - k;
662 }
663
664 void schedule_VMEM(sched_ctx& ctx, Block* block,
665 std::vector<RegisterDemand>& register_demand,
666 Instruction* current, int idx)
667 {
668 assert(idx != 0);
669 int window_size = VMEM_WINDOW_SIZE;
670 int max_moves = VMEM_MAX_MOVES;
671 int clause_max_grab_dist = VMEM_CLAUSE_MAX_GRAB_DIST;
672 int16_t k = 0;
673
674 /* first, check if we have instructions before current to move down */
675 hazard_query indep_hq;
676 hazard_query clause_hq;
677 init_hazard_query(&indep_hq);
678 init_hazard_query(&clause_hq);
679 add_to_hazard_query(&indep_hq, current);
680
681 ctx.mv.downwards_init(idx, true, true);
682
683 for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
684 assert(candidate_idx == ctx.mv.source_idx);
685 assert(candidate_idx >= 0);
686 aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
687 bool is_vmem = candidate->isVMEM() || candidate->isFlatOrGlobal();
688
689 /* break when encountering logical_start; other VMEM instructions may join a clause and barriers are handled via the hazard query */
690 if (candidate->opcode == aco_opcode::p_logical_start)
691 break;
692
693 /* break if we'd make the previous SMEM instruction stall */
694 bool can_stall_prev_smem = idx <= ctx.last_SMEM_dep_idx && candidate_idx < ctx.last_SMEM_dep_idx;
695 if (can_stall_prev_smem && ctx.last_SMEM_stall >= 0)
696 break;
697
698 bool part_of_clause = false;
699 if (current->isVMEM() == candidate->isVMEM()) {
700 bool same_resource = true;
701 if (current->isVMEM())
702 same_resource = candidate->operands[0].tempId() == current->operands[0].tempId();
703 int grab_dist = ctx.mv.insert_idx_clause - candidate_idx;
704 /* We can't easily tell how much this will decrease the def-to-use
705 * distances, so just use how far it will be moved as a heuristic. */
706 part_of_clause = same_resource && grab_dist < clause_max_grab_dist;
707 }
708
709 /* if current depends on candidate, add additional dependencies and continue */
710 bool can_move_down = !is_vmem || part_of_clause;
711
712 HazardResult haz = perform_hazard_query(part_of_clause ? &clause_hq : &indep_hq, candidate.get());
713 if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
714 haz == hazard_fail_reorder_sendmsg || haz == hazard_fail_barrier)
715 can_move_down = false;
716 else if (haz != hazard_success)
717 break;
718
719 if (!can_move_down) {
720 add_to_hazard_query(&indep_hq, candidate.get());
721 add_to_hazard_query(&clause_hq, candidate.get());
722 ctx.mv.downwards_skip();
723 continue;
724 }
725
726 MoveResult res = ctx.mv.downwards_move(part_of_clause);
727 if (res == move_fail_ssa || res == move_fail_rar) {
728 add_to_hazard_query(&indep_hq, candidate.get());
729 add_to_hazard_query(&clause_hq, candidate.get());
730 ctx.mv.downwards_skip();
731 continue;
732 } else if (res == move_fail_pressure) {
733 break;
734 }
735 k++;
736 if (candidate_idx < ctx.last_SMEM_dep_idx)
737 ctx.last_SMEM_stall++;
738 }
739
740 /* find the first instruction depending on current or find another VMEM */
741 ctx.mv.upwards_init(idx + 1, true);
742
743 bool found_dependency = false;
744 /* second, check if we have instructions after current to move up */
745 for (int candidate_idx = idx + 1; k < max_moves && candidate_idx < (int) idx + window_size; candidate_idx++) {
746 assert(candidate_idx == ctx.mv.source_idx);
747 assert(candidate_idx < (int) block->instructions.size());
748 aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
749 bool is_vmem = candidate->isVMEM() || candidate->isFlatOrGlobal();
750
751 if (candidate->opcode == aco_opcode::p_logical_end)
752 break;
753
754 /* check if candidate depends on current */
755 bool is_dependency = false;
756 if (found_dependency) {
757 HazardResult haz = perform_hazard_query(&indep_hq, candidate.get());
758 if (haz == hazard_fail_reorder_ds || haz == hazard_fail_spill ||
759 haz == hazard_fail_reorder_vmem_smem || haz == hazard_fail_reorder_sendmsg ||
760 haz == hazard_fail_barrier)
761 is_dependency = true;
762 else if (haz != hazard_success)
763 break;
764 }
765
766 is_dependency |= !found_dependency && !ctx.mv.upwards_check_deps();
767 if (is_dependency) {
768 if (!found_dependency) {
769 ctx.mv.upwards_set_insert_idx(candidate_idx);
770 init_hazard_query(&indep_hq);
771 found_dependency = true;
772 }
773 } else if (is_vmem) {
774 /* don't move up dependencies of other VMEM instructions */
775 for (const Definition& def : candidate->definitions) {
776 if (def.isTemp())
777 ctx.mv.depends_on[def.tempId()] = true;
778 }
779 }
780
781 if (is_dependency || !found_dependency) {
782 if (found_dependency)
783 add_to_hazard_query(&indep_hq, candidate.get());
784 ctx.mv.upwards_skip();
785 continue;
786 }
787
788 MoveResult res = ctx.mv.upwards_move();
789 if (res == move_fail_ssa || res == move_fail_rar) {
790 add_to_hazard_query(&indep_hq, candidate.get());
791 ctx.mv.upwards_skip();
792 continue;
793 } else if (res == move_fail_pressure) {
794 break;
795 }
796 k++;
797 }
798 }
799
800 void schedule_position_export(sched_ctx& ctx, Block* block,
801 std::vector<RegisterDemand>& register_demand,
802 Instruction* current, int idx)
803 {
804 assert(idx != 0);
805 int window_size = POS_EXP_WINDOW_SIZE;
806 int max_moves = POS_EXP_MAX_MOVES;
807 int16_t k = 0;
808
809 ctx.mv.downwards_init(idx, true, false);
810
811 hazard_query hq;
812 init_hazard_query(&hq);
813 add_to_hazard_query(&hq, current);
814
815 for (int candidate_idx = idx - 1; k < max_moves && candidate_idx > (int) idx - window_size; candidate_idx--) {
816 assert(candidate_idx >= 0);
817 aco_ptr<Instruction>& candidate = block->instructions[candidate_idx];
818
819 if (candidate->opcode == aco_opcode::p_logical_start)
820 break;
821 if (candidate->isVMEM() || candidate->format == Format::SMEM || candidate->isFlatOrGlobal())
822 break;
823
824 HazardResult haz = perform_hazard_query(&hq, candidate.get());
825 if (haz == hazard_fail_exec || haz == hazard_fail_export || haz == hazard_fail_memtime)
826 break;
827
828 if (haz != hazard_success) {
829 add_to_hazard_query(&hq, candidate.get());
830 ctx.mv.downwards_skip();
831 continue;
832 }
833
834 MoveResult res = ctx.mv.downwards_move(false);
835 if (res == move_fail_ssa || res == move_fail_rar) {
836 add_to_hazard_query(&hq, candidate.get());
837 ctx.mv.downwards_skip();
838 continue;
839 } else if (res == move_fail_pressure) {
840 break;
841 }
842 k++;
843 }
844 }
845
846 void schedule_block(sched_ctx& ctx, Program *program, Block* block, live& live_vars)
847 {
848 ctx.last_SMEM_dep_idx = 0;
849 ctx.last_SMEM_stall = INT16_MIN;
850 ctx.mv.block = block;
851 ctx.mv.register_demand = live_vars.register_demand[block->index].data();
852
853 /* go through all instructions and find memory loads */
854 for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
855 Instruction* current = block->instructions[idx].get();
856
857 if (current->definitions.empty())
858 continue;
859
860 if (current->isVMEM() || current->isFlatOrGlobal()) {
861 ctx.mv.current = current;
862 schedule_VMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
863 }
864
865 if (current->format == Format::SMEM) {
866 ctx.mv.current = current;
867 schedule_SMEM(ctx, block, live_vars.register_demand[block->index], current, idx);
868 }
869 }
870
871 if ((program->stage & hw_vs) && block->index == program->blocks.size() - 1) {
872 /* Try to move position exports as far up as possible, to reduce register
873 * usage and because ISA reference guides say so. */
874 for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
875 Instruction* current = block->instructions[idx].get();
876
877 if (current->format == Format::EXP) {
878 unsigned target = static_cast<Export_instruction*>(current)->dest;
879 if (target >= V_008DFC_SQ_EXP_POS && target < V_008DFC_SQ_EXP_PARAM) {
880 ctx.mv.current = current;
881 schedule_position_export(ctx, block, live_vars.register_demand[block->index], current, idx);
882 }
883 }
884 }
885 }
886
887 /* resummarize the block's register demand */
888 block->register_demand = RegisterDemand();
889 for (unsigned idx = 0; idx < block->instructions.size(); idx++) {
890 block->register_demand.update(live_vars.register_demand[block->index][idx]);
891 }
892 }
893
894
895 void schedule_program(Program *program, live& live_vars)
896 {
897 sched_ctx ctx;
898 ctx.mv.depends_on.resize(program->peekAllocationId());
899 ctx.mv.RAR_dependencies.resize(program->peekAllocationId());
900 ctx.mv.RAR_dependencies_clause.resize(program->peekAllocationId());
901 /* Allowing the scheduler to reduce the number of waves to as low as 5
902 * improves performance of Thrones of Britannia significantly and doesn't
903 * seem to hurt anything else. */
904 if (program->num_waves <= 5)
905 ctx.num_waves = program->num_waves;
906 else if (program->max_reg_demand.vgpr >= 32)
907 ctx.num_waves = 5;
908 else if (program->max_reg_demand.vgpr >= 28)
909 ctx.num_waves = 6;
910 else if (program->max_reg_demand.vgpr >= 24)
911 ctx.num_waves = 7;
912 else
913 ctx.num_waves = 8;
914 ctx.num_waves = std::max<uint16_t>(ctx.num_waves, program->min_waves);
915
916 assert(ctx.num_waves > 0 && ctx.num_waves <= program->num_waves);
917 ctx.mv.max_registers = { int16_t(get_addr_vgpr_from_waves(program, ctx.num_waves) - 2),
918 int16_t(get_addr_sgpr_from_waves(program, ctx.num_waves))};
919
920 for (Block& block : program->blocks)
921 schedule_block(ctx, program, &block, live_vars);
922
923 /* update max_reg_demand and num_waves */
924 RegisterDemand new_demand;
925 for (Block& block : program->blocks) {
926 new_demand.update(block.register_demand);
927 }
928 update_vgpr_sgpr_demand(program, new_demand);
929
930 /* if enabled, this code asserts that register_demand is updated correctly */
931 #if 0
932 int prev_num_waves = program->num_waves;
933 const RegisterDemand prev_max_demand = program->max_reg_demand;
934
935 std::vector<RegisterDemand> demands(program->blocks.size());
936 for (unsigned j = 0; j < program->blocks.size(); j++) {
937 demands[j] = program->blocks[j].register_demand;
938 }
939
940 struct radv_nir_compiler_options options;
941 options.chip_class = program->chip_class;
942 live live_vars2 = aco::live_var_analysis(program, &options);
943
944 for (unsigned j = 0; j < program->blocks.size(); j++) {
945 Block &b = program->blocks[j];
946 for (unsigned i = 0; i < b.instructions.size(); i++)
947 assert(live_vars.register_demand[b.index][i] == live_vars2.register_demand[b.index][i]);
948 assert(b.register_demand == demands[j]);
949 }
950
951 assert(program->max_reg_demand == prev_max_demand);
952 assert(program->num_waves == prev_num_waves);
953 #endif
954 }
955
956 }