/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_ir.h"
#include "aco_builder.h"

/*
 * Insert p_start_linear_vgpr/p_end_linear_vgpr instructions right before RA
 * to correctly allocate temporaries for reductions that have to disrespect
 * EXEC by executing in WWM.
 */
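
/*
 * Rough sketch of the transformation (illustrative IR only; block numbers and
 * syntax are made up, not taken from an actual dump):
 *
 *    BB0 (top-level block before a loop):
 *       %lv:v1(linear) = p_start_linear_vgpr            ; hoisted scratch
 *    BB2 (inside the loop):
 *       %res:v1, %stmp:s2, ... = p_reduce %src:v1, %lv:v1(linear), ...
 *    BB4 (first block after the outermost loop):
 *       p_end_linear_vgpr %lv:v1(linear)
 *
 * The scratch operands and definitions attached here are consumed later when
 * the pseudo reduction is lowered to hardware instructions.
 */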

namespace aco {

void setup_reduce_temp(Program* program)
{
   unsigned last_top_level_block_idx = 0;
   unsigned maxSize = 0;

   std::vector<bool> hasReductions(program->blocks.size());
   for (Block& block : program->blocks) {
      for (aco_ptr<Instruction>& instr : block.instructions) {
         if (instr->format != Format::PSEUDO_REDUCTION)
            continue;

         maxSize = MAX2(maxSize, instr->operands[0].size());
         hasReductions[block.index] = true;
      }
   }

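   /* maxSize is the widest reduction operand in dwords (RegClass sizes are in
    * dwords); if the program contains no reductions at all, there is nothing
    * to do. */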
   if (maxSize == 0)
      return;

   assert(maxSize == 1 || maxSize == 2);
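
   /* Two linear-VGPR scratch temporaries are threaded through every reduction:
    * reduceTmp becomes operands[1] of each PSEUDO_REDUCTION instruction, and
    * vtmp becomes operands[2] where the lowering needs a second scratch.
    * The .as_linear() register class keeps them allocated with respect to the
    * linear (whole-wave) CFG, since reductions execute in WWM and ignore the
    * logical EXEC mask. */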
   Temp reduceTmp(0, RegClass(RegType::vgpr, maxSize).as_linear());
   Temp vtmp(0, RegClass(RegType::vgpr, maxSize).as_linear());
   int inserted_at = -1;
   int vtmp_inserted_at = -1;
   bool reduceTmp_in_loop = false;
   bool vtmp_in_loop = false;

   for (Block& block : program->blocks) {

      /* insert p_end_linear_vgpr after the outermost loop */
      if (reduceTmp_in_loop && block.loop_nest_depth == 0) {
         assert(inserted_at == (int)last_top_level_block_idx);

         aco_ptr<Instruction> end{create_instruction<Instruction>(aco_opcode::p_end_linear_vgpr,
                                                                  Format::PSEUDO, vtmp_in_loop ? 2 : 1, 0)};
         end->operands[0] = Operand(reduceTmp);
         if (vtmp_in_loop)
            end->operands[1] = Operand(vtmp);
         /* insert after the phis of the loop exit block */
         std::vector<aco_ptr<Instruction>>::iterator it = block.instructions.begin();
         while ((*it)->opcode == aco_opcode::p_linear_phi || (*it)->opcode == aco_opcode::p_phi)
            ++it;
         block.instructions.insert(it, std::move(end));
         reduceTmp_in_loop = false;
      }

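      /* Remember the most recent top-level block: p_start_linear_vgpr for the
       * scratch temporaries is hoisted there (see below), so that a reduction
       * inside a loop keeps its scratch VGPRs live across the whole loop until
       * the matching p_end_linear_vgpr inserted above. */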
      if (block.kind & block_kind_top_level)
         last_top_level_block_idx = block.index;

      if (!hasReductions[block.index])
         continue;

      std::vector<aco_ptr<Instruction>>::iterator it;
      for (it = block.instructions.begin(); it != block.instructions.end(); ++it) {
         Instruction *instr = (*it).get();
         if (instr->format != Format::PSEUDO_REDUCTION)
            continue;

         ReduceOp op = static_cast<Pseudo_reduction_instruction *>(instr)->reduce_op;
         reduceTmp_in_loop |= block.loop_nest_depth > 0;

         if ((int)last_top_level_block_idx != inserted_at) {
            reduceTmp = {program->allocateId(), reduceTmp.regClass()};
            aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr,
                                                                                      Format::PSEUDO, 0, 1)};
            create->definitions[0] = Definition(reduceTmp);
            /* find the right place to insert this definition */
            if (last_top_level_block_idx == block.index) {
               /* insert right before the current instruction */
               it = block.instructions.insert(it, std::move(create));
               it++;
               /* inserted_at is intentionally not updated here, so that later
                * blocks insert a new definition at the end of this block
                * instead of using this one. */
            } else {
               assert(last_top_level_block_idx < block.index);
               /* insert before the branch at the last top-level block */
               std::vector<aco_ptr<Instruction>>& instructions =
                  program->blocks[last_top_level_block_idx].instructions;
               instructions.insert(std::next(instructions.begin(), instructions.size() - 1),
                                   std::move(create));
               inserted_at = last_top_level_block_idx;
            }
         }

         /* same as before, except for the vector temporary instead of the reduce temporary */
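         /* The second scratch VGPR (vtmp) is only required where the later
          * lowering needs more than one temporary: most 64-bit reductions and
          * 32-bit multiplies, additional 8/16-bit and iadd64 cases on GFX10,
          * everything on GFX7 and older, and the larger cluster sizes. */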
         unsigned cluster_size = static_cast<Pseudo_reduction_instruction *>(instr)->cluster_size;
         bool need_vtmp = op == imul32 || op == fadd64 || op == fmul64 ||
                          op == fmin64 || op == fmax64 || op == umin64 ||
                          op == umax64 || op == imin64 || op == imax64 ||
                          op == imul64;
         bool gfx10_need_vtmp = op == imul8 || op == imax8 || op == imin8 || op == umin8 ||
                                op == imul16 || op == imax16 || op == imin16 || op == umin16 ||
                                op == iadd64;

         if (program->chip_class >= GFX10 && cluster_size == 64)
            need_vtmp = true;
         if (program->chip_class >= GFX10 && gfx10_need_vtmp)
            need_vtmp = true;
         if (program->chip_class <= GFX7)
            need_vtmp = true;

         need_vtmp |= cluster_size == 32;

         vtmp_in_loop |= need_vtmp && block.loop_nest_depth > 0;
         if (need_vtmp && (int)last_top_level_block_idx != vtmp_inserted_at) {
            vtmp = {program->allocateId(), vtmp.regClass()};
            aco_ptr<Pseudo_instruction> create{create_instruction<Pseudo_instruction>(aco_opcode::p_start_linear_vgpr,
                                                                                      Format::PSEUDO, 0, 1)};
            create->definitions[0] = Definition(vtmp);
            if (last_top_level_block_idx == block.index) {
               it = block.instructions.insert(it, std::move(create));
               it++;
            } else {
               assert(last_top_level_block_idx < block.index);
               std::vector<aco_ptr<Instruction>>& instructions =
                  program->blocks[last_top_level_block_idx].instructions;
               instructions.insert(std::next(instructions.begin(), instructions.size() - 1),
                                   std::move(create));
               vtmp_inserted_at = last_top_level_block_idx;
            }
         }

         /* attach the scratch VGPRs to the reduction instruction */
         instr->operands[1] = Operand(reduceTmp);
         if (need_vtmp)
            instr->operands[2] = Operand(vtmp);

         /* scalar temporary (used by the lowering, e.g. to save the exec mask) */
         Builder bld(program);
         instr->definitions[1] = bld.def(s2);

         /* scalar identity temporary: holds the reduction's identity value in
          * SGPRs where the lowering needs it */
         bool need_sitmp = (program->chip_class <= GFX7 || program->chip_class >= GFX10) &&
                           instr->opcode != aco_opcode::p_reduce;
         if (instr->opcode == aco_opcode::p_exclusive_scan) {
            need_sitmp |=
               (op == imin8 || op == imin16 || op == imin32 || op == imin64 ||
                op == imax8 || op == imax16 || op == imax32 || op == imax64 ||
                op == fmin16 || op == fmin32 || op == fmin64 ||
                op == fmax16 || op == fmax32 || op == fmax64 ||
                op == fmul16 || op == fmul64);
         }
         if (need_sitmp) {
            instr->definitions[2] = bld.def(RegClass(RegType::sgpr, instr->operands[0].size()));
         }

         /* vcc clobber: these ops are lowered with instructions that use VCC
          * (e.g. carry-out additions and 64-bit compare+select) */
         bool clobber_vcc = false;
         if ((op == iadd32 || op == imul64) && program->chip_class < GFX9)
            clobber_vcc = true;
         if (op == iadd64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)
            clobber_vcc = true;

         if (clobber_vcc)
            instr->definitions[4] = Definition(vcc, bld.lm);
      }
   }
}

}; /* namespace aco */