src/amd/compiler/aco_opt_value_numbering.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <map>
#include <unordered_map>
#include "aco_ir.h"

/*
 * Implements the algorithm for dominator-tree value numbering
 * from "Value Numbering" by Briggs, Cooper, and Simpson.
 */
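/*
 * Each instruction is hashed into an expression table together with the index
 * of the block it was defined in. When a later instruction computes the same
 * expression and its block is dominated by the earlier definition, its
 * definitions are renamed to the earlier temporaries; the renames map carries
 * these replacements forward through the rest of the program.
 */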

namespace aco {
namespace {

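/* Hashes an instruction so that value-equivalent instructions end up in the
 * same bucket of the expression table: the opcode, format, operands and some
 * format-specific fields are mixed in. InstrPred below does the exact
 * comparison, so hash collisions are harmless. */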
struct InstrHash {
   std::size_t operator()(Instruction* instr) const
   {
      uint64_t hash = (uint64_t) instr->opcode + (uint64_t) instr->format;
      for (unsigned i = 0; i < instr->operands.size(); i++) {
         Operand op = instr->operands[i];
         uint64_t val = op.isTemp() ? op.tempId() : op.isFixed() ? op.physReg() : op.constantValue();
         hash |= val << (i+1) * 8;
      }
      if (instr->isVOP3()) {
         VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);
         for (unsigned i = 0; i < 3; i++) {
            hash ^= vop3->abs[i] << (i*3 + 0);
            hash ^= vop3->neg[i] << (i*3 + 2);
         }
         hash ^= vop3->opsel * 13;
         hash ^= (vop3->clamp << 28) * 13;
         hash += vop3->omod << 19;
      }
      switch (instr->format) {
      case Format::SMEM:
         break;
      case Format::VINTRP: {
         Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
         hash ^= interp->attribute << 13;
         hash ^= interp->component << 27;
         break;
      }
      case Format::DS:
         break;
      default:
         break;
      }

      return hash;
   }
};

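/* Compares two instructions for value equality. Instructions whose result
 * depends on the exec mask only compare equal if their pass_flags (the
 * exec_id assigned in process_block) match, and instructions with side
 * effects or load-store dependencies never compare equal. */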
struct InstrPred {
   bool operator()(Instruction* a, Instruction* b) const
   {
      if (a->format != b->format)
         return false;
      if (a->opcode != b->opcode)
         return false;
      if (a->operands.size() != b->operands.size() || a->definitions.size() != b->definitions.size())
         return false; /* possible with pseudo-instructions */
      for (unsigned i = 0; i < a->operands.size(); i++) {
         if (a->operands[i].isConstant()) {
            if (!b->operands[i].isConstant())
               return false;
            if (a->operands[i].constantValue() != b->operands[i].constantValue())
               return false;
         }
         else if (a->operands[i].isTemp()) {
            if (!b->operands[i].isTemp())
               return false;
            if (a->operands[i].tempId() != b->operands[i].tempId())
               return false;
         }
         else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
            return false;
         if (a->operands[i].isFixed()) {
            if (!b->operands[i].isFixed())
               return false;
            if (a->operands[i].physReg() != b->operands[i].physReg())
               return false;
            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
               return false;
         }
      }
      for (unsigned i = 0; i < a->definitions.size(); i++) {
         if (a->definitions[i].isTemp()) {
            if (!b->definitions[i].isTemp())
               return false;
            if (a->definitions[i].regClass() != b->definitions[i].regClass())
               return false;
         }
         if (a->definitions[i].isFixed()) {
            if (!b->definitions[i].isFixed())
               return false;
            if (a->definitions[i].physReg() != b->definitions[i].physReg())
               return false;
            if (a->definitions[i].physReg() == exec)
               return false;
         }
      }

      if (a->opcode == aco_opcode::v_readfirstlane_b32)
         return a->pass_flags == b->pass_flags;

      /* The results of VOPC depend on the exec mask if used for subgroup operations. */
      if ((uint32_t) a->format & (uint32_t) Format::VOPC && a->pass_flags != b->pass_flags)
         return false;

      if (a->isVOP3()) {
         VOP3A_instruction* a3 = static_cast<VOP3A_instruction*>(a);
         VOP3A_instruction* b3 = static_cast<VOP3A_instruction*>(b);
         for (unsigned i = 0; i < 3; i++) {
            if (a3->abs[i] != b3->abs[i] ||
                a3->neg[i] != b3->neg[i])
               return false;
         }
         return a3->clamp == b3->clamp &&
                a3->omod == b3->omod &&
                a3->opsel == b3->opsel;
      }
      if (a->isDPP()) {
         DPP_instruction* aDPP = static_cast<DPP_instruction*>(a);
         DPP_instruction* bDPP = static_cast<DPP_instruction*>(b);
         return aDPP->pass_flags == bDPP->pass_flags &&
                aDPP->dpp_ctrl == bDPP->dpp_ctrl &&
                aDPP->bank_mask == bDPP->bank_mask &&
                aDPP->row_mask == bDPP->row_mask &&
                aDPP->bound_ctrl == bDPP->bound_ctrl &&
                aDPP->abs[0] == bDPP->abs[0] &&
                aDPP->abs[1] == bDPP->abs[1] &&
                aDPP->neg[0] == bDPP->neg[0] &&
                aDPP->neg[1] == bDPP->neg[1];
      }

      switch (a->format) {
      case Format::SOPK: {
         SOPK_instruction* aK = static_cast<SOPK_instruction*>(a);
         SOPK_instruction* bK = static_cast<SOPK_instruction*>(b);
         return aK->imm == bK->imm;
      }
      case Format::SMEM: {
         SMEM_instruction* aS = static_cast<SMEM_instruction*>(a);
         SMEM_instruction* bS = static_cast<SMEM_instruction*>(b);
         return aS->can_reorder && bS->can_reorder &&
                aS->glc == bS->glc && aS->nv == bS->nv;
      }
      case Format::VINTRP: {
         Interp_instruction* aI = static_cast<Interp_instruction*>(a);
         Interp_instruction* bI = static_cast<Interp_instruction*>(b);
         if (aI->attribute != bI->attribute)
            return false;
         if (aI->component != bI->component)
            return false;
         return true;
      }
      case Format::PSEUDO_REDUCTION: {
         Pseudo_reduction_instruction *aR = static_cast<Pseudo_reduction_instruction*>(a);
         Pseudo_reduction_instruction *bR = static_cast<Pseudo_reduction_instruction*>(b);
         return aR->pass_flags == bR->pass_flags &&
                aR->reduce_op == bR->reduce_op &&
                aR->cluster_size == bR->cluster_size;
      }
      case Format::MTBUF: {
         /* value numbering these is fine since MTBUF instructions are only used for vertex input fetches */
         MTBUF_instruction* aM = static_cast<MTBUF_instruction *>(a);
         MTBUF_instruction* bM = static_cast<MTBUF_instruction *>(b);
         return aM->can_reorder && bM->can_reorder &&
                aM->barrier == bM->barrier &&
                aM->dfmt == bM->dfmt &&
                aM->nfmt == bM->nfmt &&
                aM->offset == bM->offset &&
                aM->offen == bM->offen &&
                aM->idxen == bM->idxen &&
                aM->glc == bM->glc &&
                aM->slc == bM->slc &&
                aM->tfe == bM->tfe &&
                aM->disable_wqm == bM->disable_wqm;
      }
      /* we want to optimize these in NIR and don't want to deal with
       * load-store dependencies here */
      case Format::MUBUF:
      case Format::FLAT:
      case Format::GLOBAL:
      case Format::SCRATCH:
      case Format::EXP:
      case Format::SOPP:
      case Format::PSEUDO_BRANCH:
      case Format::PSEUDO_BARRIER:
         return false;
      case Format::DS: {
         if (a->opcode != aco_opcode::ds_bpermute_b32 &&
             a->opcode != aco_opcode::ds_permute_b32 &&
             a->opcode != aco_opcode::ds_swizzle_b32)
            return false;
         DS_instruction* aD = static_cast<DS_instruction *>(a);
         DS_instruction* bD = static_cast<DS_instruction *>(b);
         return aD->pass_flags == bD->pass_flags &&
                aD->gds == bD->gds &&
                aD->offset0 == bD->offset0 &&
                aD->offset1 == bD->offset1;
      }
      case Format::MIMG: {
         MIMG_instruction* aM = static_cast<MIMG_instruction*>(a);
         MIMG_instruction* bM = static_cast<MIMG_instruction*>(b);
         return aM->can_reorder && bM->can_reorder &&
                aM->barrier == bM->barrier &&
                aM->dmask == bM->dmask &&
                aM->unrm == bM->unrm &&
                aM->glc == bM->glc &&
                aM->slc == bM->slc &&
                aM->tfe == bM->tfe &&
                aM->da == bM->da &&
                aM->lwe == bM->lwe &&
                aM->r128 == bM->r128 &&
                aM->a16 == bM->a16 &&
                aM->d16 == bM->d16 &&
                aM->disable_wqm == bM->disable_wqm;
      }
      default:
         return true;
      }
   }
};

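/* maps a value-numbered instruction to the index of the block it was defined in */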
using expr_set = std::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;

struct vn_ctx {
   Program* program;
   expr_set expr_values;
   std::map<uint32_t, Temp> renames;

   /* The exec_id should be the same at the same control flow depth.
    * Together with the check for dominator relations, it is then safe to
    * assume that the same exec_id also means the same execution mask.
    * Discards increment the exec_id, so that it never returns to a
    * previous value.
    */
   uint32_t exec_id = 1;

   vn_ctx(Program* program) : program(program) {}
};


/* dominates() returns true if the parent block dominates the child block and
 * is part of the same loop or of an enclosing one, i.e. it has a loop nest
 * depth no greater than the child's.
 */
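/* The check walks up the logical dominator tree from the child block; block
 * indices increase in program order, so a dominator always has a smaller
 * index than the blocks it dominates.
 */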
bool dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
{
   unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
   while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
      child = ctx.program->blocks[child].logical_idom;

   return parent == child;
}

void process_block(vn_ctx& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> new_instructions;
   new_instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      /* first, rename operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         auto it = ctx.renames.find(op.tempId());
         if (it != ctx.renames.end())
            op.setTemp(it->second);
      }

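      /* a discard or demote changes the execution mask for everything that
       * follows, so bump the exec_id (it never returns to an old value) */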
      if (instr->opcode == aco_opcode::p_discard_if ||
          instr->opcode == aco_opcode::p_demote_to_helper)
         ctx.exec_id++;

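      /* phis and instructions without definitions are not value numbered */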
      if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi) {
         new_instructions.emplace_back(std::move(instr));
         continue;
      }

      /* simple copy-propagation through renaming */
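      /* (DPP and SDWA movs modify the copied value, so they are excluded) */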
      if ((instr->opcode == aco_opcode::s_mov_b32 || instr->opcode == aco_opcode::s_mov_b64 || instr->opcode == aco_opcode::v_mov_b32) &&
          !instr->definitions[0].isFixed() && instr->operands[0].isTemp() && instr->operands[0].regClass() == instr->definitions[0].regClass() &&
          !instr->isDPP() && !((int)instr->format & (int)Format::SDWA)) {
         ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
      }

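      /* store the exec_id in pass_flags so that InstrHash and InstrPred can
       * take the execution mask into account */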
      instr->pass_flags = ctx.exec_id;
      std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);

      /* if there was already an expression with the same value number */
      if (!res.second) {
         Instruction* orig_instr = res.first->first;
         assert(instr->definitions.size() == orig_instr->definitions.size());
         /* check if the original instruction dominates the current one */
         if (dominates(ctx, res.first->second, block.index) &&
             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
            for (unsigned i = 0; i < instr->definitions.size(); i++) {
               assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
               assert(instr->definitions[i].isTemp());
               ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
            }
         } else {
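            /* the original instruction doesn't dominate this one (or the
             * float modes are incompatible): make this instruction the new
             * representative of the expression */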
            ctx.expr_values.erase(res.first);
            ctx.expr_values.emplace(instr.get(), block.index);
            new_instructions.emplace_back(std::move(instr));
         }
      } else {
         new_instructions.emplace_back(std::move(instr));
      }
   }

   block.instructions = std::move(new_instructions);
}

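/* Renames the operands of all phis at the top of the block. This is used for
 * blocks which are skipped by process_block() and for loop headers, whose
 * back-edge operands refer to temporaries that are only renamed after the
 * rest of the program has been processed.
 */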
void rename_phi_operands(Block& block, std::map<uint32_t, Temp>& renames)
{
   for (aco_ptr<Instruction>& phi : block.instructions) {
      if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
         break;

      for (Operand& op : phi->operands) {
         if (!op.isTemp())
            continue;
         auto it = renames.find(op.tempId());
         if (it != renames.end())
            op.setTemp(it->second);
      }
   }
}
} /* end namespace */


void value_numbering(Program* program)
{
   vn_ctx ctx(program);
   std::vector<unsigned> loop_headers;

   for (Block& block : program->blocks) {
      assert(ctx.exec_id > 0);
      /* decrement exec_id when leaving nested control flow */
      if (block.kind & block_kind_loop_header)
         loop_headers.push_back(block.index);
      if (block.kind & block_kind_merge) {
         ctx.exec_id--;
      } else if (block.kind & block_kind_loop_exit) {
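         /* undo the increments from all edges entering the loop header
          * (preheader and continues) and from all break edges entering
          * this exit block */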
         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
         ctx.exec_id -= block.linear_preds.size();
         loop_headers.pop_back();
      }

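      /* blocks without a logical dominator are not value numbered;
       * only rename their phi operands */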
      if (block.logical_idom != -1)
         process_block(ctx, block);
      else
         rename_phi_operands(block, ctx.renames);

      /* increment exec_id when entering nested control flow */
      if (block.kind & block_kind_branch ||
          block.kind & block_kind_loop_preheader ||
          block.kind & block_kind_break ||
          block.kind & block_kind_continue ||
          block.kind & block_kind_discard)
         ctx.exec_id++;
      else if (block.kind & block_kind_continue_or_break)
         ctx.exec_id += 2;
   }

   /* rename loop header phi operands */
   for (Block& block : program->blocks) {
      if (block.kind & block_kind_loop_header)
         rename_phi_operands(block, ctx.renames);
   }
}

}