aco: use nir_addition_might_overflow to combine additions into SMEM
[mesa.git] / src / amd / compiler / aco_opt_value_numbering.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include <map>
26 #include <unordered_map>
27 #include "aco_ir.h"
28
29 /*
30 * Implements the algorithm for dominator-tree value numbering
31 * from "Value Numbering" by Briggs, Cooper, and Simpson.
32 */
33
34 namespace aco {
35 namespace {
36
/* One Murmur3 mixing step: scrambles the 32-bit key k and folds it into the
 * running hash state h. Returns the updated hash state. */
inline uint32_t murmur_32_scramble(uint32_t h, uint32_t k)
{
   k *= 0xcc9e2d51u;
   k = (k << 15) | (k >> 17); /* rotl(k, 15) */
   h ^= k * 0x1b873593u;
   h = (h << 13) | (h >> 19); /* rotl(h, 13) */
   return h * 5u + 0xe6546b64u;
}
46
47 template<typename T>
48 uint32_t hash_murmur_32(Instruction* instr)
49 {
50 uint32_t hash = uint32_t(instr->format) << 16 | uint32_t(instr->opcode);
51
52 for (const Operand& op : instr->operands)
53 hash = murmur_32_scramble(hash, op.constantValue());
54
55 /* skip format, opcode and pass_flags */
56 for (unsigned i = 2; i < (sizeof(T) >> 2); i++) {
57 uint32_t u;
58 /* Accesses it though a byte array, so doesn't violate the strict aliasing rule */
59 memcpy(&u, reinterpret_cast<uint8_t *>(instr) + i * 4, 4);
60 hash = murmur_32_scramble(hash, u);
61 }
62
63 /* Finalize. */
64 uint32_t len = instr->operands.size() + instr->definitions.size() + sizeof(T);
65 hash ^= len;
66 hash ^= hash >> 16;
67 hash *= 0x85ebca6b;
68 hash ^= hash >> 13;
69 hash *= 0xc2b2ae35;
70 hash ^= hash >> 16;
71 return hash;
72 }
73
74 struct InstrHash {
75 /* This hash function uses the Murmur3 algorithm written by Austin Appleby
76 * https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
77 *
78 * In order to calculate the expression set, only the right-hand-side of an
79 * instruction is used for the hash, i.e. everything except the definitions.
80 */
81 std::size_t operator()(Instruction* instr) const
82 {
83 if (instr->isVOP3())
84 return hash_murmur_32<VOP3A_instruction>(instr);
85
86 if (instr->isDPP())
87 return hash_murmur_32<DPP_instruction>(instr);
88
89 if (instr->isSDWA())
90 return hash_murmur_32<SDWA_instruction>(instr);
91
92 switch (instr->format) {
93 case Format::SMEM:
94 return hash_murmur_32<SMEM_instruction>(instr);
95 case Format::VINTRP:
96 return hash_murmur_32<Interp_instruction>(instr);
97 case Format::DS:
98 return hash_murmur_32<DS_instruction>(instr);
99 case Format::SOPP:
100 return hash_murmur_32<SOPP_instruction>(instr);
101 case Format::SOPK:
102 return hash_murmur_32<SOPK_instruction>(instr);
103 case Format::EXP:
104 return hash_murmur_32<Export_instruction>(instr);
105 case Format::MUBUF:
106 return hash_murmur_32<MUBUF_instruction>(instr);
107 case Format::MIMG:
108 return hash_murmur_32<MIMG_instruction>(instr);
109 case Format::MTBUF:
110 return hash_murmur_32<MTBUF_instruction>(instr);
111 case Format::FLAT:
112 return hash_murmur_32<FLAT_instruction>(instr);
113 case Format::PSEUDO_BRANCH:
114 return hash_murmur_32<Pseudo_branch_instruction>(instr);
115 case Format::PSEUDO_REDUCTION:
116 return hash_murmur_32<Pseudo_reduction_instruction>(instr);
117 default:
118 return hash_murmur_32<Instruction>(instr);
119 }
120 }
121 };
122
struct InstrPred {
   /* Equality predicate for the expression set. Two instructions are "equal"
    * when they compute the same value: same opcode/format, matching operands
    * and matching encoding-specific fields. Definitions only need matching
    * register classes — their temp ids are exactly what value numbering
    * renames. pass_flags holds the exec_id, so comparing pass_flags checks
    * that both instructions (presumably) run under the same exec mask.
    */
   bool operator()(Instruction* a, Instruction* b) const
   {
      if (a->format != b->format)
         return false;
      if (a->opcode != b->opcode)
         return false;
      if (a->operands.size() != b->operands.size() || a->definitions.size() != b->definitions.size())
         return false; /* possible with pseudo-instructions */
      for (unsigned i = 0; i < a->operands.size(); i++) {
         if (a->operands[i].isConstant()) {
            if (!b->operands[i].isConstant())
               return false;
            if (a->operands[i].constantValue() != b->operands[i].constantValue())
               return false;
         }
         else if (a->operands[i].isTemp()) {
            if (!b->operands[i].isTemp())
               return false;
            if (a->operands[i].tempId() != b->operands[i].tempId())
               return false;
         }
         else if (a->operands[i].isUndefined() ^ b->operands[i].isUndefined())
            return false;
         if (a->operands[i].isFixed()) {
            if (!b->operands[i].isFixed())
               return false;
            if (a->operands[i].physReg() != b->operands[i].physReg())
               return false;
            /* An operand fixed to exec reads the exec mask: only equal if both
             * instructions have the same exec_id. */
            if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)
               return false;
         }
      }
      for (unsigned i = 0; i < a->definitions.size(); i++) {
         if (a->definitions[i].isTemp()) {
            if (!b->definitions[i].isTemp())
               return false;
            if (a->definitions[i].regClass() != b->definitions[i].regClass())
               return false;
         }
         if (a->definitions[i].isFixed()) {
            if (!b->definitions[i].isFixed())
               return false;
            if (a->definitions[i].physReg() != b->definitions[i].physReg())
               return false;
            /* Never merge definitions fixed to exec. */
            if (a->definitions[i].physReg() == exec)
               return false;
         }
      }

      /* The result depends on which lanes are active. */
      if (a->opcode == aco_opcode::v_readfirstlane_b32)
         return a->pass_flags == b->pass_flags;

      /* The results of VOPC depend on the exec mask if used for subgroup operations. */
      if ((uint32_t) a->format & (uint32_t) Format::VOPC && a->pass_flags != b->pass_flags)
         return false;

      /* Below: compare the encoding-specific fields of each instruction kind. */
      if (a->isVOP3()) {
         VOP3A_instruction* a3 = static_cast<VOP3A_instruction*>(a);
         VOP3A_instruction* b3 = static_cast<VOP3A_instruction*>(b);
         for (unsigned i = 0; i < 3; i++) {
            if (a3->abs[i] != b3->abs[i] ||
                a3->neg[i] != b3->neg[i])
               return false;
         }
         return a3->clamp == b3->clamp &&
                a3->omod == b3->omod &&
                a3->opsel == b3->opsel;
      }
      if (a->isDPP()) {
         DPP_instruction* aDPP = static_cast<DPP_instruction*>(a);
         DPP_instruction* bDPP = static_cast<DPP_instruction*>(b);
         /* DPP moves data between lanes, so the exec mask (pass_flags) matters. */
         return aDPP->pass_flags == bDPP->pass_flags &&
                aDPP->dpp_ctrl == bDPP->dpp_ctrl &&
                aDPP->bank_mask == bDPP->bank_mask &&
                aDPP->row_mask == bDPP->row_mask &&
                aDPP->bound_ctrl == bDPP->bound_ctrl &&
                aDPP->abs[0] == bDPP->abs[0] &&
                aDPP->abs[1] == bDPP->abs[1] &&
                aDPP->neg[0] == bDPP->neg[0] &&
                aDPP->neg[1] == bDPP->neg[1];
      }
      if (a->isSDWA()) {
         SDWA_instruction* aSDWA = static_cast<SDWA_instruction*>(a);
         SDWA_instruction* bSDWA = static_cast<SDWA_instruction*>(b);
         return aSDWA->sel[0] == bSDWA->sel[0] &&
                aSDWA->sel[1] == bSDWA->sel[1] &&
                aSDWA->dst_sel == bSDWA->dst_sel &&
                aSDWA->abs[0] == bSDWA->abs[0] &&
                aSDWA->abs[1] == bSDWA->abs[1] &&
                aSDWA->neg[0] == bSDWA->neg[0] &&
                aSDWA->neg[1] == bSDWA->neg[1] &&
                aSDWA->dst_preserve == bSDWA->dst_preserve &&
                aSDWA->clamp == bSDWA->clamp &&
                aSDWA->omod == bSDWA->omod;
      }

      switch (a->format) {
         case Format::SOPK: {
            SOPK_instruction* aK = static_cast<SOPK_instruction*>(a);
            SOPK_instruction* bK = static_cast<SOPK_instruction*>(b);
            return aK->imm == bK->imm;
         }
         case Format::SMEM: {
            SMEM_instruction* aS = static_cast<SMEM_instruction*>(a);
            SMEM_instruction* bS = static_cast<SMEM_instruction*>(b);
            /* isel shouldn't be creating situations where this assertion fails */
            assert(aS->prevent_overflow == bS->prevent_overflow);
            /* Only reorderable (side-effect free, non-aliasing) loads can be merged. */
            return aS->can_reorder && bS->can_reorder &&
                   aS->glc == bS->glc && aS->nv == bS->nv &&
                   aS->prevent_overflow == bS->prevent_overflow;
         }
         case Format::VINTRP: {
            Interp_instruction* aI = static_cast<Interp_instruction*>(a);
            Interp_instruction* bI = static_cast<Interp_instruction*>(b);
            if (aI->attribute != bI->attribute)
               return false;
            if (aI->component != bI->component)
               return false;
            return true;
         }
         case Format::PSEUDO_REDUCTION: {
            Pseudo_reduction_instruction *aR = static_cast<Pseudo_reduction_instruction*>(a);
            Pseudo_reduction_instruction *bR = static_cast<Pseudo_reduction_instruction*>(b);
            /* Reductions operate across lanes, so the exec mask matters. */
            return aR->pass_flags == bR->pass_flags &&
                   aR->reduce_op == bR->reduce_op &&
                   aR->cluster_size == bR->cluster_size;
         }
         case Format::MTBUF: {
            MTBUF_instruction* aM = static_cast<MTBUF_instruction *>(a);
            MTBUF_instruction* bM = static_cast<MTBUF_instruction *>(b);
            return aM->can_reorder && bM->can_reorder &&
                   aM->barrier == bM->barrier &&
                   aM->dfmt == bM->dfmt &&
                   aM->nfmt == bM->nfmt &&
                   aM->offset == bM->offset &&
                   aM->offen == bM->offen &&
                   aM->idxen == bM->idxen &&
                   aM->glc == bM->glc &&
                   aM->dlc == bM->dlc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         case Format::MUBUF: {
            MUBUF_instruction* aM = static_cast<MUBUF_instruction *>(a);
            MUBUF_instruction* bM = static_cast<MUBUF_instruction *>(b);
            return aM->can_reorder && bM->can_reorder &&
                   aM->barrier == bM->barrier &&
                   aM->offset == bM->offset &&
                   aM->offen == bM->offen &&
                   aM->idxen == bM->idxen &&
                   aM->glc == bM->glc &&
                   aM->dlc == bM->dlc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->lds == bM->lds &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         /* we want to optimize these in NIR and don't hassle with load-store dependencies */
         case Format::FLAT:
         case Format::GLOBAL:
         case Format::SCRATCH:
         case Format::EXP:
         case Format::SOPP:
         case Format::PSEUDO_BRANCH:
         case Format::PSEUDO_BARRIER:
            return false;
         case Format::DS: {
            /* Only the lane-shuffle DS opcodes are pure; everything else touches LDS. */
            if (a->opcode != aco_opcode::ds_bpermute_b32 &&
                a->opcode != aco_opcode::ds_permute_b32 &&
                a->opcode != aco_opcode::ds_swizzle_b32)
               return false;
            DS_instruction* aD = static_cast<DS_instruction *>(a);
            DS_instruction* bD = static_cast<DS_instruction *>(b);
            return aD->pass_flags == bD->pass_flags &&
                   aD->gds == bD->gds &&
                   aD->offset0 == bD->offset0 &&
                   aD->offset1 == bD->offset1;
         }
         case Format::MIMG: {
            MIMG_instruction* aM = static_cast<MIMG_instruction*>(a);
            MIMG_instruction* bM = static_cast<MIMG_instruction*>(b);
            return aM->can_reorder && bM->can_reorder &&
                   aM->barrier == bM->barrier &&
                   aM->dmask == bM->dmask &&
                   aM->unrm == bM->unrm &&
                   aM->glc == bM->glc &&
                   aM->slc == bM->slc &&
                   aM->tfe == bM->tfe &&
                   aM->da == bM->da &&
                   aM->lwe == bM->lwe &&
                   aM->r128 == bM->r128 &&
                   aM->a16 == bM->a16 &&
                   aM->d16 == bM->d16 &&
                   aM->disable_wqm == bM->disable_wqm;
         }
         default:
            return true;
      }
   }
};
325
/* Maps an instruction (hashed/compared by its right-hand side only, see
 * InstrHash/InstrPred) to the index of the block it was emitted in. */
using expr_set = std::unordered_map<Instruction*, uint32_t, InstrHash, InstrPred>;
327
328 struct vn_ctx {
329 Program* program;
330 expr_set expr_values;
331 std::map<uint32_t, Temp> renames;
332
333 /* The exec id should be the same on the same level of control flow depth.
334 * Together with the check for dominator relations, it is safe to assume
335 * that the same exec_id also means the same execution mask.
336 * Discards increment the exec_id, so that it won't return to the previous value.
337 */
338 uint32_t exec_id = 1;
339
340 vn_ctx(Program* program) : program(program) {
341 static_assert(sizeof(Temp) == 4, "Temp must fit in 32bits");
342 unsigned size = 0;
343 for (Block& block : program->blocks)
344 size += block.instructions.size();
345 expr_values.reserve(size);
346 }
347 };
348
349
350 /* dominates() returns true if the parent block dominates the child block and
351 * if the parent block is part of the same loop or has a smaller loop nest depth.
352 */
353 bool dominates(vn_ctx& ctx, uint32_t parent, uint32_t child)
354 {
355 unsigned parent_loop_nest_depth = ctx.program->blocks[parent].loop_nest_depth;
356 while (parent < child && parent_loop_nest_depth <= ctx.program->blocks[child].loop_nest_depth)
357 child = ctx.program->blocks[child].logical_idom;
358
359 return parent == child;
360 }
361
/* Value-numbers a single block: renames operands using previously discovered
 * equivalences, records each value-producing instruction in the expression
 * set, and drops instructions whose value was already computed by a
 * dominating, equivalent instruction (their definitions are renamed instead).
 */
void process_block(vn_ctx& ctx, Block& block)
{
   std::vector<aco_ptr<Instruction>> new_instructions;
   new_instructions.reserve(block.instructions.size());

   for (aco_ptr<Instruction>& instr : block.instructions) {
      /* first, rename operands */
      for (Operand& op : instr->operands) {
         if (!op.isTemp())
            continue;
         auto it = ctx.renames.find(op.tempId());
         if (it != ctx.renames.end())
            op.setTemp(it->second);
      }

      /* Discards change the active lanes; bump exec_id so expressions from
       * before the discard never compare equal to ones after it. */
      if (instr->opcode == aco_opcode::p_discard_if ||
          instr->opcode == aco_opcode::p_demote_to_helper)
         ctx.exec_id++;

      /* Instructions without definitions produce no value to number; phis are
       * handled separately (see rename_phi_operands). */
      if (instr->definitions.empty() || instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi) {
         new_instructions.emplace_back(std::move(instr));
         continue;
      }

      /* simple copy-propagation through renaming */
      if ((instr->opcode == aco_opcode::s_mov_b32 || instr->opcode == aco_opcode::s_mov_b64 || instr->opcode == aco_opcode::v_mov_b32) &&
          !instr->definitions[0].isFixed() && instr->operands[0].isTemp() && instr->operands[0].regClass() == instr->definitions[0].regClass() &&
          !instr->isDPP() && !((int)instr->format & (int)Format::SDWA)) {
         ctx.renames[instr->definitions[0].tempId()] = instr->operands[0].getTemp();
      }

      /* pass_flags carries the exec_id so InstrPred can compare exec masks. */
      instr->pass_flags = ctx.exec_id;
      std::pair<expr_set::iterator, bool> res = ctx.expr_values.emplace(instr.get(), block.index);

      /* if there was already an expression with the same value number */
      if (!res.second) {
         Instruction* orig_instr = res.first->first;
         assert(instr->definitions.size() == orig_instr->definitions.size());
         /* check if the original instruction dominates the current one */
         if (dominates(ctx, res.first->second, block.index) &&
             ctx.program->blocks[res.first->second].fp_mode.canReplace(block.fp_mode)) {
            /* Reuse the dominating instruction: rename our definitions to its
             * temps and drop this instruction from the block. */
            for (unsigned i = 0; i < instr->definitions.size(); i++) {
               assert(instr->definitions[i].regClass() == orig_instr->definitions[i].regClass());
               assert(instr->definitions[i].isTemp());
               ctx.renames[instr->definitions[i].tempId()] = orig_instr->definitions[i].getTemp();
               if (instr->definitions[i].isPrecise())
                  orig_instr->definitions[i].setPrecise(true);
               /* SPIR_V spec says that an instruction marked with NUW wrapping
                * around is undefined behaviour, so we can break additions in
                * other contexts.
                */
               if (instr->definitions[i].isNUW())
                  orig_instr->definitions[i].setNUW(true);
            }
         } else {
            /* The recorded instruction doesn't dominate this one: replace the
             * entry with the current instruction, which may dominate later
             * uses, and keep the instruction. */
            ctx.expr_values.erase(res.first);
            ctx.expr_values.emplace(instr.get(), block.index);
            new_instructions.emplace_back(std::move(instr));
         }
      } else {
         new_instructions.emplace_back(std::move(instr));
      }
   }

   block.instructions = std::move(new_instructions);
}
428
429 void rename_phi_operands(Block& block, std::map<uint32_t, Temp>& renames)
430 {
431 for (aco_ptr<Instruction>& phi : block.instructions) {
432 if (phi->opcode != aco_opcode::p_phi && phi->opcode != aco_opcode::p_linear_phi)
433 break;
434
435 for (Operand& op : phi->operands) {
436 if (!op.isTemp())
437 continue;
438 auto it = renames.find(op.tempId());
439 if (it != renames.end())
440 op.setTemp(it->second);
441 }
442 }
443 }
444 } /* end namespace */
445
446
/* Pass entry point: value-numbers every block of the program in order while
 * tracking exec_id across control-flow transitions, then fixes up loop-header
 * phi operands (whose incoming values from the back-edge are only known after
 * the loop body has been processed).
 */
void value_numbering(Program* program)
{
   vn_ctx ctx(program);
   std::vector<unsigned> loop_headers;

   for (Block& block : program->blocks) {
      assert(ctx.exec_id > 0);
      /* decrement exec_id when leaving nested control flow */
      if (block.kind & block_kind_loop_header)
         loop_headers.push_back(block.index);
      if (block.kind & block_kind_merge) {
         ctx.exec_id--;
      } else if (block.kind & block_kind_loop_exit) {
         /* Undo the increments accumulated inside the loop: one per linear
          * predecessor of the header and of the exit block. */
         ctx.exec_id -= program->blocks[loop_headers.back()].linear_preds.size();
         ctx.exec_id -= block.linear_preds.size();
         loop_headers.pop_back();
      }

      /* Blocks without a logical immediate dominator are not value-numbered;
       * only their phi operands get renamed. */
      if (block.logical_idom != -1)
         process_block(ctx, block);
      else
         rename_phi_operands(block, ctx.renames);

      /* increment exec_id when entering nested control flow */
      if (block.kind & block_kind_branch ||
          block.kind & block_kind_loop_preheader ||
          block.kind & block_kind_break ||
          block.kind & block_kind_continue ||
          block.kind & block_kind_discard)
         ctx.exec_id++;
      else if (block.kind & block_kind_continue_or_break)
         ctx.exec_id += 2;
   }

   /* rename loop header phi operands */
   for (Block& block : program->blocks) {
      if (block.kind & block_kind_loop_header)
         rename_phi_operands(block, ctx.renames);
   }
}
487
488 }