a23ff7ce01762c3466f3c2cce0b631a667bed5fa
[mesa.git] / src / amd / compiler / aco_ir.h
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #ifndef ACO_IR_H
26 #define ACO_IR_H
27
28 #include <vector>
29 #include <set>
30 #include <unordered_set>
31 #include <bitset>
32 #include <memory>
33
34 #include "nir.h"
35 #include "ac_binary.h"
36 #include "amd_family.h"
37 #include "aco_opcodes.h"
38 #include "aco_util.h"
39
40 struct radv_nir_compiler_options;
41 struct radv_shader_args;
42 struct radv_shader_info;
43
44 namespace aco {
45
/* Bitmask of enabled debug options (DEBUG_* flags below); defined in a
 * single translation unit and consulted wherever validation or
 * performance warnings are conditionally run. */
extern uint64_t debug_flags;

enum {
   DEBUG_VALIDATE = 0x1,    /* validate the IR */
   DEBUG_VALIDATE_RA = 0x2, /* validate register assignment */
   DEBUG_PERFWARN = 0x4,    /* report performance warnings */
};
53
/**
 * Representation of the instruction's microcode encoding format
 * Note: Some Vector ALU Formats can be combined, such that:
 * - VOP2* | VOP3A represents a VOP2 instruction in VOP3A encoding
 * - VOP2* | DPP represents a VOP2 instruction with data parallel primitive.
 * - VOP2* | SDWA represents a VOP2 instruction with sub-dword addressing.
 *
 * (*) The same is applicable for VOP1 and VOPC instructions.
 */
enum class Format : std::uint16_t {
   /* Pseudo Instruction Format */
   PSEUDO = 0,
   /* Scalar ALU & Control Formats */
   SOP1 = 1,
   SOP2 = 2,
   SOPK = 3,
   SOPP = 4,
   SOPC = 5,
   /* Scalar Memory Format */
   SMEM = 6,
   /* LDS/GDS Format */
   DS = 8,
   /* Vector Memory Buffer Formats */
   MTBUF = 9,
   MUBUF = 10,
   /* Vector Memory Image Format */
   MIMG = 11,
   /* Export Format */
   EXP = 12,
   /* Flat Formats */
   FLAT = 13,
   GLOBAL = 14,
   SCRATCH = 15,

   PSEUDO_BRANCH = 16,
   PSEUDO_BARRIER = 17,
   PSEUDO_REDUCTION = 18,

   /* Vector ALU Formats */
   VOP3P = 19,
   /* The formats below occupy single, distinct bits so they can be OR'ed
    * with a base VOP1/VOP2/VOPC value (see the note above). */
   VOP1 = 1 << 8,
   VOP2 = 1 << 9,
   VOPC = 1 << 10,
   VOP3 = 1 << 11,
   VOP3A = 1 << 11, /* alias of VOP3 */
   VOP3B = 1 << 11, /* alias of VOP3 */
   /* Vector Parameter Interpolation Format */
   VINTRP = 1 << 12,
   DPP = 1 << 13,
   SDWA = 1 << 14,
};
105
/* Flags describing which kinds of memory operations an instruction
 * interacts with; used when deciding whether instructions may be
 * reordered across each other. */
enum barrier_interaction : uint8_t {
   barrier_none = 0,
   barrier_buffer = 0x1,
   barrier_image = 0x2,
   barrier_atomic = 0x4,
   barrier_shared = 0x8,
   /* used for geometry shaders to ensure vertex data writes are before the
    * GS_DONE s_sendmsg. */
   barrier_gs_data = 0x10,
   /* used for geometry shaders to ensure s_sendmsg instructions are in-order. */
   barrier_gs_sendmsg = 0x20,
   /* used by barriers. created by s_barrier */
   barrier_barrier = 0x40,
   /* number of distinct flag bits above (0x1 .. 0x40) */
   barrier_count = 7,
};
121
/* Rounding modes, 2-bit encoding as stored in the MODE register
 * (see float_mode below). */
enum fp_round {
   fp_round_ne = 0, /* nearest even */
   fp_round_pi = 1, /* toward +infinity */
   fp_round_ni = 2, /* toward -infinity */
   fp_round_tz = 3, /* toward zero */
};

/* Denormal handling, 2-bit encoding as stored in the MODE register. */
enum fp_denorm {
   /* Note that v_rcp_f32, v_exp_f32, v_log_f32, v_sqrt_f32, v_rsq_f32 and
    * v_mad_f32/v_madak_f32/v_madmk_f32/v_mac_f32 always flush denormals. */
   fp_denorm_flush = 0x0,
   fp_denorm_keep = 0x3,
};
135
/* Floating-point mode of a shader (or part of one): the low byte mirrors
 * the hardware MODE register encoding, the remaining flags describe which
 * FP properties optimizations must preserve. */
struct float_mode {
   /* matches encoding of the MODE register */
   union {
      struct {
         fp_round round32:2;    /* rounding mode for 32-bit FP ops */
         fp_round round16_64:2; /* rounding mode for 16/64-bit FP ops */
         unsigned denorm32:2;   /* denormal mode for 32-bit FP ops */
         unsigned denorm16_64:2;/* denormal mode for 16/64-bit FP ops */
      };
      struct {
         uint8_t round:4;       /* both rounding fields as one nibble */
         uint8_t denorm:4;      /* both denormal fields as one nibble */
      };
      uint8_t val = 0;          /* whole-byte view, used for fast comparison */
   };
   /* if false, optimizations which may remove infs/nan/-0.0 can be done */
   bool preserve_signed_zero_inf_nan32:1;
   bool preserve_signed_zero_inf_nan16_64:1;
   /* if false, optimizations which may remove denormal flushing can be done */
   bool must_flush_denorms32:1;
   bool must_flush_denorms16_64:1;
   bool care_about_round32:1;
   bool care_about_round16_64:1;

   /* Returns true if instructions using the mode "other" can safely use the
    * current one instead: the MODE bits must match exactly, and every
    * strictness flag required by "other" must also be set here. */
   bool canReplace(float_mode other) const noexcept {
      return val == other.val &&
             (preserve_signed_zero_inf_nan32 || !other.preserve_signed_zero_inf_nan32) &&
             (preserve_signed_zero_inf_nan16_64 || !other.preserve_signed_zero_inf_nan16_64) &&
             (must_flush_denorms32 || !other.must_flush_denorms32) &&
             (must_flush_denorms16_64 || !other.must_flush_denorms16_64) &&
             (care_about_round32 || !other.care_about_round32) &&
             (care_about_round16_64 || !other.care_about_round16_64);
   }
};
172
173 constexpr Format asVOP3(Format format) {
174 return (Format) ((uint32_t) Format::VOP3 | (uint32_t) format);
175 };
176
/* Combine the SDWA encoding bit with a base VOP1/VOP2/VOPC format.
 * Only those three base formats support SDWA, hence the assert.
 * NOTE(review): assert() appears to come in transitively (e.g. via nir.h);
 * confirm <cassert> is reachable from this header. */
constexpr Format asSDWA(Format format) {
   assert(format == Format::VOP1 || format == Format::VOP2 || format == Format::VOPC);
   return (Format) ((uint32_t) Format::SDWA | (uint32_t) format);
}
181
/* Kind of register a temporary lives in. */
enum class RegType {
   none = 0,
   sgpr,        /* scalar GPR */
   vgpr,        /* vector GPR */
   linear_vgpr, /* VGPR with linear (non-divergent) live range, see RegClass */
};
188
/* Register class of a temporary: its type and size packed into one byte.
 *
 * Bit layout of RC:
 *   bits 0-4: size (in dwords, or in bytes when the sub-dword bit is set)
 *   bit 5:    vgpr (otherwise sgpr)
 *   bit 6:    linear vgpr (WWM and vgpr spills); sgpr classes are always linear
 *   bit 7:    sub-dword (size field counts bytes instead of dwords)
 */
struct RegClass {

   enum RC : uint8_t {
      s1 = 1,
      s2 = 2,
      s3 = 3,
      s4 = 4,
      s6 = 6,
      s8 = 8,
      s16 = 16,
      v1 = s1 | (1 << 5),
      v2 = s2 | (1 << 5),
      v3 = s3 | (1 << 5),
      v4 = s4 | (1 << 5),
      v5 = 5 | (1 << 5),
      v6 = 6 | (1 << 5),
      v7 = 7 | (1 << 5),
      v8 = 8 | (1 << 5),
      /* byte-sized register class */
      v1b = v1 | (1 << 7),
      v2b = v2 | (1 << 7),
      v3b = v3 | (1 << 7),
      v4b = v4 | (1 << 7),
      v6b = v6 | (1 << 7),
      v8b = v8 | (1 << 7),
      /* these are used for WWM and spills to vgpr */
      v1_linear = v1 | (1 << 6),
      v2_linear = v2 | (1 << 6),
   };

   RegClass() = default;
   constexpr RegClass(RC rc)
      : rc(rc) {}
   /* size is in dwords here; use get() below for byte granularity */
   constexpr RegClass(RegType type, unsigned size)
      : rc((RC) ((type == RegType::vgpr ? 1 << 5 : 0) | size)) {}

   constexpr operator RC() const { return rc; }
   explicit operator bool() = delete;

   constexpr RegType type() const { return rc <= RC::s16 ? RegType::sgpr : RegType::vgpr; }
   constexpr bool is_subdword() const { return rc & (1 << 7); }
   /* size in bytes: the size field counts bytes for sub-dword classes,
    * dwords otherwise */
   constexpr unsigned bytes() const { return ((unsigned) rc & 0x1F) * (is_subdword() ? 1 : 4); }
   //TODO: use size() less in favor of bytes()
   /* size in dwords, rounded up for sub-dword classes */
   constexpr unsigned size() const { return (bytes() + 3) >> 2; }
   constexpr bool is_linear() const { return rc <= RC::s16 || rc & (1 << 6); }
   constexpr RegClass as_linear() const { return RegClass((RC) (rc | (1 << 6))); }
   constexpr RegClass as_subdword() const { return RegClass((RC) (rc | 1 << 7)); }

   /* Build a class from a byte count: sgprs are rounded up to whole dwords,
    * vgprs become sub-dword classes when the size isn't dword-aligned. */
   static constexpr RegClass get(RegType type, unsigned bytes) {
      if (type == RegType::sgpr) {
         return RegClass(type, DIV_ROUND_UP(bytes, 4u));
      } else {
         return bytes % 4u ? RegClass(type, bytes).as_subdword() :
                             RegClass(type, bytes / 4u);
      }
   }

private:
   RC rc;
};
249
250 /* transitional helper expressions */
251 static constexpr RegClass s1{RegClass::s1};
252 static constexpr RegClass s2{RegClass::s2};
253 static constexpr RegClass s3{RegClass::s3};
254 static constexpr RegClass s4{RegClass::s4};
255 static constexpr RegClass s8{RegClass::s8};
256 static constexpr RegClass s16{RegClass::s16};
257 static constexpr RegClass v1{RegClass::v1};
258 static constexpr RegClass v2{RegClass::v2};
259 static constexpr RegClass v3{RegClass::v3};
260 static constexpr RegClass v4{RegClass::v4};
261 static constexpr RegClass v5{RegClass::v5};
262 static constexpr RegClass v6{RegClass::v6};
263 static constexpr RegClass v7{RegClass::v7};
264 static constexpr RegClass v8{RegClass::v8};
265 static constexpr RegClass v1b{RegClass::v1b};
266 static constexpr RegClass v2b{RegClass::v2b};
267 static constexpr RegClass v3b{RegClass::v3b};
268 static constexpr RegClass v4b{RegClass::v4b};
269 static constexpr RegClass v6b{RegClass::v6b};
270 static constexpr RegClass v8b{RegClass::v8b};
271
/**
 * Temp Class
 * Each temporary virtual register has a
 * register class (i.e. size and type)
 * and SSA id.
 */
struct Temp {
   Temp() noexcept : id_(0), reg_class(0) {}
   constexpr Temp(uint32_t id, RegClass cls) noexcept
      : id_(id), reg_class(uint8_t(cls)) {}

   constexpr uint32_t id() const noexcept { return id_; }
   constexpr RegClass regClass() const noexcept { return (RegClass::RC)reg_class; }

   constexpr unsigned bytes() const noexcept { return regClass().bytes(); }
   constexpr unsigned size() const noexcept { return regClass().size(); }
   constexpr RegType type() const noexcept { return regClass().type(); }
   constexpr bool is_linear() const noexcept { return regClass().is_linear(); }

   /* comparisons are by SSA id only; the register class is ignored */
   constexpr bool operator <(Temp other) const noexcept { return id() < other.id(); }
   constexpr bool operator==(Temp other) const noexcept { return id() == other.id(); }
   constexpr bool operator!=(Temp other) const noexcept { return id() != other.id(); }

private:
   uint32_t id_: 24;       /* SSA id, limited to 24 bits */
   uint32_t reg_class : 8; /* RegClass::RC packed into the remaining byte */
};
299
/**
 * PhysReg
 * Represents the physical register for each
 * Operand and Definition.
 * Stored at byte granularity: reg_b = register index * 4 + byte offset,
 * so sub-dword locations can be addressed.
 */
struct PhysReg {
   constexpr PhysReg() = default;
   explicit constexpr PhysReg(unsigned r) : reg_b(r << 2) {}
   constexpr unsigned reg() const { return reg_b >> 2; }
   constexpr unsigned byte() const { return reg_b & 0x3; }
   /* implicit conversion yields the dword register index */
   constexpr operator unsigned() const { return reg(); }
   constexpr bool operator==(PhysReg other) const { return reg_b == other.reg_b; }
   constexpr bool operator!=(PhysReg other) const { return reg_b != other.reg_b; }
   constexpr bool operator <(PhysReg other) const { return reg_b < other.reg_b; }
   /* advance by a signed number of bytes, keeping byte granularity */
   constexpr PhysReg advance(int bytes) const { PhysReg res = *this; res.reg_b += bytes; return res; }

   uint16_t reg_b = 0;
};
318
/* helper expressions for special registers */
static constexpr PhysReg m0{124};
static constexpr PhysReg vcc{106};       /* low half of the VCC pair (see vcc_hi) */
static constexpr PhysReg vcc_hi{107};
static constexpr PhysReg sgpr_null{125}; /* GFX10+ */
static constexpr PhysReg exec{126};      /* same index as exec_lo */
static constexpr PhysReg exec_lo{126};
static constexpr PhysReg exec_hi{127};
static constexpr PhysReg vccz{251};
static constexpr PhysReg execz{252};
static constexpr PhysReg scc{253};
330
331 /**
332 * Operand Class
333 * Initially, each Operand refers to either
334 * a temporary virtual register
335 * or to a constant value
336 * Temporary registers get mapped to physical register during RA
337 * Constant values are inlined into the instruction sequence.
338 */
339 class Operand final
340 {
341 public:
342 constexpr Operand()
343 : reg_(PhysReg{128}), isTemp_(false), isFixed_(true), isConstant_(false),
344 isKill_(false), isUndef_(true), isFirstKill_(false), constSize(0),
345 isLateKill_(false) {}
346
347 explicit Operand(Temp r) noexcept
348 {
349 data_.temp = r;
350 if (r.id()) {
351 isTemp_ = true;
352 } else {
353 isUndef_ = true;
354 setFixed(PhysReg{128});
355 }
356 };
357 explicit Operand(uint8_t v) noexcept
358 {
359 /* 8-bit constants are only used for copies and copies from any 8-bit
360 * constant can be implemented with a SDWA v_mul_u32_u24. So consider all
361 * to be inline constants. */
362 data_.i = v;
363 isConstant_ = true;
364 constSize = 0;
365 setFixed(PhysReg{0u});
366 };
367 explicit Operand(uint16_t v) noexcept
368 {
369 data_.i = v;
370 isConstant_ = true;
371 constSize = 1;
372 if (v <= 64)
373 setFixed(PhysReg{128u + v});
374 else if (v >= 0xFFF0) /* [-16 .. -1] */
375 setFixed(PhysReg{192u + (0xFFFF - v)});
376 else if (v == 0x3800) /* 0.5 */
377 setFixed(PhysReg{240});
378 else if (v == 0xB800) /* -0.5 */
379 setFixed(PhysReg{241});
380 else if (v == 0x3C00) /* 1.0 */
381 setFixed(PhysReg{242});
382 else if (v == 0xBC00) /* -1.0 */
383 setFixed(PhysReg{243});
384 else if (v == 0x4000) /* 2.0 */
385 setFixed(PhysReg{244});
386 else if (v == 0xC000) /* -2.0 */
387 setFixed(PhysReg{245});
388 else if (v == 0x4400) /* 4.0 */
389 setFixed(PhysReg{246});
390 else if (v == 0xC400) /* -4.0 */
391 setFixed(PhysReg{247});
392 else if (v == 0x3118) /* 1/2 PI */
393 setFixed(PhysReg{248});
394 else /* Literal Constant */
395 setFixed(PhysReg{255});
396 };
397 explicit Operand(uint32_t v, bool is64bit = false) noexcept
398 {
399 data_.i = v;
400 isConstant_ = true;
401 constSize = is64bit ? 3 : 2;
402 if (v <= 64)
403 setFixed(PhysReg{128 + v});
404 else if (v >= 0xFFFFFFF0) /* [-16 .. -1] */
405 setFixed(PhysReg{192 - v});
406 else if (v == 0x3f000000) /* 0.5 */
407 setFixed(PhysReg{240});
408 else if (v == 0xbf000000) /* -0.5 */
409 setFixed(PhysReg{241});
410 else if (v == 0x3f800000) /* 1.0 */
411 setFixed(PhysReg{242});
412 else if (v == 0xbf800000) /* -1.0 */
413 setFixed(PhysReg{243});
414 else if (v == 0x40000000) /* 2.0 */
415 setFixed(PhysReg{244});
416 else if (v == 0xc0000000) /* -2.0 */
417 setFixed(PhysReg{245});
418 else if (v == 0x40800000) /* 4.0 */
419 setFixed(PhysReg{246});
420 else if (v == 0xc0800000) /* -4.0 */
421 setFixed(PhysReg{247});
422 else { /* Literal Constant */
423 assert(!is64bit && "attempt to create a 64-bit literal constant");
424 setFixed(PhysReg{255});
425 }
426 };
427 explicit Operand(uint64_t v) noexcept
428 {
429 isConstant_ = true;
430 constSize = 3;
431 if (v <= 64) {
432 data_.i = (uint32_t) v;
433 setFixed(PhysReg{128 + (uint32_t) v});
434 } else if (v >= 0xFFFFFFFFFFFFFFF0) { /* [-16 .. -1] */
435 data_.i = (uint32_t) v;
436 setFixed(PhysReg{192 - (uint32_t) v});
437 } else if (v == 0x3FE0000000000000) { /* 0.5 */
438 data_.i = 0x3f000000;
439 setFixed(PhysReg{240});
440 } else if (v == 0xBFE0000000000000) { /* -0.5 */
441 data_.i = 0xbf000000;
442 setFixed(PhysReg{241});
443 } else if (v == 0x3FF0000000000000) { /* 1.0 */
444 data_.i = 0x3f800000;
445 setFixed(PhysReg{242});
446 } else if (v == 0xBFF0000000000000) { /* -1.0 */
447 data_.i = 0xbf800000;
448 setFixed(PhysReg{243});
449 } else if (v == 0x4000000000000000) { /* 2.0 */
450 data_.i = 0x40000000;
451 setFixed(PhysReg{244});
452 } else if (v == 0xC000000000000000) { /* -2.0 */
453 data_.i = 0xc0000000;
454 setFixed(PhysReg{245});
455 } else if (v == 0x4010000000000000) { /* 4.0 */
456 data_.i = 0x40800000;
457 setFixed(PhysReg{246});
458 } else if (v == 0xC010000000000000) { /* -4.0 */
459 data_.i = 0xc0800000;
460 setFixed(PhysReg{247});
461 } else { /* Literal Constant: we don't know if it is a long or double.*/
462 isConstant_ = 0;
463 assert(false && "attempt to create a 64-bit literal constant");
464 }
465 };
466 explicit Operand(RegClass type) noexcept
467 {
468 isUndef_ = true;
469 data_.temp = Temp(0, type);
470 setFixed(PhysReg{128});
471 };
472 explicit Operand(PhysReg reg, RegClass type) noexcept
473 {
474 data_.temp = Temp(0, type);
475 setFixed(reg);
476 }
477
478 constexpr bool isTemp() const noexcept
479 {
480 return isTemp_;
481 }
482
483 constexpr void setTemp(Temp t) noexcept {
484 assert(!isConstant_);
485 isTemp_ = true;
486 data_.temp = t;
487 }
488
489 constexpr Temp getTemp() const noexcept
490 {
491 return data_.temp;
492 }
493
494 constexpr uint32_t tempId() const noexcept
495 {
496 return data_.temp.id();
497 }
498
499 constexpr bool hasRegClass() const noexcept
500 {
501 return isTemp() || isUndefined();
502 }
503
504 constexpr RegClass regClass() const noexcept
505 {
506 return data_.temp.regClass();
507 }
508
509 constexpr unsigned bytes() const noexcept
510 {
511 if (isConstant())
512 return 1 << constSize;
513 else
514 return data_.temp.bytes();
515 }
516
517 constexpr unsigned size() const noexcept
518 {
519 if (isConstant())
520 return constSize > 2 ? 2 : 1;
521 else
522 return data_.temp.size();
523 }
524
525 constexpr bool isFixed() const noexcept
526 {
527 return isFixed_;
528 }
529
530 constexpr PhysReg physReg() const noexcept
531 {
532 return reg_;
533 }
534
535 constexpr void setFixed(PhysReg reg) noexcept
536 {
537 isFixed_ = reg != unsigned(-1);
538 reg_ = reg;
539 }
540
541 constexpr bool isConstant() const noexcept
542 {
543 return isConstant_;
544 }
545
546 constexpr bool isLiteral() const noexcept
547 {
548 return isConstant() && reg_ == 255;
549 }
550
551 constexpr bool isUndefined() const noexcept
552 {
553 return isUndef_;
554 }
555
556 constexpr uint32_t constantValue() const noexcept
557 {
558 return data_.i;
559 }
560
561 constexpr bool constantEquals(uint32_t cmp) const noexcept
562 {
563 return isConstant() && constantValue() == cmp;
564 }
565
566 constexpr uint64_t constantValue64(bool signext=false) const noexcept
567 {
568 if (constSize == 3) {
569 if (reg_ <= 192)
570 return reg_ - 128;
571 else if (reg_ <= 208)
572 return 0xFFFFFFFFFFFFFFFF - (reg_ - 193);
573
574 switch (reg_) {
575 case 240:
576 return 0x3FE0000000000000;
577 case 241:
578 return 0xBFE0000000000000;
579 case 242:
580 return 0x3FF0000000000000;
581 case 243:
582 return 0xBFF0000000000000;
583 case 244:
584 return 0x4000000000000000;
585 case 245:
586 return 0xC000000000000000;
587 case 246:
588 return 0x4010000000000000;
589 case 247:
590 return 0xC010000000000000;
591 }
592 } else if (constSize == 1) {
593 return (signext && (data_.i & 0x8000u) ? 0xffffffffffff0000ull : 0ull) | data_.i;
594 } else if (constSize == 0) {
595 return (signext && (data_.i & 0x80u) ? 0xffffffffffffff00ull : 0ull) | data_.i;
596 }
597 return (signext && (data_.i & 0x80000000u) ? 0xffffffff00000000ull : 0ull) | data_.i;
598 }
599
600 constexpr bool isOfType(RegType type) const noexcept
601 {
602 return hasRegClass() && regClass().type() == type;
603 }
604
605 /* Indicates that the killed operand's live range intersects with the
606 * instruction's definitions. Unlike isKill() and isFirstKill(), this is
607 * not set by liveness analysis. */
608 constexpr void setLateKill(bool flag) noexcept
609 {
610 isLateKill_ = flag;
611 }
612
613 constexpr bool isLateKill() const noexcept
614 {
615 return isLateKill_;
616 }
617
618 constexpr void setKill(bool flag) noexcept
619 {
620 isKill_ = flag;
621 if (!flag)
622 setFirstKill(false);
623 }
624
625 constexpr bool isKill() const noexcept
626 {
627 return isKill_ || isFirstKill();
628 }
629
630 constexpr void setFirstKill(bool flag) noexcept
631 {
632 isFirstKill_ = flag;
633 if (flag)
634 setKill(flag);
635 }
636
637 /* When there are multiple operands killing the same temporary,
638 * isFirstKill() is only returns true for the first one. */
639 constexpr bool isFirstKill() const noexcept
640 {
641 return isFirstKill_;
642 }
643
644 constexpr bool isKillBeforeDef() const noexcept
645 {
646 return isKill() && !isLateKill();
647 }
648
649 constexpr bool isFirstKillBeforeDef() const noexcept
650 {
651 return isFirstKill() && !isLateKill();
652 }
653
654 constexpr bool operator == (Operand other) const noexcept
655 {
656 if (other.size() != size())
657 return false;
658 if (isFixed() != other.isFixed() || isKillBeforeDef() != other.isKillBeforeDef())
659 return false;
660 if (isFixed() && other.isFixed() && physReg() != other.physReg())
661 return false;
662 if (isLiteral())
663 return other.isLiteral() && other.constantValue() == constantValue();
664 else if (isConstant())
665 return other.isConstant() && other.physReg() == physReg();
666 else if (isUndefined())
667 return other.isUndefined() && other.regClass() == regClass();
668 else
669 return other.isTemp() && other.getTemp() == getTemp();
670 }
671 private:
672 union {
673 uint32_t i;
674 float f;
675 Temp temp = Temp(0, s1);
676 } data_;
677 PhysReg reg_;
678 union {
679 struct {
680 uint8_t isTemp_:1;
681 uint8_t isFixed_:1;
682 uint8_t isConstant_:1;
683 uint8_t isKill_:1;
684 uint8_t isUndef_:1;
685 uint8_t isFirstKill_:1;
686 uint8_t constSize:2;
687 uint8_t isLateKill_:1;
688 };
689 /* can't initialize bit-fields in c++11, so work around using a union */
690 uint16_t control_ = 0;
691 };
692 };
693
/**
 * Definition Class
 * Definitions are the results of Instructions
 * and refer to temporary virtual registers
 * which are later mapped to physical registers
 */
class Definition final
{
public:
   constexpr Definition() : temp(Temp(0, s1)), reg_(0), isFixed_(0), hasHint_(0), isKill_(0), isPrecise_(0) {}
   Definition(uint32_t index, RegClass type) noexcept
      : temp(index, type) {}
   explicit Definition(Temp tmp) noexcept
      : temp(tmp) {}
   /* definition fixed to a physical register without a temporary (id 0) */
   Definition(PhysReg reg, RegClass type) noexcept
      : temp(Temp(0, type))
   {
      setFixed(reg);
   }
   Definition(uint32_t tmpId, PhysReg reg, RegClass type) noexcept
      : temp(Temp(tmpId, type))
   {
      setFixed(reg);
   }

   /* a zero temp id means "no temporary" */
   constexpr bool isTemp() const noexcept
   {
      return tempId() > 0;
   }

   constexpr Temp getTemp() const noexcept
   {
      return temp;
   }

   constexpr uint32_t tempId() const noexcept
   {
      return temp.id();
   }

   constexpr void setTemp(Temp t) noexcept {
      temp = t;
   }

   constexpr RegClass regClass() const noexcept
   {
      return temp.regClass();
   }

   constexpr unsigned bytes() const noexcept
   {
      return temp.bytes();
   }

   constexpr unsigned size() const noexcept
   {
      return temp.size();
   }

   constexpr bool isFixed() const noexcept
   {
      return isFixed_;
   }

   constexpr PhysReg physReg() const noexcept
   {
      return reg_;
   }

   constexpr void setFixed(PhysReg reg) noexcept
   {
      isFixed_ = 1;
      reg_ = reg;
   }

   /* A hint shares reg_ with setFixed: setting one overwrites the other's
    * register, distinguished by the isFixed_/hasHint_ flags. */
   constexpr void setHint(PhysReg reg) noexcept
   {
      hasHint_ = 1;
      reg_ = reg;
   }

   constexpr bool hasHint() const noexcept
   {
      return hasHint_;
   }

   constexpr void setKill(bool flag) noexcept
   {
      isKill_ = flag;
   }

   constexpr bool isKill() const noexcept
   {
      return isKill_;
   }

   constexpr void setPrecise(bool precise) noexcept
   {
      isPrecise_ = precise;
   }

   constexpr bool isPrecise() const noexcept
   {
      return isPrecise_;
   }

private:
   Temp temp = Temp(0, s1);
   PhysReg reg_;
   union {
      struct {
         uint8_t isFixed_:1;
         uint8_t hasHint_:1;
         uint8_t isKill_:1;
         uint8_t isPrecise_:1;
      };
      /* can't initialize bit-fields in c++11, so work around using a union */
      uint8_t control_ = 0;
   };
};
814
815 struct Block;
816
/* Base class of all instructions. Format-specific payloads live in the
 * derived *_instruction structs below; the format field's bit encoding
 * (see enum class Format) is what the is*() queries test. */
struct Instruction {
   aco_opcode opcode;
   Format format;
   uint32_t pass_flags; /* scratch space for individual passes */

   aco::span<Operand> operands;
   aco::span<Definition> definitions;

   /* true for any vector ALU encoding, including the VOP3/DPP/SDWA
    * variants of VOP1/VOP2/VOPC and VOP3P */
   constexpr bool isVALU() const noexcept
   {
      return ((uint16_t) format & (uint16_t) Format::VOP1) == (uint16_t) Format::VOP1
          || ((uint16_t) format & (uint16_t) Format::VOP2) == (uint16_t) Format::VOP2
          || ((uint16_t) format & (uint16_t) Format::VOPC) == (uint16_t) Format::VOPC
          || ((uint16_t) format & (uint16_t) Format::VOP3A) == (uint16_t) Format::VOP3A
          || ((uint16_t) format & (uint16_t) Format::VOP3B) == (uint16_t) Format::VOP3B
          || format == Format::VOP3P;
   }

   constexpr bool isSALU() const noexcept
   {
      return format == Format::SOP1 ||
             format == Format::SOP2 ||
             format == Format::SOPC ||
             format == Format::SOPK ||
             format == Format::SOPP;
   }

   /* buffer/image memory instructions (FLAT/GLOBAL/SCRATCH are separate,
    * see isFlatOrGlobal) */
   constexpr bool isVMEM() const noexcept
   {
      return format == Format::MTBUF ||
             format == Format::MUBUF ||
             format == Format::MIMG;
   }

   constexpr bool isDPP() const noexcept
   {
      return (uint16_t) format & (uint16_t) Format::DPP;
   }

   /* VOP3A and VOP3B share the same format bit, so this tests either */
   constexpr bool isVOP3() const noexcept
   {
      return ((uint16_t) format & (uint16_t) Format::VOP3A) ||
             ((uint16_t) format & (uint16_t) Format::VOP3B);
   }

   constexpr bool isSDWA() const noexcept
   {
      return (uint16_t) format & (uint16_t) Format::SDWA;
   }

   constexpr bool isFlatOrGlobal() const noexcept
   {
      return format == Format::FLAT || format == Format::GLOBAL;
   }

   /* defined out-of-line (depends on the derived payload structs) */
   constexpr bool usesModifiers() const noexcept;

   /* true if any operand is fixed to the exec register */
   constexpr bool reads_exec() const noexcept
   {
      for (const Operand& op : operands) {
         if (op.isFixed() && op.physReg() == exec)
            return true;
      }
      return false;
   }
};
static_assert(sizeof(Instruction) == 16, "Unexpected padding");
884
/* Scalar ALU with 16-bit immediate. */
struct SOPK_instruction : public Instruction {
   uint16_t imm;
   uint16_t padding;
};
static_assert(sizeof(SOPK_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
890
/* Scalar program control; block holds a branch target block index
 * where applicable. */
struct SOPP_instruction : public Instruction {
   uint32_t imm;
   int block;
};
static_assert(sizeof(SOPP_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
896
/* SOPC/SOP1/SOP2 carry no payload beyond the base Instruction. */
struct SOPC_instruction : public Instruction {
};
static_assert(sizeof(SOPC_instruction) == sizeof(Instruction) + 0, "Unexpected padding");

struct SOP1_instruction : public Instruction {
};
static_assert(sizeof(SOP1_instruction) == sizeof(Instruction) + 0, "Unexpected padding");

struct SOP2_instruction : public Instruction {
};
static_assert(sizeof(SOP2_instruction) == sizeof(Instruction) + 0, "Unexpected padding");
908
/**
 * Scalar Memory Format:
 * For s_(buffer_)load_dword*:
 * Operand(0): SBASE - SGPR-pair which provides base address
 * Operand(1): Offset - immediate (un)signed offset or SGPR
 * Operand(2) / Definition(0): SDATA - SGPR for read / write result
 * Operand(n-1): SOffset - SGPR offset (Vega only)
 *
 * Having no operands is also valid for instructions such as s_dcache_inv.
 *
 */
struct SMEM_instruction : public Instruction {
   barrier_interaction barrier;
   bool glc : 1; /* VI+: globally coherent */
   bool dlc : 1; /* NAVI: device level coherent */
   bool nv : 1; /* VEGA only: Non-volatile */
   bool can_reorder : 1;
   bool disable_wqm : 1;
   bool prevent_overflow : 1; /* avoid overflow when combining additions */
   uint32_t padding: 18;
};
static_assert(sizeof(SMEM_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
931
/* VOP1/VOP2/VOPC carry no payload beyond the base Instruction;
 * modifiers live in the VOP3A/DPP/SDWA variants. */
struct VOP1_instruction : public Instruction {
};
static_assert(sizeof(VOP1_instruction) == sizeof(Instruction) + 0, "Unexpected padding");

struct VOP2_instruction : public Instruction {
};
static_assert(sizeof(VOP2_instruction) == sizeof(Instruction) + 0, "Unexpected padding");

struct VOPC_instruction : public Instruction {
};
static_assert(sizeof(VOPC_instruction) == sizeof(Instruction) + 0, "Unexpected padding");
943
/* VOP3 encoding: per-source abs/neg input modifiers plus opsel/omod/clamp
 * output modifiers. */
struct VOP3A_instruction : public Instruction {
   bool abs[3];
   bool neg[3];
   uint8_t opsel : 4;
   uint8_t omod : 2;
   bool clamp : 1;
   uint32_t padding : 9;
};
static_assert(sizeof(VOP3A_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
953
/* Packed-math VOP3P encoding: separate neg/opsel controls for the low and
 * high halves of each packed source. */
struct VOP3P_instruction : public Instruction {
   bool neg_lo[3];
   bool neg_hi[3];
   uint8_t opsel_lo : 3;
   uint8_t opsel_hi : 3;
   bool clamp : 1;
   uint32_t padding : 9;
};
static_assert(sizeof(VOP3P_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
963
/**
 * Data Parallel Primitives Format:
 * This format can be used for VOP1, VOP2 or VOPC instructions.
 * The swizzle applies to the src0 operand.
 *
 */
struct DPP_instruction : public Instruction {
   bool abs[2];
   bool neg[2];
   uint16_t dpp_ctrl;        /* lane swizzle control */
   uint8_t row_mask : 4;
   uint8_t bank_mask : 4;
   bool bound_ctrl : 1;
   uint32_t padding : 7;
};
static_assert(sizeof(DPP_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
980
/* SDWA byte/word selection values. The low bits pick a byte or word within
 * the dword, combined with the isword/sext/isra flag bits; the mask values
 * extract the respective fields. */
enum sdwa_sel : uint8_t {
    /* masks */
    sdwa_wordnum = 0x1,
    sdwa_bytenum = 0x3,
    sdwa_asuint = 0x7 | 0x10, /* selection + isra bits, without sext */
    sdwa_rasize = 0x3,

    /* flags */
    sdwa_isword = 0x4,
    sdwa_sext = 0x8,
    sdwa_isra = 0x10,

    /* specific values */
    sdwa_ubyte0 = 0,
    sdwa_ubyte1 = 1,
    sdwa_ubyte2 = 2,
    sdwa_ubyte3 = 3,
    sdwa_uword0 = sdwa_isword | 0,
    sdwa_uword1 = sdwa_isword | 1,
    sdwa_udword = 6,

    sdwa_sbyte0 = sdwa_ubyte0 | sdwa_sext,
    sdwa_sbyte1 = sdwa_ubyte1 | sdwa_sext,
    sdwa_sbyte2 = sdwa_ubyte2 | sdwa_sext,
    sdwa_sbyte3 = sdwa_ubyte3 | sdwa_sext,
    sdwa_sword0 = sdwa_uword0 | sdwa_sext,
    sdwa_sword1 = sdwa_uword1 | sdwa_sext,
    sdwa_sdword = sdwa_udword | sdwa_sext,

    /* register-allocated */
    sdwa_ubyte = 1 | sdwa_isra,
    sdwa_uword = 2 | sdwa_isra,
    sdwa_sbyte = sdwa_ubyte | sdwa_sext,
    sdwa_sword = sdwa_uword | sdwa_sext,
};
1016
/**
 * Sub-Dword Addressing Format:
 * This format can be used for VOP1, VOP2 or VOPC instructions.
 *
 * omod and SGPR/constant operands are only available on GFX9+. For VOPC,
 * the definition doesn't have to be VCC on GFX9+.
 *
 */
struct SDWA_instruction : public Instruction {
   /* these destination modifiers aren't available with VOPC except for
    * clamp on GFX8 */
   uint8_t sel[2];   /* source selections (sdwa_sel values) */
   uint8_t dst_sel;  /* destination selection (sdwa_sel value) */
   bool neg[2];
   bool abs[2];
   bool dst_preserve : 1;
   bool clamp : 1;
   uint8_t omod : 2; /* GFX9+ */
   uint32_t padding : 4;
};
static_assert(sizeof(SDWA_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
1038
/* Vector parameter interpolation (VINTRP): selects the attribute and
 * component to interpolate. */
struct Interp_instruction : public Instruction {
   uint8_t attribute;
   uint8_t component;
   uint16_t padding;
};
static_assert(sizeof(Interp_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1045
/**
 * Local and Global Data Sharing instructions
 * Operand(0): ADDR - VGPR which supplies the address.
 * Operand(1): DATA0 - First data VGPR.
 * Operand(2): DATA1 - Second data VGPR.
 * Operand(n-1): M0 - LDS size.
 * Definition(0): VDST - Destination VGPR when results returned to VGPRs.
 *
 */
struct DS_instruction : public Instruction {
   int16_t offset0;
   int8_t offset1;
   bool gds;  /* use GDS instead of LDS */
};
static_assert(sizeof(DS_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1061
/**
 * Vector Memory Untyped-buffer Instructions
 * Operand(0): SRSRC - Specifies which SGPR supplies T# (resource constant)
 * Operand(1): VADDR - Address source. Can carry an index and/or offset
 * Operand(2): SOFFSET - SGPR to supply unsigned byte offset. (SGPR, M0, or inline constant)
 * Operand(3) / Definition(0): VDATA - Vector GPR for write result / read data
 *
 */
struct MUBUF_instruction : public Instruction {
   uint16_t offset : 12; /* Unsigned byte offset - 12 bit */
   bool offen : 1; /* Supply an offset from VGPR (VADDR) */
   bool idxen : 1; /* Supply an index from VGPR (VADDR) */
   bool addr64 : 1; /* SI, CIK: Address size is 64-bit */
   bool glc : 1; /* globally coherent */
   bool dlc : 1; /* NAVI: device level coherent */
   bool slc : 1; /* system level coherent */
   bool tfe : 1; /* texture fail enable */
   bool lds : 1; /* Return read-data to LDS instead of VGPRs */
   bool disable_wqm : 1; /* Require an exec mask without helper invocations */
   bool can_reorder : 1;
   bool swizzled:1;
   uint8_t padding : 1;
   barrier_interaction barrier;
};
static_assert(sizeof(MUBUF_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1087
/**
 * Vector Memory Typed-buffer Instructions
 * Operand(0): SRSRC - Specifies which SGPR supplies T# (resource constant)
 * Operand(1): VADDR - Address source. Can carry an index and/or offset
 * Operand(2): SOFFSET - SGPR to supply unsigned byte offset. (SGPR, M0, or inline constant)
 * Operand(3) / Definition(0): VDATA - Vector GPR for write result / read data
 *
 */
struct MTBUF_instruction : public Instruction {
   uint16_t offset; /* Unsigned byte offset - 12 bit */
   barrier_interaction barrier;
   uint8_t dfmt : 4; /* Data Format of data in memory buffer */
   uint8_t nfmt : 3; /* Numeric format of data in memory */
   bool offen : 1; /* Supply an offset from VGPR (VADDR) */
   bool idxen : 1; /* Supply an index from VGPR (VADDR) */
   bool glc : 1; /* globally coherent */
   bool dlc : 1; /* NAVI: device level coherent */
   bool slc : 1; /* system level coherent */
   bool tfe : 1; /* texture fail enable */
   bool disable_wqm : 1; /* Require an exec mask without helper invocations */
   bool can_reorder : 1;
   uint32_t padding : 25;
};
static_assert(sizeof(MTBUF_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
1112
/**
 * Vector Memory Image Instructions
 * Operand(0): SRSRC - Scalar GPR that specifies the resource constant.
 * Operand(1): SSAMP - Scalar GPR that specifies sampler constant.
 *             or VDATA - Vector GPR for write data.
 * Operand(2): VADDR - Address source. Can carry an offset or an index.
 * Definition(0): VDATA - Vector GPR for read result.
 *
 */
struct MIMG_instruction : public Instruction {
   uint8_t dmask; /* Data VGPR enable mask */
   uint8_t dim : 3; /* NAVI: dimensionality */
   bool unrm : 1; /* Force address to be un-normalized */
   bool dlc : 1; /* NAVI: device level coherent */
   bool glc : 1; /* globally coherent */
   bool slc : 1; /* system level coherent */
   bool tfe : 1; /* texture fail enable */
   bool da : 1; /* declare an array */
   bool lwe : 1; /* LOD warning enable */
   bool r128 : 1; /* NAVI: Texture resource size */
   bool a16 : 1; /* VEGA, NAVI: Address components are 16-bits */
   bool d16 : 1; /* Convert 32-bit data to 16-bit data */
   bool disable_wqm : 1; /* Require an exec mask without helper invocations */
   bool can_reorder : 1; /* presumably: access may be scheduled across other memory ops — confirm */
   uint8_t padding : 1; /* pads the bitfields to a whole byte (see static_assert) */
   barrier_interaction barrier; /* memory-barrier classes this access interacts with — see get_barrier_interaction */
};
static_assert(sizeof(MIMG_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1141
/**
 * Flat/Scratch/Global Instructions
 * Operand(0): ADDR
 * Operand(1): SADDR
 * Operand(2) / Definition(0): DATA/VDST
 *
 */
struct FLAT_instruction : public Instruction {
   uint16_t offset; /* Vega/Navi only */
   bool slc : 1; /* system level coherent */
   bool glc : 1; /* globally coherent */
   bool dlc : 1; /* NAVI: device level coherent */
   bool lds : 1; /* presumably: return read-data to LDS instead of VGPRs — confirm */
   bool nv : 1;  /* presumably the ISA "non-volatile" bit — confirm */
   bool disable_wqm : 1; /* Require an exec mask without helper invocations */
   bool can_reorder : 1; /* presumably: access may be scheduled across other memory ops — confirm */
   uint8_t padding : 1; /* pads the bitfields to a whole byte (see static_assert) */
   barrier_interaction barrier; /* memory-barrier classes this access interacts with — see get_barrier_interaction */
};
static_assert(sizeof(FLAT_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1162
/* Export instruction (EXP encoding). */
struct Export_instruction : public Instruction {
   uint8_t enabled_mask; /* EN: which of the four components are written */
   uint8_t dest;         /* TGT: export target */
   bool compressed : 1;  /* COMPR: data is 16-bit, two components per VGPR */
   bool done : 1;        /* DONE: this is the final export of this target */
   bool valid_mask : 1;  /* VM: exec mask represents the valid mask */
   uint32_t padding : 13; /* pads the struct to sizeof(Instruction) + 4 (see static_assert) */
};
static_assert(sizeof(Export_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1172
/* Generic pseudo-instruction — presumably lowered to hw instructions by
 * lower_to_hw_instr() (declared below); confirm. */
struct Pseudo_instruction : public Instruction {
   PhysReg scratch_sgpr; /* might not be valid if it's not needed */
   bool tmp_in_scc; /* presumably: lowering must preserve/restore SCC — confirm in lower_to_hw_instr */
   uint8_t padding;
};
static_assert(sizeof(Pseudo_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1179
/* Branch pseudo-instruction: targets are CFG block indices, not encoded byte offsets. */
struct Pseudo_branch_instruction : public Instruction {
   /* target[0] is the block index of the branch target.
    * For conditional branches, target[1] contains the fall-through alternative.
    * A value of 0 means the target has not been initialized (BB0 cannot be a branch target).
    */
   uint32_t target[2];
};
static_assert(sizeof(Pseudo_branch_instruction) == sizeof(Instruction) + 8, "Unexpected padding");
1188
/* Barrier pseudo-instruction: carries no fields beyond the opcode
 * (note the "+ 0" in the size assert). */
struct Pseudo_barrier_instruction : public Instruction {
};
static_assert(sizeof(Pseudo_barrier_instruction) == sizeof(Instruction) + 0, "Unexpected padding");
1192
/* Reduction operation kinds (see Pseudo_reduction_instruction::reduce_op below).
 * The numeric suffix is the bit width of the data being reduced. */
enum ReduceOp : uint16_t {
   iadd8, iadd16, iadd32, iadd64,
   imul8, imul16, imul32, imul64,
   fadd16, fadd32, fadd64,
   fmul16, fmul32, fmul64,
   imin8, imin16, imin32, imin64,
   imax8, imax16, imax32, imax64,
   umin8, umin16, umin32, umin64,
   umax8, umax16, umax32, umax64,
   fmin16, fmin32, fmin64,
   fmax16, fmax32, fmax64,
   iand8, iand16, iand32, iand64,
   ior8, ior16, ior32, ior64,
   ixor8, ixor16, ixor32, ixor64,
};
1208
/**
 * Subgroup Reduction Instructions, everything except for the data to be
 * reduced and the result as inserted by setup_reduce_temp().
 * Operand(0): data to be reduced
 * Operand(1): reduce temporary
 * Operand(2): vector temporary
 * Definition(0): result
 * Definition(1): scalar temporary
 * Definition(2): scalar identity temporary (not used to store identity on GFX10)
 * Definition(3): scc clobber
 * Definition(4): vcc clobber
 *
 */
struct Pseudo_reduction_instruction : public Instruction {
   ReduceOp reduce_op; /* operation applied across lanes */
   uint16_t cluster_size; // must be 0 for scans
};
static_assert(sizeof(Pseudo_reduction_instruction) == sizeof(Instruction) + 4, "Unexpected padding");
1227
/* Deleter for instructions: they are allocated with calloc() in
 * create_instruction() (not with new), so they must be released with free().
 * The call operator is const and noexcept so unique_ptr can invoke it from
 * any context without risking a throwing deleter. */
struct instr_deleter_functor {
   void operator()(void* p) const noexcept {
      free(p);
   }
};
1233
/* Owning pointer for instructions; releases with free() via instr_deleter_functor
 * because instructions are calloc'd together with their trailing operand and
 * definition storage. */
template<typename T>
using aco_ptr = std::unique_ptr<T, instr_deleter_functor>;
1236
/* Allocates an instruction of type T together with its operand and definition
 * arrays in one contiguous, zero-initialized block:
 *   [T][Operand * num_operands][Definition * num_definitions]
 * Returns the instruction with opcode/format set and the spans wired up to the
 * trailing arrays. Must be freed with free() (see instr_deleter_functor). */
template<typename T>
T* create_instruction(aco_opcode opcode, Format format, uint32_t num_operands, uint32_t num_definitions)
{
   std::size_t size = sizeof(T) + num_operands * sizeof(Operand) + num_definitions * sizeof(Definition);
   /* NOTE(review): the calloc result is not checked; an allocation failure
    * would dereference NULL below. */
   char *data = (char*) calloc(1, size);
   T* inst = (T*) data;

   inst->opcode = opcode;
   inst->format = format;

   /* The spans appear to store 16-bit offsets relative to the span member's own
    * address rather than absolute pointers (see aco::span): compute the distance
    * from the member to the start of its trailing array. */
   uint16_t operands_offset = data + sizeof(T) - (char*)&inst->operands;
   inst->operands = aco::span<Operand>(operands_offset, num_operands);
   uint16_t definitions_offset = (char*)inst->operands.end() - (char*)&inst->definitions;
   inst->definitions = aco::span<Definition>(definitions_offset, num_definitions);

   return inst;
}
1254
1255 constexpr bool Instruction::usesModifiers() const noexcept
1256 {
1257 if (isDPP() || isSDWA())
1258 return true;
1259
1260 if (format == Format::VOP3P) {
1261 const VOP3P_instruction *vop3p = static_cast<const VOP3P_instruction*>(this);
1262 for (unsigned i = 0; i < operands.size(); i++) {
1263 if (vop3p->neg_lo[i] || vop3p->neg_hi[i])
1264 return true;
1265 }
1266 return vop3p->opsel_lo || vop3p->opsel_hi || vop3p->clamp;
1267 } else if (isVOP3()) {
1268 const VOP3A_instruction *vop3 = static_cast<const VOP3A_instruction*>(this);
1269 for (unsigned i = 0; i < operands.size(); i++) {
1270 if (vop3->abs[i] || vop3->neg[i])
1271 return true;
1272 }
1273 return vop3->opsel || vop3->clamp || vop3->omod;
1274 }
1275 return false;
1276 }
1277
1278 constexpr bool is_phi(Instruction* instr)
1279 {
1280 return instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi;
1281 }
1282
1283 static inline bool is_phi(aco_ptr<Instruction>& instr)
1284 {
1285 return is_phi(instr.get());
1286 }
1287
/* presumably derives which barrier classes instr interacts with — see implementation */
barrier_interaction get_barrier_interaction(const Instruction* instr);
/* presumably: true if instr can be removed given the per-temp use counts — confirm */
bool is_dead(const std::vector<uint16_t>& uses, Instruction *instr);

bool can_use_opsel(chip_class chip, aco_opcode op, int idx, bool high);
bool can_use_SDWA(chip_class chip, const aco_ptr<Instruction>& instr);
/* updates "instr" and returns the old instruction (or NULL if no update was needed) */
aco_ptr<Instruction> convert_to_SDWA(chip_class chip, aco_ptr<Instruction>& instr);
1295
/* Flags describing the role of a basic block in control flow;
 * combined as a bitmask (see Block::kind). */
enum block_kind {
   /* uniform indicates that leaving this block,
    * all actives lanes stay active */
   block_kind_uniform = 1 << 0,
   block_kind_top_level = 1 << 1,
   block_kind_loop_preheader = 1 << 2,
   block_kind_loop_header = 1 << 3,
   block_kind_loop_exit = 1 << 4,
   block_kind_continue = 1 << 5,
   block_kind_break = 1 << 6,
   block_kind_continue_or_break = 1 << 7,
   block_kind_discard = 1 << 8,
   block_kind_branch = 1 << 9,
   block_kind_merge = 1 << 10,
   block_kind_invert = 1 << 11,
   block_kind_uses_discard_if = 1 << 12,
   block_kind_needs_lowering = 1 << 13,
   block_kind_uses_demote = 1 << 14,
   block_kind_export_end = 1 << 15,
};
1316
1317
1318 struct RegisterDemand {
1319 constexpr RegisterDemand() = default;
1320 constexpr RegisterDemand(const int16_t v, const int16_t s) noexcept
1321 : vgpr{v}, sgpr{s} {}
1322 int16_t vgpr = 0;
1323 int16_t sgpr = 0;
1324
1325 constexpr friend bool operator==(const RegisterDemand a, const RegisterDemand b) noexcept {
1326 return a.vgpr == b.vgpr && a.sgpr == b.sgpr;
1327 }
1328
1329 constexpr bool exceeds(const RegisterDemand other) const noexcept {
1330 return vgpr > other.vgpr || sgpr > other.sgpr;
1331 }
1332
1333 constexpr RegisterDemand operator+(const Temp t) const noexcept {
1334 if (t.type() == RegType::sgpr)
1335 return RegisterDemand( vgpr, sgpr + t.size() );
1336 else
1337 return RegisterDemand( vgpr + t.size(), sgpr );
1338 }
1339
1340 constexpr RegisterDemand operator+(const RegisterDemand other) const noexcept {
1341 return RegisterDemand(vgpr + other.vgpr, sgpr + other.sgpr);
1342 }
1343
1344 constexpr RegisterDemand operator-(const RegisterDemand other) const noexcept {
1345 return RegisterDemand(vgpr - other.vgpr, sgpr - other.sgpr);
1346 }
1347
1348 constexpr RegisterDemand& operator+=(const RegisterDemand other) noexcept {
1349 vgpr += other.vgpr;
1350 sgpr += other.sgpr;
1351 return *this;
1352 }
1353
1354 constexpr RegisterDemand& operator-=(const RegisterDemand other) noexcept {
1355 vgpr -= other.vgpr;
1356 sgpr -= other.sgpr;
1357 return *this;
1358 }
1359
1360 constexpr RegisterDemand& operator+=(const Temp t) noexcept {
1361 if (t.type() == RegType::sgpr)
1362 sgpr += t.size();
1363 else
1364 vgpr += t.size();
1365 return *this;
1366 }
1367
1368 constexpr RegisterDemand& operator-=(const Temp t) noexcept {
1369 if (t.type() == RegType::sgpr)
1370 sgpr -= t.size();
1371 else
1372 vgpr -= t.size();
1373 return *this;
1374 }
1375
1376 constexpr void update(const RegisterDemand other) noexcept {
1377 vgpr = std::max(vgpr, other.vgpr);
1378 sgpr = std::max(sgpr, other.sgpr);
1379 }
1380
1381 };
1382
/* CFG */
struct Block {
   float_mode fp_mode; /* float controls in effect for this block's instructions */
   unsigned index;     /* position of this block in Program::blocks */
   unsigned offset = 0;
   std::vector<aco_ptr<Instruction>> instructions;
   std::vector<unsigned> logical_preds; /* predecessor block indices in the logical CFG */
   std::vector<unsigned> linear_preds;  /* predecessor block indices in the linear CFG */
   std::vector<unsigned> logical_succs;
   std::vector<unsigned> linear_succs;
   RegisterDemand register_demand = RegisterDemand(); /* presumably peak demand within the block — confirm */
   uint16_t loop_nest_depth = 0;
   uint16_t kind = 0; /* bitmask of block_kind flags */
   int logical_idom = -1; /* immediate dominator index; -1 if not computed / no dominator */
   int linear_idom = -1;
   Temp live_out_exec = Temp();

   /* this information is needed for predecessors to blocks with phis when
    * moving out of ssa */
   bool scc_live_out = false;
   PhysReg scratch_sgpr = PhysReg(); /* only needs to be valid if scc_live_out != false */

   Block(unsigned idx) : index(idx) {}
   Block() : index(0) {}
};
1408
/* A Stage is a bitmask combining the software (API) stages compiled into the
 * binary with the hardware stage it runs on. */
using Stage = uint16_t;

/* software stages */
static constexpr Stage sw_vs = 1 << 0;
static constexpr Stage sw_gs = 1 << 1;
static constexpr Stage sw_tcs = 1 << 2;
static constexpr Stage sw_tes = 1 << 3;
static constexpr Stage sw_fs = 1 << 4;
static constexpr Stage sw_cs = 1 << 5;
static constexpr Stage sw_gs_copy = 1 << 6;
static constexpr Stage sw_mask = 0x7f;

/* hardware stages (can't be OR'd, just a mask for convenience when testing multiple) */
static constexpr Stage hw_vs = 1 << 7;
static constexpr Stage hw_es = 1 << 8; /* Export shader: pre-GS (VS or TES) on GFX6-8. Combined into GS on GFX9 (and GFX10/legacy). */
static constexpr Stage hw_gs = 1 << 9; /* Geometry shader on GFX10/legacy and GFX6-9. */
static constexpr Stage hw_ngg_gs = 1 << 10; /* Geometry shader on GFX10/NGG. */
static constexpr Stage hw_ls = 1 << 11; /* Local shader: pre-TCS (VS) on GFX6-8. Combined into HS on GFX9 (and GFX10/legacy). */
static constexpr Stage hw_hs = 1 << 12; /* Hull shader: TCS on GFX6-8. Merged VS and TCS on GFX9-10. */
static constexpr Stage hw_fs = 1 << 13;
static constexpr Stage hw_cs = 1 << 14;
static constexpr Stage hw_mask = 0xff << 7;

/* possible settings of Program::stage */
static constexpr Stage vertex_vs = sw_vs | hw_vs;
static constexpr Stage fragment_fs = sw_fs | hw_fs;
static constexpr Stage compute_cs = sw_cs | hw_cs;
static constexpr Stage tess_eval_vs = sw_tes | hw_vs;
static constexpr Stage gs_copy_vs = sw_gs_copy | hw_vs;
/* GFX10/NGG */
static constexpr Stage ngg_vertex_gs = sw_vs | hw_ngg_gs;
static constexpr Stage ngg_vertex_geometry_gs = sw_vs | sw_gs | hw_ngg_gs;
static constexpr Stage ngg_tess_eval_gs = sw_tes | hw_ngg_gs;
static constexpr Stage ngg_tess_eval_geometry_gs = sw_tes | sw_gs | hw_ngg_gs;
/* GFX9 (and GFX10 if NGG isn't used) */
static constexpr Stage vertex_geometry_gs = sw_vs | sw_gs | hw_gs;
static constexpr Stage vertex_tess_control_hs = sw_vs | sw_tcs | hw_hs;
static constexpr Stage tess_eval_geometry_gs = sw_tes | sw_gs | hw_gs;
/* pre-GFX9 */
static constexpr Stage vertex_ls = sw_vs | hw_ls; /* vertex before tessellation control */
static constexpr Stage vertex_es = sw_vs | hw_es; /* vertex before geometry */
static constexpr Stage tess_control_hs = sw_tcs | hw_hs;
static constexpr Stage tess_eval_es = sw_tes | hw_es; /* tessellation evaluation before geometry */
static constexpr Stage geometry_gs = sw_gs | hw_gs;
1453
/* Indices into Program::statistics (filled when Program::collect_statistics is set). */
enum statistic {
   statistic_hash,
   statistic_instructions,
   statistic_copies,
   statistic_branches,
   statistic_cycles,
   statistic_vmem_clauses,
   statistic_smem_clauses,
   statistic_vmem_score,
   statistic_smem_score,
   statistic_sgpr_presched,
   statistic_vgpr_presched,
   num_statistics
};
1468
/* A shader program: the list of basic blocks plus all target-specific and
 * resource-limit state needed during compilation. */
class Program final {
public:
   float_mode next_fp_mode; /* fp_mode assigned to newly created blocks */
   std::vector<Block> blocks;
   RegisterDemand max_reg_demand = RegisterDemand();
   uint16_t num_waves = 0;
   uint16_t max_waves = 0; /* maximum number of waves, regardless of register usage */
   ac_shader_config* config;
   struct radv_shader_info *info;
   enum chip_class chip_class;
   enum radeon_family family;
   unsigned wave_size;
   RegClass lane_mask; /* presumably the register class of an exec-mask-sized value (wave32 vs wave64) — confirm */
   Stage stage; /* Stage */
   bool needs_exact = false; /* there exists an instruction with disable_wqm = true */
   bool needs_wqm = false; /* there exists a p_wqm instruction */
   bool wb_smem_l1_on_end = false;

   std::vector<uint8_t> constant_data;
   Temp private_segment_buffer;
   Temp scratch_offset;

   uint16_t min_waves = 0;
   uint16_t lds_alloc_granule;
   uint32_t lds_limit; /* in bytes */
   bool has_16bank_lds;
   uint16_t vgpr_limit;
   uint16_t sgpr_limit;
   uint16_t physical_sgprs;
   uint16_t sgpr_alloc_granule; /* minus one. must be power of two */
   uint16_t vgpr_alloc_granule; /* minus one. must be power of two */
   unsigned workgroup_size; /* if known; otherwise UINT_MAX */

   bool xnack_enabled = false;
   bool sram_ecc_enabled = false;
   bool has_fast_fma32 = false;

   bool needs_vcc = false;
   bool needs_flat_scr = false;

   bool collect_statistics = false; /* whether the statistics array below gets filled */
   uint32_t statistics[num_statistics];

   /* Returns the next unused id and advances the counter.
    * 16777215 == 2^24 - 1; presumably ids must fit in 24 bits — confirm
    * against the Operand/Definition encoding. */
   uint32_t allocateId()
   {
      assert(allocationID <= 16777215);
      return allocationID++;
   }

   /* The id allocateId() would return next, without consuming it. */
   uint32_t peekAllocationId()
   {
      return allocationID;
   }

   /* Sets the id counter. */
   void setAllocationId(uint32_t id)
   {
      allocationID = id;
   }

   /* Appends a new empty block (index = current block count) and returns it. */
   Block* create_and_insert_block() {
      blocks.emplace_back(blocks.size());
      blocks.back().fp_mode = next_fp_mode;
      return &blocks.back();
   }

   /* Moves an existing block to the end of the list, re-assigning its index. */
   Block* insert_block(Block&& block) {
      block.index = blocks.size();
      block.fp_mode = next_fp_mode;
      blocks.emplace_back(std::move(block));
      return &blocks.back();
   }

private:
   uint32_t allocationID = 1;
};
1544
1545 struct TempHash {
1546 std::size_t operator()(Temp t) const {
1547 return t.id();
1548 }
1549 };
1550 using TempSet = std::unordered_set<Temp, TempHash>;
1551
/* Result of live_var_analysis() (declared below). */
struct live {
   /* live temps out per block */
   std::vector<TempSet> live_out;
   /* register demand (sgpr/vgpr) per instruction per block */
   std::vector<std::vector<RegisterDemand>> register_demand;
};
1558
/* instruction selection */
void select_program(Program *program,
                    unsigned shader_count,
                    struct nir_shader *const *shaders,
                    ac_shader_config* config,
                    struct radv_shader_args *args);
void select_gs_copy_shader(Program *program, struct nir_shader *gs_shader,
                           ac_shader_config* config,
                           struct radv_shader_args *args);

/* compilation passes, roughly in pipeline order */
void lower_wqm(Program* program, live& live_vars,
               const struct radv_nir_compiler_options *options);
void lower_phis(Program* program);
void calc_min_waves(Program* program);
void update_vgpr_sgpr_demand(Program* program, const RegisterDemand new_demand);
live live_var_analysis(Program* program, const struct radv_nir_compiler_options *options);
std::vector<uint16_t> dead_code_analysis(Program *program);
void dominator_tree(Program* program);
void insert_exec_mask(Program *program);
void value_numbering(Program* program);
void optimize(Program* program);
void setup_reduce_temp(Program* program);
void lower_to_cssa(Program* program, live& live_vars, const struct radv_nir_compiler_options *options);
void register_allocation(Program *program, std::vector<TempSet>& live_out_per_block);
void ssa_elimination(Program* program);
void lower_to_hw_instr(Program* program);
void schedule_program(Program* program, live& live_vars);
void spill(Program* program, live& live_vars, const struct radv_nir_compiler_options *options);
void insert_wait_states(Program* program);
void insert_NOPs(Program* program);
unsigned emit_program(Program* program, std::vector<uint32_t>& code);
void print_asm(Program *program, std::vector<uint32_t>& binary,
               unsigned exec_size, std::ostream& out);
void validate(Program* program, FILE *output);
bool validate_ra(Program* program, const struct radv_nir_compiler_options *options, FILE *output);
#ifndef NDEBUG
void perfwarn(bool cond, const char *msg, Instruction *instr=NULL);
#else
/* NOTE(review): this release macro takes (program, cond, msg, ...) while the
 * debug declaration above takes (cond, msg, instr) — the parameter lists do
 * not match; confirm against call sites. */
#define perfwarn(program, cond, msg, ...) do {} while(0)
#endif

void collect_presched_stats(Program *program);
void collect_preasm_stats(Program *program);
void collect_postasm_stats(Program *program, const std::vector<uint32_t>& code);

void aco_print_instr(const Instruction *instr, FILE *output);
void aco_print_program(const Program *program, FILE *output);

/* utilities for dealing with register demand */
RegisterDemand get_live_changes(aco_ptr<Instruction>& instr);
RegisterDemand get_temp_registers(aco_ptr<Instruction>& instr);
RegisterDemand get_demand_before(RegisterDemand demand, aco_ptr<Instruction>& instr, aco_ptr<Instruction>& instr_before);

/* number of sgprs that need to be allocated but might not be addressable as s0-s105 */
uint16_t get_extra_sgprs(Program *program);

/* get number of sgprs/vgprs allocated required to address a number of sgprs/vgprs */
uint16_t get_sgpr_alloc(Program *program, uint16_t addressable_sgprs);
uint16_t get_vgpr_alloc(Program *program, uint16_t addressable_vgprs);

/* return number of addressable sgprs/vgprs for max_waves */
uint16_t get_addr_sgpr_from_waves(Program *program, uint16_t max_waves);
uint16_t get_addr_vgpr_from_waves(Program *program, uint16_t max_waves);
1621
/* Static per-opcode information tables; the single instance is instr_info below. */
typedef struct {
   const int16_t opcode_gfx7[static_cast<int>(aco_opcode::num_opcodes)];
   const int16_t opcode_gfx9[static_cast<int>(aco_opcode::num_opcodes)];
   const int16_t opcode_gfx10[static_cast<int>(aco_opcode::num_opcodes)];
   const std::bitset<static_cast<int>(aco_opcode::num_opcodes)> can_use_input_modifiers;
   const std::bitset<static_cast<int>(aco_opcode::num_opcodes)> can_use_output_modifiers;
   const std::bitset<static_cast<int>(aco_opcode::num_opcodes)> is_atomic;
   const char *name[static_cast<int>(aco_opcode::num_opcodes)];
   const aco::Format format[static_cast<int>(aco_opcode::num_opcodes)];
   /* sizes used for input/output modifiers and constants */
   const unsigned operand_size[static_cast<int>(aco_opcode::num_opcodes)];
   const unsigned definition_size[static_cast<int>(aco_opcode::num_opcodes)];
} Info;

extern const Info instr_info;
1637
1638 }
1639
1640 #endif /* ACO_IR_H */
1641