2 * Copyright © 2018 Valve Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 #include "ac_binary.h"
35 #include "amd_family.h"
36 #include "aco_opcodes.h"
39 struct radv_nir_compiler_options
;
40 struct radv_shader_info
;
44 extern uint64_t debug_flags
;
48 DEBUG_VALIDATE_RA
= 0x2,
53 * Representation of the instruction's microcode encoding format
54 * Note: Some Vector ALU Formats can be combined, such that:
55 * - VOP2* | VOP3A represents a VOP2 instruction in VOP3A encoding
56 * - VOP2* | DPP represents a VOP2 instruction with data parallel primitive.
57 * - VOP2* | SDWA represents a VOP2 instruction with sub-dword addressing.
59 * (*) The same is applicable for VOP1 and VOPC instructions.
61 enum class Format
: std::uint16_t {
62 /* Pseudo Instruction Format */
64 /* Scalar ALU & Control Formats */
70 /* Scalar Memory Format */
74 /* Vector Memory Buffer Formats */
77 /* Vector Memory Image Format */
88 PSEUDO_REDUCTION
= 18,
90 /* Vector ALU Formats */
98 /* Vector Parameter Interpolation Format */
104 enum barrier_interaction
{
106 barrier_buffer
= 0x1,
108 barrier_atomic
= 0x4,
109 barrier_shared
= 0x8,
113 constexpr Format
asVOP3(Format format
) {
114 return (Format
) ((uint32_t) Format::VOP3
| (uint32_t) format
);
142 /* these are used for WWM and spills to vgpr */
143 v1_linear
= v1
| (1 << 6),
144 v2_linear
= v2
| (1 << 6),
147 RegClass() = default;
148 constexpr RegClass(RC rc
)
150 constexpr RegClass(RegType type
, unsigned size
)
151 : rc((RC
) ((type
== RegType::vgpr
? 1 << 5 : 0) | size
)) {}
153 constexpr operator RC() const { return rc
; }
154 explicit operator bool() = delete;
156 constexpr RegType
type() const { return rc
<= RC::s16
? RegType::sgpr
: RegType::vgpr
; }
157 constexpr unsigned size() const { return (unsigned) rc
& 0x1F; }
158 constexpr bool is_linear() const { return rc
<= RC::s16
|| rc
& (1 << 6); }
159 constexpr RegClass
as_linear() const { return RegClass((RC
) (rc
| (1 << 6))); }
/* transitional helper expressions */
/* Named RegClass constants so code can write e.g. `s2` instead of
 * RegClass(RegClass::s2): sN = N scalar (SGPR) dwords, vN = N vector
 * (VGPR) dwords, matching the RC enumerators of the same name. */
static constexpr RegClass s1{RegClass::s1};
static constexpr RegClass s2{RegClass::s2};
static constexpr RegClass s3{RegClass::s3};
static constexpr RegClass s4{RegClass::s4};
static constexpr RegClass s8{RegClass::s8};
static constexpr RegClass s16{RegClass::s16};
static constexpr RegClass v1{RegClass::v1};
static constexpr RegClass v2{RegClass::v2};
static constexpr RegClass v3{RegClass::v3};
static constexpr RegClass v4{RegClass::v4};
static constexpr RegClass v5{RegClass::v5};
static constexpr RegClass v6{RegClass::v6};
static constexpr RegClass v7{RegClass::v7};
static constexpr RegClass v8{RegClass::v8};
 * Each temporary virtual register has a
 * register class (i.e. size and type)
/* A Temp names one SSA value: a numeric id plus the RegClass (type and
 * size in dwords) it must be allocated with. Comparisons use the id only. */
constexpr Temp(uint32_t id, RegClass cls) noexcept : id_(id), reg_class(cls) {}

/* Virtual-register number (allocated via Program::allocateId — presumably
 * unique per Program; confirm against callers). */
constexpr uint32_t id() const noexcept { return id_; }
/* Register class: type (sgpr/vgpr) plus size this value needs. */
constexpr RegClass regClass() const noexcept { return reg_class; }
/* Size in dwords, forwarded from the register class. */
constexpr unsigned size() const noexcept { return reg_class.size(); }
/* RegType (sgpr or vgpr), forwarded from the register class. */
constexpr RegType type() const noexcept { return reg_class.type(); }
/* Whether the class is one of the linear classes (used for WWM and
 * vgpr spills; see the RegClass definitions above). */
constexpr bool is_linear() const noexcept { return reg_class.is_linear(); }

/* Ordering/equality compare ids only; the register class is ignored. */
constexpr bool operator <(Temp other) const noexcept { return id() < other.id(); }
constexpr bool operator==(Temp other) const noexcept { return id() == other.id(); }
constexpr bool operator!=(Temp other) const noexcept { return id() != other.id(); }
 * Represents the physical register for each
 * Operand and Definition.
/* Thin wrapper around a hardware register index; implicitly converts back
 * to unsigned so a PhysReg can be used wherever a plain index is expected. */
constexpr PhysReg() = default;
explicit constexpr PhysReg(unsigned r) : reg(r) {}
constexpr operator unsigned() const { return reg; }
/* helper expressions for special registers */
/* Indices follow the GCN/RDNA scalar-operand encoding. */
static constexpr PhysReg m0{124};       /* memory register M0 */
static constexpr PhysReg vcc{106};      /* vector condition code (SGPR pair 106/107) */
static constexpr PhysReg exec{126};     /* execution mask; same index as exec_lo */
static constexpr PhysReg exec_lo{126};
static constexpr PhysReg exec_hi{127};
static constexpr PhysReg scc{253};      /* scalar condition code (scalar source encoding 253) */
231 * Initially, each Operand refers to either
232 * a temporary virtual register
233 * or to a constant value
234 * Temporary registers get mapped to physical register during RA
235 * Constant values are inlined into the instruction sequence.
241 : reg_(PhysReg
{128}), isTemp_(false), isFixed_(true), isConstant_(false),
242 isKill_(false), isUndef_(true), isFirstKill_(false), is64BitConst_(false) {}
244 explicit Operand(Temp r
) noexcept
251 setFixed(PhysReg
{128});
254 explicit Operand(uint32_t v
) noexcept
259 setFixed(PhysReg
{128 + v
});
260 else if (v
>= 0xFFFFFFF0) /* [-16 .. -1] */
261 setFixed(PhysReg
{192 - v
});
262 else if (v
== 0x3f000000) /* 0.5 */
263 setFixed(PhysReg
{240});
264 else if (v
== 0xbf000000) /* -0.5 */
265 setFixed(PhysReg
{241});
266 else if (v
== 0x3f800000) /* 1.0 */
267 setFixed(PhysReg
{242});
268 else if (v
== 0xbf800000) /* -1.0 */
269 setFixed(PhysReg
{243});
270 else if (v
== 0x40000000) /* 2.0 */
271 setFixed(PhysReg
{244});
272 else if (v
== 0xc0000000) /* -2.0 */
273 setFixed(PhysReg
{245});
274 else if (v
== 0x40800000) /* 4.0 */
275 setFixed(PhysReg
{246});
276 else if (v
== 0xc0800000) /* -4.0 */
277 setFixed(PhysReg
{247});
278 else if (v
== 0x3e22f983) /* 1/(2*PI) */
279 setFixed(PhysReg
{248});
280 else /* Literal Constant */
281 setFixed(PhysReg
{255});
283 explicit Operand(uint64_t v
) noexcept
286 is64BitConst_
= true;
288 setFixed(PhysReg
{128 + (uint32_t) v
});
289 else if (v
>= 0xFFFFFFFFFFFFFFF0) /* [-16 .. -1] */
290 setFixed(PhysReg
{192 - (uint32_t) v
});
291 else if (v
== 0x3FE0000000000000) /* 0.5 */
292 setFixed(PhysReg
{240});
293 else if (v
== 0xBFE0000000000000) /* -0.5 */
294 setFixed(PhysReg
{241});
295 else if (v
== 0x3FF0000000000000) /* 1.0 */
296 setFixed(PhysReg
{242});
297 else if (v
== 0xBFF0000000000000) /* -1.0 */
298 setFixed(PhysReg
{243});
299 else if (v
== 0x4000000000000000) /* 2.0 */
300 setFixed(PhysReg
{244});
301 else if (v
== 0xC000000000000000) /* -2.0 */
302 setFixed(PhysReg
{245});
303 else if (v
== 0x4010000000000000) /* 4.0 */
304 setFixed(PhysReg
{246});
305 else if (v
== 0xC010000000000000) /* -4.0 */
306 setFixed(PhysReg
{247});
307 else if (v
== 0x3fc45f306dc9c882) /* 1/(2*PI) */
308 setFixed(PhysReg
{248});
309 else { /* Literal Constant: we don't know if it is a long or double.*/
311 assert(false && "attempt to create a 64-bit literal constant");
314 explicit Operand(RegClass type
) noexcept
317 data_
.temp
= Temp(0, type
);
318 setFixed(PhysReg
{128});
320 explicit Operand(PhysReg reg
, RegClass type
) noexcept
322 data_
.temp
= Temp(0, type
);
326 constexpr bool isTemp() const noexcept
331 constexpr void setTemp(Temp t
) noexcept
{
332 assert(!isConstant_
);
337 constexpr Temp
getTemp() const noexcept
342 constexpr uint32_t tempId() const noexcept
344 return data_
.temp
.id();
347 constexpr bool hasRegClass() const noexcept
349 return isTemp() || isUndefined();
352 constexpr RegClass
regClass() const noexcept
354 return data_
.temp
.regClass();
357 constexpr unsigned size() const noexcept
360 return is64BitConst_
? 2 : 1;
362 return data_
.temp
.size();
365 constexpr bool isFixed() const noexcept
370 constexpr PhysReg
physReg() const noexcept
375 constexpr void setFixed(PhysReg reg
) noexcept
377 isFixed_
= reg
!= unsigned(-1);
381 constexpr bool isConstant() const noexcept
386 constexpr bool isLiteral() const noexcept
388 return isConstant() && reg_
== 255;
391 constexpr bool isUndefined() const noexcept
396 constexpr uint32_t constantValue() const noexcept
401 constexpr bool constantEquals(uint32_t cmp
) const noexcept
403 return isConstant() && constantValue() == cmp
;
406 constexpr void setKill(bool flag
) noexcept
413 constexpr bool isKill() const noexcept
415 return isKill_
|| isFirstKill();
418 constexpr void setFirstKill(bool flag
) noexcept
425 /* When there are multiple operands killing the same temporary,
426 * isFirstKill() is only returns true for the first one. */
427 constexpr bool isFirstKill() const noexcept
436 Temp temp
= Temp(0, s1
);
443 uint8_t isConstant_
:1;
446 uint8_t isFirstKill_
:1;
447 uint8_t is64BitConst_
:1;
449 /* can't initialize bit-fields in c++11, so work around using a union */
450 uint8_t control_
= 0;
456 * Definitions are the results of Instructions
457 * and refer to temporary virtual registers
458 * which are later mapped to physical registers
460 class Definition final
463 constexpr Definition() : temp(Temp(0, s1
)), reg_(0), isFixed_(0), hasHint_(0), isKill_(0) {}
464 Definition(uint32_t index
, RegClass type
) noexcept
465 : temp(index
, type
) {}
466 explicit Definition(Temp tmp
) noexcept
468 Definition(PhysReg reg
, RegClass type
) noexcept
469 : temp(Temp(0, type
))
473 Definition(uint32_t tmpId
, PhysReg reg
, RegClass type
) noexcept
474 : temp(Temp(tmpId
, type
))
479 constexpr bool isTemp() const noexcept
484 constexpr Temp
getTemp() const noexcept
489 constexpr uint32_t tempId() const noexcept
494 constexpr void setTemp(Temp t
) noexcept
{
498 constexpr RegClass
regClass() const noexcept
500 return temp
.regClass();
503 constexpr unsigned size() const noexcept
508 constexpr bool isFixed() const noexcept
513 constexpr PhysReg
physReg() const noexcept
518 constexpr void setFixed(PhysReg reg
) noexcept
524 constexpr void setHint(PhysReg reg
) noexcept
530 constexpr bool hasHint() const noexcept
535 constexpr void setKill(bool flag
) noexcept
540 constexpr bool isKill() const noexcept
546 Temp temp
= Temp(0, s1
);
554 /* can't initialize bit-fields in c++11, so work around using a union */
555 uint8_t control_
= 0;
565 aco::span
<Operand
> operands
;
566 aco::span
<Definition
> definitions
;
568 constexpr bool isVALU() const noexcept
570 return ((uint16_t) format
& (uint16_t) Format::VOP1
) == (uint16_t) Format::VOP1
571 || ((uint16_t) format
& (uint16_t) Format::VOP2
) == (uint16_t) Format::VOP2
572 || ((uint16_t) format
& (uint16_t) Format::VOPC
) == (uint16_t) Format::VOPC
573 || ((uint16_t) format
& (uint16_t) Format::VOP3A
) == (uint16_t) Format::VOP3A
574 || ((uint16_t) format
& (uint16_t) Format::VOP3B
) == (uint16_t) Format::VOP3B
575 || ((uint16_t) format
& (uint16_t) Format::VOP3P
) == (uint16_t) Format::VOP3P
;
578 constexpr bool isSALU() const noexcept
580 return format
== Format::SOP1
||
581 format
== Format::SOP2
||
582 format
== Format::SOPC
||
583 format
== Format::SOPK
||
584 format
== Format::SOPP
;
587 constexpr bool isVMEM() const noexcept
589 return format
== Format::MTBUF
||
590 format
== Format::MUBUF
||
591 format
== Format::MIMG
;
594 constexpr bool isDPP() const noexcept
596 return (uint16_t) format
& (uint16_t) Format::DPP
;
599 constexpr bool isVOP3() const noexcept
601 return ((uint16_t) format
& (uint16_t) Format::VOP3A
) ||
602 ((uint16_t) format
& (uint16_t) Format::VOP3B
) ||
603 format
== Format::VOP3P
;
606 constexpr bool isSDWA() const noexcept
608 return (uint16_t) format
& (uint16_t) Format::SDWA
;
611 constexpr bool isFlatOrGlobal() const noexcept
613 return format
== Format::FLAT
|| format
== Format::GLOBAL
;
617 struct SOPK_instruction
: public Instruction
{
621 struct SOPP_instruction
: public Instruction
{
626 struct SOPC_instruction
: public Instruction
{
629 struct SOP1_instruction
: public Instruction
{
632 struct SOP2_instruction
: public Instruction
{
636 * Scalar Memory Format:
637 * For s_(buffer_)load_dword*:
638 * Operand(0): SBASE - SGPR-pair which provides base address
639 * Operand(1): Offset - immediate (un)signed offset or SGPR
640 * Operand(2) / Definition(0): SDATA - SGPR for read / write result
641 * Operand(n-1): SOffset - SGPR offset (Vega only)
643 * Having no operands is also valid for instructions such as s_dcache_inv.
646 struct SMEM_instruction
: public Instruction
{
647 bool glc
; /* VI+: globally coherent */
648 bool dlc
; /* NAVI: device level coherent */
649 bool nv
; /* VEGA only: Non-volatile */
652 barrier_interaction barrier
;
655 struct VOP1_instruction
: public Instruction
{
658 struct VOP2_instruction
: public Instruction
{
661 struct VOPC_instruction
: public Instruction
{
664 struct VOP3A_instruction
: public Instruction
{
673 * Data Parallel Primitives Format:
674 * This format can be used for VOP1, VOP2 or VOPC instructions.
675 * The swizzle applies to the src0 operand.
678 struct DPP_instruction
: public Instruction
{
687 struct Interp_instruction
: public Instruction
{
693 * Local and Global Data Sharing instructions
694 * Operand(0): ADDR - VGPR which supplies the address.
695 * Operand(1): DATA0 - First data VGPR.
696 * Operand(2): DATA1 - Second data VGPR.
697 * Operand(n-1): M0 - LDS size.
698 * Definition(0): VDST - Destination VGPR when results returned to VGPRs.
701 struct DS_instruction
: public Instruction
{
708 * Vector Memory Untyped-buffer Instructions
709 * Operand(0): VADDR - Address source. Can carry an index and/or offset
710 * Operand(1): SRSRC - Specifies which SGPR supplies T# (resource constant)
711 * Operand(2): SOFFSET - SGPR to supply unsigned byte offset. (SGPR, M0, or inline constant)
712 * Operand(3) / Definition(0): VDATA - Vector GPR for write result / read data
715 struct MUBUF_instruction
: public Instruction
{
716 unsigned offset
; /* Unsigned byte offset - 12 bit */
717 bool offen
; /* Supply an offset from VGPR (VADDR) */
718 bool idxen
; /* Supply an index from VGPR (VADDR) */
719 bool glc
; /* globally coherent */
720 bool dlc
; /* NAVI: device level coherent */
721 bool slc
; /* system level coherent */
722 bool tfe
; /* texture fail enable */
723 bool lds
; /* Return read-data to LDS instead of VGPRs */
724 bool disable_wqm
; /* Require an exec mask without helper invocations */
726 barrier_interaction barrier
;
730 * Vector Memory Typed-buffer Instructions
731 * Operand(0): VADDR - Address source. Can carry an index and/or offset
732 * Operand(1): SRSRC - Specifies which SGPR supplies T# (resource constant)
733 * Operand(2): SOFFSET - SGPR to supply unsigned byte offset. (SGPR, M0, or inline constant)
734 * Operand(3) / Definition(0): VDATA - Vector GPR for write result / read data
737 struct MTBUF_instruction
: public Instruction
{
740 uint8_t dfmt
: 4; /* Data Format of data in memory buffer */
741 uint8_t nfmt
: 3; /* Numeric format of data in memory */
743 uint8_t img_format
; /* Buffer or image format as used by GFX10 */
745 unsigned offset
; /* Unsigned byte offset - 12 bit */
746 bool offen
; /* Supply an offset from VGPR (VADDR) */
747 bool idxen
; /* Supply an index from VGPR (VADDR) */
748 bool glc
; /* globally coherent */
749 bool dlc
; /* NAVI: device level coherent */
750 bool slc
; /* system level coherent */
751 bool tfe
; /* texture fail enable */
752 bool disable_wqm
; /* Require an exec mask without helper invocations */
754 barrier_interaction barrier
;
758 * Vector Memory Image Instructions
759 * Operand(0): VADDR - Address source. Can carry an offset or an index.
760 * Operand(1): SRSRC - Scalar GPR that specifies the resource constant.
761 * Operand(2): SSAMP - Scalar GPR that specifies sampler constant.
762 * Operand(3) / Definition(0): VDATA - Vector GPR for read / write result.
struct MIMG_instruction : public Instruction {
   unsigned dmask; /* Data VGPR enable mask */
   bool unrm; /* Force address to be un-normalized */
   bool dlc; /* NAVI: device level coherent */
   bool glc; /* globally coherent */
   bool slc; /* system level coherent */
   bool tfe; /* texture fail enable */
   bool da; /* declare an array */
   bool lwe; /* LOD warning enable (ISA LWE bit) — previous comment here
              * duplicated unrm's "Force data to be un-normalized" */
   bool r128; /* NAVI: Texture resource size */
   bool a16; /* VEGA, NAVI: Address components are 16-bits */
   bool d16; /* Convert 32-bit data to 16-bit data */
   bool disable_wqm; /* Require an exec mask without helper invocations */
   /* which kinds of memory this access may interact with; read by
    * get_barrier_interaction() below */
   barrier_interaction barrier;
783 * Flat/Scratch/Global Instructions
786 * Operand(2) / Definition(0): DATA/VDST
789 struct FLAT_instruction
: public Instruction
{
790 uint16_t offset
; /* Vega only */
797 struct Export_instruction
: public Instruction
{
798 unsigned enabled_mask
;
805 struct Pseudo_instruction
: public Instruction
{
807 PhysReg scratch_sgpr
; /* might not be valid if it's not needed */
810 struct Pseudo_branch_instruction
: public Instruction
{
811 /* target[0] is the block index of the branch target.
812 * For conditional branches, target[1] contains the fall-through alternative.
813 * A value of 0 means the target has not been initialized (BB0 cannot be a branch target).
818 struct Pseudo_barrier_instruction
: public Instruction
{
838 * Subgroup Reduction Instructions, everything except for the data to be
839 * reduced and the result as inserted by setup_reduce_temp().
840 * Operand(0): data to be reduced
841 * Operand(1): reduce temporary
842 * Operand(2): vector temporary
843 * Definition(0): result
844 * Definition(1): scalar temporary
845 * Definition(2): scalar identity temporary
846 * Definition(3): scc clobber
847 * Definition(4): vcc clobber
850 struct Pseudo_reduction_instruction
: public Instruction
{
852 unsigned cluster_size
; // must be 0 for scans
855 struct instr_deleter_functor
{
856 void operator()(void* p
) {
862 using aco_ptr
= std::unique_ptr
<T
, instr_deleter_functor
>;
865 T
* create_instruction(aco_opcode opcode
, Format format
, uint32_t num_operands
, uint32_t num_definitions
)
867 std::size_t size
= sizeof(T
) + num_operands
* sizeof(Operand
) + num_definitions
* sizeof(Definition
);
868 char *data
= (char*) calloc(1, size
);
871 inst
->opcode
= opcode
;
872 inst
->format
= format
;
874 inst
->operands
= aco::span
<Operand
>((Operand
*)(data
+ sizeof(T
)), num_operands
);
875 inst
->definitions
= aco::span
<Definition
>((Definition
*)inst
->operands
.end(), num_definitions
);
/* True iff the instruction is one of the phi pseudo-ops:
 * logical (p_phi) or linear (p_linear_phi). */
constexpr bool is_phi(Instruction* instr)
return instr->opcode == aco_opcode::p_phi || instr->opcode == aco_opcode::p_linear_phi;
/* Convenience overload for owning aco_ptr wrappers; forwards to the
 * raw-pointer form above. */
static inline bool is_phi(aco_ptr<Instruction>& instr)
return is_phi(instr.get());
/* Returns which kinds of memory the given instruction may interact with,
 * dispatched on its encoding format. Formats whose struct carries an
 * explicit `barrier` field (SMEM/MUBUF/MIMG) report it directly; the two
 * trailing returns are fall-backs for other formats. NOTE(review): the
 * switch's case labels were lost in this extract — confirm which formats
 * map to barrier_buffer vs barrier_shared against the full file. */
constexpr barrier_interaction get_barrier_interaction(Instruction* instr)
switch (instr->format) {
/* instruction kinds with an explicit barrier field: */
return static_cast<SMEM_instruction*>(instr)->barrier;
return static_cast<MUBUF_instruction*>(instr)->barrier;
return static_cast<MIMG_instruction*>(instr)->barrier;
/* fall-backs: */
return barrier_buffer;
return barrier_shared;
910 /* uniform indicates that leaving this block,
911 * all actives lanes stay active */
912 block_kind_uniform
= 1 << 0,
913 block_kind_top_level
= 1 << 1,
914 block_kind_loop_preheader
= 1 << 2,
915 block_kind_loop_header
= 1 << 3,
916 block_kind_loop_exit
= 1 << 4,
917 block_kind_continue
= 1 << 5,
918 block_kind_break
= 1 << 6,
919 block_kind_continue_or_break
= 1 << 7,
920 block_kind_discard
= 1 << 8,
921 block_kind_branch
= 1 << 9,
922 block_kind_merge
= 1 << 10,
923 block_kind_invert
= 1 << 11,
924 block_kind_uses_discard_if
= 1 << 12,
925 block_kind_needs_lowering
= 1 << 13,
/* Tracks register pressure as a (vgpr, sgpr) pair of dword counts.
 * Arithmetic is component-wise; adding/subtracting a Temp affects only
 * the component matching the temp's RegType. */
struct RegisterDemand {
   constexpr RegisterDemand() = default;
   constexpr RegisterDemand(const int16_t v, const int16_t s) noexcept : vgpr{v}, sgpr{s} {}

   /* Equal iff both components match. */
   constexpr friend bool operator==(const RegisterDemand a, const RegisterDemand b) noexcept {
      return a.vgpr == b.vgpr && a.sgpr == b.sgpr;

   /* True if either component is above the corresponding limit in `other`. */
   constexpr bool exceeds(const RegisterDemand other) const noexcept {
      return vgpr > other.vgpr || sgpr > other.sgpr;

   /* Demand with the temp's size added to the component matching its type. */
   constexpr RegisterDemand operator+(const Temp t) const noexcept {
      if (t.type() == RegType::sgpr)
         return RegisterDemand( vgpr, sgpr + t.size() );
      return RegisterDemand( vgpr + t.size(), sgpr );

   /* Component-wise sum / difference. */
   constexpr RegisterDemand operator+(const RegisterDemand other) const noexcept {
      return RegisterDemand(vgpr + other.vgpr, sgpr + other.sgpr);

   constexpr RegisterDemand operator-(const RegisterDemand other) const noexcept {
      return RegisterDemand(vgpr - other.vgpr, sgpr - other.sgpr);

   /* Compound component-wise add / subtract (bodies not shown in this extract). */
   constexpr RegisterDemand& operator+=(const RegisterDemand other) noexcept {

   constexpr RegisterDemand& operator-=(const RegisterDemand other) noexcept {

   /* Compound add / subtract of a temp's size on the matching component
    * (bodies not shown in this extract). */
   constexpr RegisterDemand& operator+=(const Temp t) noexcept {
      if (t.type() == RegType::sgpr)

   constexpr RegisterDemand& operator-=(const Temp t) noexcept {
      if (t.type() == RegType::sgpr)

   /* Raise each component to at least the other demand's value (used to
    * accumulate a maximum over instructions/blocks). */
   constexpr void update(const RegisterDemand other) noexcept {
      vgpr = std::max(vgpr, other.vgpr);
      sgpr = std::max(sgpr, other.sgpr);
998 std::vector
<aco_ptr
<Instruction
>> instructions
;
999 std::vector
<unsigned> logical_preds
;
1000 std::vector
<unsigned> linear_preds
;
1001 std::vector
<unsigned> logical_succs
;
1002 std::vector
<unsigned> linear_succs
;
1003 RegisterDemand register_demand
= RegisterDemand();
1004 uint16_t loop_nest_depth
= 0;
1006 int logical_idom
= -1;
1007 int linear_idom
= -1;
1008 Temp live_out_exec
= Temp();
1010 /* this information is needed for predecessors to blocks with phis when
1011 * moving out of ssa */
1012 bool scc_live_out
= false;
1013 PhysReg scratch_sgpr
= PhysReg(); /* only needs to be valid if scc_live_out != false */
1015 Block(unsigned idx
) : index(idx
) {}
1016 Block() : index(0) {}
/* A Stage is a bitmask combining one or more software (API) shader stages
 * with the hardware stage they execute as. */
using Stage = uint16_t;

/* software stages */
static constexpr Stage sw_vs = 1 << 0;
static constexpr Stage sw_gs = 1 << 1;
static constexpr Stage sw_tcs = 1 << 2;
static constexpr Stage sw_tes = 1 << 3;
static constexpr Stage sw_fs = 1 << 4;
static constexpr Stage sw_cs = 1 << 5;
static constexpr Stage sw_mask = 0x3f; /* covers all six sw_* bits */

/* hardware stages (can't be OR'd, just a mask for convenience when testing multiple) */
static constexpr Stage hw_vs = 1 << 6;
static constexpr Stage hw_es = 1 << 7;
static constexpr Stage hw_gs = 1 << 8; /* not on GFX9. combined into ES on GFX9 (and GFX10/legacy). */
static constexpr Stage hw_ls = 1 << 9;
static constexpr Stage hw_hs = 1 << 10; /* not on GFX9. combined into LS on GFX9 (and GFX10/legacy). */
static constexpr Stage hw_fs = 1 << 11;
static constexpr Stage hw_cs = 1 << 12;
static constexpr Stage hw_mask = 0x7f << 6; /* covers all seven hw_* bits */

/* possible settings of Program::stage */
static constexpr Stage vertex_vs = sw_vs | hw_vs;
static constexpr Stage fragment_fs = sw_fs | hw_fs;
static constexpr Stage compute_cs = sw_cs | hw_cs;
static constexpr Stage tess_eval_vs = sw_tes | hw_vs;
/* merged NGG stages */
static constexpr Stage ngg_vertex_gs = sw_vs | hw_gs;
static constexpr Stage ngg_vertex_geometry_gs = sw_vs | sw_gs | hw_gs;
static constexpr Stage ngg_tess_eval_geometry_gs = sw_tes | sw_gs | hw_gs;
static constexpr Stage ngg_vertex_tess_control_hs = sw_vs | sw_tcs | hw_hs;
/* GFX9 (and GFX10 if NGG isn't used) */
static constexpr Stage vertex_geometry_es = sw_vs | sw_gs | hw_es;
static constexpr Stage vertex_tess_control_ls = sw_vs | sw_tcs | hw_ls;
static constexpr Stage tess_eval_geometry_es = sw_tes | sw_gs | hw_es;
/* pre-GFX9 unmerged stages */
static constexpr Stage vertex_ls = sw_vs | hw_ls; /* vertex before tesselation control */
static constexpr Stage tess_control_hs = sw_tcs | hw_hs;
static constexpr Stage tess_eval_es = sw_tes | hw_gs; /* tesselation evaluation before GS
                                                       * NOTE(review): the _es suffix — and
                                                       * tess_eval_geometry_es above using hw_es —
                                                       * suggests hw_es was intended; confirm. */
static constexpr Stage geometry_gs = sw_gs | hw_gs;
1060 class Program final
{
1062 std::vector
<Block
> blocks
;
1063 RegisterDemand max_reg_demand
= RegisterDemand();
1064 uint16_t sgpr_limit
= 0;
1065 uint16_t num_waves
= 0;
1066 ac_shader_config
* config
;
1067 struct radv_shader_info
*info
;
1068 enum chip_class chip_class
;
1069 enum radeon_family family
;
1070 Stage stage
; /* Stage */
1071 bool needs_exact
= false; /* there exists an instruction with disable_wqm = true */
1072 bool needs_wqm
= false; /* there exists a p_wqm instruction */
1073 bool wb_smem_l1_on_end
= false;
1075 std::vector
<uint8_t> constant_data
;
1077 uint32_t allocateId()
1079 assert(allocationID
<= 16777215);
1080 return allocationID
++;
1083 uint32_t peekAllocationId()
1085 return allocationID
;
1088 void setAllocationId(uint32_t id
)
1093 Block
* create_and_insert_block() {
1094 blocks
.emplace_back(blocks
.size());
1095 return &blocks
.back();
1098 Block
* insert_block(Block
&& block
) {
1099 block
.index
= blocks
.size();
1100 blocks
.emplace_back(std::move(block
));
1101 return &blocks
.back();
1105 uint32_t allocationID
= 1;
1109 /* live temps out per block */
1110 std::vector
<std::set
<Temp
>> live_out
;
1111 /* register demand (sgpr/vgpr) per instruction per block */
1112 std::vector
<std::vector
<RegisterDemand
>> register_demand
;
1115 void select_program(Program
*program
,
1116 unsigned shader_count
,
1117 struct nir_shader
*const *shaders
,
1118 ac_shader_config
* config
,
1119 struct radv_shader_info
*info
,
1120 struct radv_nir_compiler_options
*options
);
1122 void lower_wqm(Program
* program
, live
& live_vars
,
1123 const struct radv_nir_compiler_options
*options
);
1124 void lower_bool_phis(Program
* program
);
1125 void update_vgpr_sgpr_demand(Program
* program
, const RegisterDemand new_demand
);
1126 live
live_var_analysis(Program
* program
, const struct radv_nir_compiler_options
*options
);
1127 std::vector
<uint16_t> dead_code_analysis(Program
*program
);
1128 void dominator_tree(Program
* program
);
1129 void insert_exec_mask(Program
*program
);
1130 void value_numbering(Program
* program
);
1131 void optimize(Program
* program
);
1132 void setup_reduce_temp(Program
* program
);
1133 void lower_to_cssa(Program
* program
, live
& live_vars
, const struct radv_nir_compiler_options
*options
);
1134 void register_allocation(Program
*program
, std::vector
<std::set
<Temp
>> live_out_per_block
);
1135 void ssa_elimination(Program
* program
);
1136 void lower_to_hw_instr(Program
* program
);
1137 void schedule_program(Program
* program
, live
& live_vars
);
1138 void spill(Program
* program
, live
& live_vars
, const struct radv_nir_compiler_options
*options
);
1139 void insert_wait_states(Program
* program
);
1140 void insert_NOPs(Program
* program
);
1141 unsigned emit_program(Program
* program
, std::vector
<uint32_t>& code
);
1142 void print_asm(Program
*program
, std::vector
<uint32_t>& binary
, unsigned exec_size
,
1143 enum radeon_family family
, std::ostream
& out
);
1144 void validate(Program
* program
, FILE *output
);
1145 bool validate_ra(Program
* program
, const struct radv_nir_compiler_options
*options
, FILE *output
);
1147 void perfwarn(bool cond
, const char *msg
, Instruction
*instr
=NULL
);
1149 #define perfwarn(program, cond, msg, ...)
1152 void aco_print_instr(Instruction
*instr
, FILE *output
);
1153 void aco_print_program(Program
*program
, FILE *output
);
1156 const int16_t opcode_gfx9
[static_cast<int>(aco_opcode::num_opcodes
)];
1157 const int16_t opcode_gfx10
[static_cast<int>(aco_opcode::num_opcodes
)];
1158 const std::bitset
<static_cast<int>(aco_opcode::num_opcodes
)> can_use_input_modifiers
;
1159 const std::bitset
<static_cast<int>(aco_opcode::num_opcodes
)> can_use_output_modifiers
;
1160 const char *name
[static_cast<int>(aco_opcode::num_opcodes
)];
1161 const aco::Format format
[static_cast<int>(aco_opcode::num_opcodes
)];
1164 extern const Info instr_info
;
1168 #endif /* ACO_IR_H */