/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include "util/debug.h"

#include "nir_opcodes.h"

#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 4
#define NIR_MAX_MATRIX_COLUMNS 4
typedef uint8_t nir_component_mask_t;
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field,  \
                        type_field, type_value)          \
static inline out_type *                                 \
name(const in_type *parent)                              \
{                                                        \
   assert(parent && parent->type_field == type_value);   \
   return exec_node_data(out_type, parent, field);       \
}
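
/* Illustrative example (a sketch, not an extra declaration): the instruction
 * casts defined further down in this file are generated with this macro, e.g.
 *
 *    NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
 *                    type, nir_instr_type_alu)
 *
 * expands to a checked downcast that asserts instr->type ==
 * nir_instr_type_alu before using exec_node_data() to recover the containing
 * nir_alu_instr from its embedded nir_instr field.
 */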
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
   int swizzle;
} nir_state_slot;
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_shader_temp     = (1 << 2),
   nir_var_function_temp   = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_mem_ubo         = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_mem_ssbo        = (1 << 7),
   nir_var_mem_shared      = (1 << 8),
   nir_var_mem_global      = (1 << 9),
   nir_var_all             = ~0,
} nir_variable_mode;
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne  = 1, /* round to nearest even */
   nir_rounding_mode_ru    = 2, /* round up */
   nir_rounding_mode_rd    = 3, /* round down */
   nir_rounding_mode_rtz   = 4, /* round towards zero */
} nir_rounding_mode;
#define nir_const_value_to_array(arr, c, components, m) \
{ \
   for (unsigned i = 0; i < components; ++i) \
      arr[i] = c[i].m; \
}
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[NIR_MAX_VEC_COMPONENTS];

   /* We could get this from var->type, but it makes clone *much* easier not
    * to have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be
 * redeclared with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Enum keeping track of how a variable was declared.
 */
typedef enum {
   /**
    * Normal declaration.
    */
   nir_var_declared_normally = 0,

   /**
    * Variable is implicitly generated by the compiler and should not be
    * visible via the API.
    */
   nir_var_hidden,
} nir_var_declaration_type;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned invariant:1;

      /**
       * Can this variable be coalesced with another?
       *
       * This is set by nir_lower_io_to_temporaries to say that any
       * copies involving this variable should stay put. Propagating it can
       * duplicate the resulting load/store, which is not wanted, and may
       * result in a load/store of the variable with an indirect offset which
       * the backend may not be able to handle.
       */
      unsigned cannot_coalesce:1;

      /**
       * When separate shader programs are enabled, only input/outputs between
       * the stages of a multi-stage separate program can be safely removed
       * from the shader interface. Other input/outputs must remain active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * If non-zero, then this variable may be packed along with other
       * variables into a single varying slot, so this offset should be
       * applied when accessing components. For example, an offset of 1 means
       * that the x component of this variable is actually stored in
       * component y of the location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * Non-zero if this variable is considered bindless as defined by
       * ARB_bindless_texture.
       */
      unsigned bindless:1;

      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * Was a transform feedback buffer set in the shader?
       */
      unsigned explicit_xfb_buffer:1;

      /**
       * Was a transform feedback stride set in the shader?
       */
      unsigned explicit_xfb_stride:1;

      /**
       * Was an explicit offset set in the shader?
       */
      unsigned explicit_offset:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the
       * variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
       * stream of the i-th component.
       */
      unsigned stream;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first
       * element.
       */
      int binding;

      /**
       * Location an atomic counter or transform feedback is stored at.
       */
      unsigned offset;

      /**
       * Transform feedback buffer.
       */
      unsigned xfb_buffer;

      /**
       * Transform feedback stride.
       */
      unsigned xfb_stride;

      /**
       * How the variable was declared. See nir_var_declaration_type.
       *
       * This is used to detect variables generated by the compiler, so should
       * not be visible via the API.
       */
      unsigned how_declared:2;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      enum gl_access_qualifier access;

      /** Image internal format if specified explicitly, otherwise GL_NONE. */
      GLenum format;
   } data;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree. In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   unsigned num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */

   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;

   /**
    * Description of per-member data for per-member struct variables
    *
    * This is used for variables which are actually an amalgamation of
    * multiple entities such as a struct of built-in values or a struct of
    * inputs each with their own layout specifier. This is only allowed on
    * variables with a struct or array of array of struct type.
    */
   unsigned num_members;
   struct nir_variable_data *members;
} nir_variable;
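
/* A small sketch of decoding the packed form of nir_variable_data::stream
 * described above (an illustrative helper, not part of this header):
 *
 *    static inline unsigned
 *    example_component_stream(const nir_variable *var, unsigned i)
 *    {
 *       assert(var->data.stream & (1u << 31));     // packed form
 *       return (var->data.stream >> (2 * i)) & 0x3; // stream of component i
 *    }
 */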
#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_function_temp;
}
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components;  /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;
} nir_register;

#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)
typedef enum PACKED {
   nir_instr_type_alu,
   nir_instr_type_deref,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;
typedef struct nir_instr {
   struct exec_node node;
   struct nir_block *block;
   nir_instr_type type;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;

   /** generic instruction index. */
   unsigned index;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}
typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char *name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   /** Instruction which produces this SSA value. */
   nir_instr *parent_instr;

   /** set of nir_instrs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;
typedef struct nir_reg_src {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct nir_reg_dest {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
typedef struct nir_src {
   union {
      /** Instruction that consumes this value as a source. */
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()
#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
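
/* Example (illustrative): counting every use of an SSA definition, both as
 * instruction sources and as if-condition sources, with the macros above:
 *
 *    static inline unsigned
 *    example_count_uses(nir_ssa_def *def)
 *    {
 *       unsigned count = 0;
 *       nir_foreach_use(src, def)
 *          count++;
 *       nir_foreach_if_use(src, def)
 *          count++;
 *       return count;
 *    }
 */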
typedef struct nir_dest {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_src_num_components(nir_src src)
{
   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}

static inline bool
nir_src_is_const(nir_src src)
{
   return src.is_ssa &&
          src.ssa->parent_instr->type == nir_instr_type_load_const;
}
int64_t nir_src_as_int(nir_src src);
uint64_t nir_src_as_uint(nir_src src);
bool nir_src_as_bool(nir_src src);
double nir_src_as_float(nir_src src);
int64_t nir_src_comp_as_int(nir_src src, unsigned component);
uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
bool nir_src_comp_as_bool(nir_src src, unsigned component);
double nir_src_comp_as_float(nir_src src, unsigned component);
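
/* Example (illustrative): the nir_src_as_* helpers above are only valid on
 * constant sources, so a typical caller checks first:
 *
 *    if (nir_src_is_const(intrin->src[1])) {
 *       uint64_t offset = nir_src_as_uint(intrin->src[1]);
 *       ...
 *    }
 *
 * (intrin->src[1] here is a stand-in for whichever source is of interest.)
 */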
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

static inline unsigned
nir_dest_num_components(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
typedef struct {
   /** Base source */
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/

   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;

typedef struct {
   /** Base destination */
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
/** NIR sized and unsized types
 *
 * The values in this enum are carefully chosen so that the sized type is
 * just the unsized type OR the number of bits.
 */
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_int =       2,
   nir_type_uint =      4,
   nir_type_bool =      6,
   nir_type_float =     128,
   nir_type_bool1 =     1  | nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int1 =      1  | nir_type_int,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint1 =     1  | nir_type_uint,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;
#define NIR_ALU_TYPE_SIZE_MASK 0x79
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
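
/* For example, since a sized type is "unsized base | bit size":
 *
 *    nir_alu_type_get_type_size(nir_type_float16) == 16
 *    nir_alu_type_get_base_type(nir_type_float16) == nir_type_float
 *    (nir_type_float | 16)                        == nir_type_float16
 */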
static inline nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
{
   switch (base_type) {
   case GLSL_TYPE_BOOL:
      return nir_type_bool1;

   case GLSL_TYPE_UINT:
      return nir_type_uint32;

   case GLSL_TYPE_INT:
      return nir_type_int32;

   case GLSL_TYPE_UINT16:
      return nir_type_uint16;

   case GLSL_TYPE_INT16:
      return nir_type_int16;

   case GLSL_TYPE_UINT8:
      return nir_type_uint8;

   case GLSL_TYPE_INT8:
      return nir_type_int8;

   case GLSL_TYPE_UINT64:
      return nir_type_uint64;

   case GLSL_TYPE_INT64:
      return nir_type_int64;

   case GLSL_TYPE_FLOAT:
      return nir_type_float32;

   case GLSL_TYPE_FLOAT16:
      return nir_type_float16;

   case GLSL_TYPE_DOUBLE:
      return nir_type_float64;

   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_ATOMIC_UINT:
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_SUBROUTINE:
   case GLSL_TYPE_FUNCTION:
   case GLSL_TYPE_ERROR:
      return nir_type_invalid;
   }

   unreachable("unknown type");
}

static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}

nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);
static inline nir_op
nir_op_vec(unsigned components)
{
   switch (components) {
   case 1: return nir_op_mov;
   case 2: return nir_op_vec2;
   case 3: return nir_op_vec3;
   case 4: return nir_op_vec4;
   default: unreachable("bad component count");
   }
}
typedef enum {
   /**
    * Operation where the first two sources are commutative.
    *
    * For 2-source operations, this just means mathematical commutativity.
    * Some 3-source operations, like ffma, are only commutative in the first
    * two sources.
    */
   NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
typedef struct {
   /** The name of the opcode */
   const char *name;

   /** The number of input sources */
   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[NIR_MAX_VEC_COMPONENTS];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];

   nir_op_algebraic_property algebraic_properties;

   /* Whether this represents a numeric conversion opcode */
   bool is_conversion;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];
typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either. This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact:1;

   /**
    * Indicates that this instruction does not cause wrapping to occur, in
    * the form of overflow or underflow.
    */
   bool no_signed_wrap:1;
   bool no_unsigned_wrap:1;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}

static inline nir_component_mask_t
nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
{
   nir_component_mask_t read_mask = 0;
   for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
      if (!nir_alu_instr_channel_used(instr, src, c))
         continue;

      read_mask |= (1 << instr->src[src].swizzle[c]);
   }
   return read_mask;
}
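
/* Worked example (illustrative): for the "foo.xzw = bar.zyx" statement from
 * the nir_alu_src comment above, write_mask is 0b1101 and the swizzle is
 * {2, don't-care, 1, 0}.  Channel 1 is masked out, so channel_used() skips
 * it, and nir_alu_instr_src_read_mask() returns
 * (1 << 2) | (1 << 1) | (1 << 0) = 0b0111: components x, y, and z of bar are
 * read.
 */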
/**
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}
bool nir_const_value_negative_equal(const nir_const_value *c1,
                                    const nir_const_value *c2,
                                    unsigned components,
                                    nir_alu_type base_type,
                                    unsigned bits);

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);

bool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
                                 const nir_alu_instr *alu2,
                                 unsigned src1, unsigned src2);
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_array_wildcard,
   nir_deref_type_ptr_as_array,
   nir_deref_type_struct,
   nir_deref_type_cast,
} nir_deref_type;

typedef struct {
   nir_instr instr;

   /** The type of this deref instruction */
   nir_deref_type deref_type;

   /** The mode of the underlying variable */
   nir_variable_mode mode;

   /** The dereferenced type of the resulting pointer value */
   const struct glsl_type *type;

   union {
      /** Variable being dereferenced if deref_type is a deref_var */
      nir_variable *var;

      /** Parent deref if deref_type is not deref_var */
      nir_src parent;
   };

   /** Additional deref parameters */
   union {
      struct {
         nir_src index;
      } arr;

      struct {
         unsigned index;
      } strct;

      struct {
         unsigned ptr_stride;
      } cast;
   };

   /** Destination to store the resulting "pointer" */
   nir_dest dest;
} nir_deref_instr;
static inline nir_deref_instr *nir_src_as_deref(nir_src src);

static inline nir_deref_instr *
nir_deref_instr_parent(const nir_deref_instr *instr)
{
   if (instr->deref_type == nir_deref_type_var)
      return NULL;
   else
      return nir_src_as_deref(instr->parent);
}
static inline nir_variable *
nir_deref_instr_get_variable(const nir_deref_instr *instr)
{
   while (instr->deref_type != nir_deref_type_var) {
      if (instr->deref_type == nir_deref_type_cast)
         return NULL;

      instr = nir_deref_instr_parent(instr);
   }

   return instr->var;
}
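
/* Example (illustrative): walking up a deref chain mirrors
 * nir_deref_instr_get_variable() above; a pass that only cares about the
 * immediate parent might do:
 *
 *    nir_deref_instr *parent = nir_deref_instr_parent(deref);
 *    if (parent == NULL) {
 *       // deref is a nir_deref_type_var; deref->var is the variable
 *    }
 */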
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);

bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);

unsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr *instr);
typedef struct {
   nir_instr instr;

   struct nir_function *callee;

   unsigned num_params;
   nir_src params[];
} nir_call_instr;

#include "nir_intrinsics.h"
#define NIR_INTRINSIC_MAX_CONST_INDEX 4

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_src src[];
} nir_intrinsic_instr;
static inline nir_variable *
nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
}
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * whether the intrinsic can be safely eliminated if none of its output
    * values are being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access.  This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE,

   /**
    * A binary nir_op to use when performing a reduction or scan operation
    */
   NIR_INTRINSIC_REDUCTION_OP,

   /**
    * Cluster size for reduction operations
    */
   NIR_INTRINSIC_CLUSTER_SIZE,

   /**
    * Parameter index for a load_param intrinsic
    */
   NIR_INTRINSIC_PARAM_IDX,

   /**
    * Image dimensionality for image intrinsics
    *
    * One of GLSL_SAMPLER_DIM_*
    */
   NIR_INTRINSIC_IMAGE_DIM,

   /**
    * Non-zero if we are accessing an array image
    */
   NIR_INTRINSIC_IMAGE_ARRAY,

   /**
    * Image format for image intrinsics
    */
   NIR_INTRINSIC_FORMAT,

   /**
    * Access qualifiers for image and memory access intrinsics
    */
   NIR_INTRINSIC_ACCESS,

   /**
    * Alignment for offsets and addresses
    *
    * These two parameters specify an alignment in terms of a multiplier and
    * an offset.  The offset or address parameter X of the intrinsic is
    * guaranteed to satisfy the following:
    *
    *                (X - align_offset) % align_mul == 0
    */
   NIR_INTRINSIC_ALIGN_MUL,
   NIR_INTRINSIC_ALIGN_OFFSET,

   /**
    * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
    */
   NIR_INTRINSIC_DESC_TYPE,

   /**
    * The nir_alu_type of a uniform/input/output
    */
   NIR_INTRINSIC_TYPE,

   /**
    * The swizzle mask for the instructions
    * SwizzleInvocationsAMD and SwizzleInvocationsMaskedAMD
    */
   NIR_INTRINSIC_SWIZZLE_MASK,

   /* Separate source/dest access flags for copies */
   NIR_INTRINSIC_SRC_ACCESS = 21,
   NIR_INTRINSIC_DST_ACCESS = 22,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
#define NIR_INTRINSIC_MAX_INPUTS 5

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.  If this value is -1, the
    * intrinsic consumes however many components are provided and it is not
    * validated at all.
    */
   int src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** bitfield of legal bit sizes */
   unsigned dest_bit_sizes;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
static inline unsigned
nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   assert(srcn < info->num_srcs);
   if (info->src_components[srcn] > 0)
      return info->src_components[srcn];
   else if (info->src_components[srcn] == 0)
      return intr->num_components;
   else
      return nir_src_num_components(intr->src[srcn]);
}

static inline unsigned
nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   if (!info->has_dest)
      return 0;
   else if (info->dest_components)
      return info->dest_components;
   else
      return intr->num_components;
}
#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                             \
static inline type                                                            \
nir_intrinsic_##name(const nir_intrinsic_instr *instr)                        \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
}                                                                             \
static inline void                                                            \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)                \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;       \
}
INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(src_access, SRC_ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(dst_access, DST_ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
INTRINSIC_IDX_ACCESSORS(type, TYPE, nir_alu_type)
INTRINSIC_IDX_ACCESSORS(swizzle_mask, SWIZZLE_MASK, unsigned)
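
/* Example (illustrative): the accessors generated above are the intended way
 * to read and write const_index slots, e.g. for a store intrinsic:
 *
 *    unsigned wrmask = nir_intrinsic_write_mask(intrin);
 *    int base = nir_intrinsic_base(intrin);
 *    nir_intrinsic_set_base(intrin, base + 16);
 *
 * Each accessor asserts that the intrinsic actually uses that index slot.
 */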
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
                        unsigned align_mul, unsigned align_offset)
{
   assert(util_is_power_of_two_nonzero(align_mul));
   assert(align_offset < align_mul);
   nir_intrinsic_set_align_mul(intrin, align_mul);
   nir_intrinsic_set_align_offset(intrin, align_offset);
}
/** Returns a simple alignment for a load/store intrinsic offset
 *
 * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
 * and ALIGN_OFFSET parameters, this helper takes both into account and
 * provides a single simple alignment parameter.  The offset X is guaranteed
 * to satisfy X % align == 0.
 */
static inline unsigned
nir_intrinsic_align(const nir_intrinsic_instr *intrin)
{
   const unsigned align_mul = nir_intrinsic_align_mul(intrin);
   const unsigned align_offset = nir_intrinsic_align_offset(intrin);
   assert(align_offset < align_mul);
   return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
}
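
/* Worked example: align_mul = 16 and align_offset = 4 guarantee the offset
 * has the form 16k + 4.  nir_intrinsic_align() collapses this to the largest
 * power of two that divides every such offset: ffs(4) gives 1 << 2 = 4, so
 * the offset is known to be 4-byte aligned.  With align_offset == 0, the
 * full align_mul is returned.
 */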
/* Converts an image_deref_* intrinsic into an image_* one */
void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
                                 nir_ssa_def *handle, bool bindless);
/* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
static inline bool
nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
{
   if (instr->intrinsic == nir_intrinsic_load_deref ||
       instr->intrinsic == nir_intrinsic_load_ssbo ||
       instr->intrinsic == nir_intrinsic_bindless_image_load ||
       instr->intrinsic == nir_intrinsic_image_deref_load ||
       instr->intrinsic == nir_intrinsic_image_load) {
      return nir_intrinsic_access(instr) & ACCESS_CAN_REORDER;
   } else {
      const nir_intrinsic_info *info =
         &nir_intrinsic_infos[instr->intrinsic];
      return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
             (info->flags & NIR_INTRINSIC_CAN_REORDER);
   }
}
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparator, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_min_lod,
   nir_tex_src_ms_index,   /* MSAA sample index */
   nir_tex_src_ms_mcs,     /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_deref,  /* < deref pointing to the texture */
   nir_tex_src_sampler_deref,  /* < deref pointing to the sampler */
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_texture_handle, /* < bindless texture handle */
   nir_tex_src_sampler_handle, /* < bindless sampler handle */
   nir_tex_src_plane,          /* < selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_instr_src;
typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txf_ms_fb,          /**< Multisample texture fetch from framebuffer */
   nir_texop_txf_ms_mcs,         /**< Multisample compression value fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  * identical.
                                  */
} nir_texop;
typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_instr_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs 4
    * components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /* gather offsets */
   int8_t tg4_offsets[4][2];

   /* True if the texture index or handle is not dynamically uniform */
   bool texture_non_uniform;

   /* True if the sampler index or handle is not dynamically uniform */
   bool sampler_non_uniform;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_lod
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;
} nir_tex_instr;
static inline unsigned
nir_tex_instr_dest_size(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_txf_ms_fb:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}
static inline bool
nir_alu_instr_is_comparison(const nir_alu_instr *instr)
{
   switch (instr->op) {
   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine:
   case nir_op_i2b1:
   case nir_op_f2b1:
   case nir_op_inot:
      return true;
   default:
      return false;
   }
}
static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_fb:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}
static inline unsigned
nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
    * the offset, since a cube maps to a single face.
    */
   if (instr->src[src].src_type == nir_tex_src_offset) {
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
         return 2;
      else if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}
static inline int
nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}

void nir_tex_instr_add_src(nir_tex_instr *tex,
                           nir_tex_src_type src_type,
                           nir_src src);

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);

bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
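
/* Example (illustrative): nir_tex_instr_src_index() is the usual way to find
 * an optional source, e.g. to fetch the LOD source if present:
 *
 *    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
 *    if (lod_idx >= 0) {
 *       nir_src *lod = &tex->src[lod_idx].src;
 *       ...
 *    }
 */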
typedef struct {
   nir_instr instr;

   nir_ssa_def def;

   nir_const_value value[];
} nir_load_const_instr;

#define nir_const_load_to_arr(arr, l, m) \
{ \
   nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
}
typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;
   nir_jump_type type;
} nir_jump_instr;

/* An instruction that creates a new SSA variable in an undefined state */
typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;

typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;
typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)

typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entrys.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * get swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
                type, nir_instr_type_deref)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)
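
/* Example (illustrative): the casts above are meant to be used after
 * dispatching on nir_instr::type, e.g.
 *
 *    switch (instr->type) {
 *    case nir_instr_type_alu: {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       ...
 *       break;
 *    }
 *    case nir_instr_type_intrinsic: {
 *       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 *       ...
 *       break;
 *    }
 *    default:
 *       break;
 *    }
 */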
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops. The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish. Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block). Each function also has a start block and an end block which
 * all return statements point to (which is always empty). Together, all the
 * blocks with their predecessors and successors make up the control flow
 * graph (CFG) of the function. There are helpers that modify the tree of
 * control flow nodes while modifying the CFG appropriately; these should be
 * used instead of modifying the tree directly.
 */

typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;

typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;
typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /** < list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_blocks on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;
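
/* Sketch of the O(1) dominance test that dom_pre_index/dom_post_index enable
 * (an illustrative helper, not part of this header; it assumes
 * nir_metadata_dominance is valid):
 *
 *    static inline bool
 *    example_block_dominates(const nir_block *parent, const nir_block *child)
 *    {
 *       return child->dom_pre_index >= parent->dom_pre_index &&
 *              child->dom_post_index <= parent->dom_post_index;
 *    }
 */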
static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}

static inline bool
nir_block_ends_in_jump(nir_block *block)
{
   return !exec_list_is_empty(&block->instr_list) &&
          nir_block_last_instr(block)->type == nir_instr_type_jump;
}
#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
typedef enum {
   nir_selection_control_none = 0x0,
   nir_selection_control_flatten = 0x1,
   nir_selection_control_dont_flatten = 0x2,
} nir_selection_control;
typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;
   nir_selection_control control;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;

typedef struct {
   nir_if *nif;

   /** Instruction that generates nif::condition. */
   nir_instr *conditional_instr;

   /** Block within ::nif that has the break instruction. */
   nir_block *break_block;

   /** Last block for the then- or else-path that does not contain the break. */
   nir_block *continue_from_block;

   /** True when ::break_block is in the else-path of ::nif. */
   bool continue_from_then;
   bool induction_rhs;

   /* This is true if the terminator's exact trip count is unknown. For
    * example:
    *
    *    for (int i = 0; i < imin(x, 4); i++)
    *       ...
    *
    * Here loop analysis would have set a max_trip_count of 4; however, we
    * don't know for sure that this is the exact trip count.
    */
   bool exact_trip_count_unknown;

   struct list_head loop_terminator_link;
} nir_loop_terminator;
typedef struct {
   /* Estimated cost (in number of instructions) of the loop */
   unsigned instr_cost;

   /* Guessed trip count based on array indexing */
   unsigned guessed_trip_count;

   /* Maximum number of times the loop is run (if known) */
   unsigned max_trip_count;

   /* Do we know the exact number of times the loop will be run */
   bool exact_trip_count_known;

   /* Unroll the loop regardless of its size */
   bool force_unroll;

   /* Does the loop contain complex loop terminators, continues or other
    * complex behaviours? If this is true we can't rely on
    * loop_terminator_list to be complete or accurate.
    */
   bool complex_loop;

   nir_loop_terminator *limiting_terminator;

   /* A list of loop_terminators terminating this loop. */
   struct list_head loop_terminator_list;
} nir_loop_info;
typedef enum {
   nir_loop_control_none = 0x0,
   nir_loop_control_unroll = 0x1,
   nir_loop_control_dont_unroll = 0x2,
} nir_loop_control;

typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */

   nir_loop_info *info;
   nir_loop_control control;
   bool partially_unrolled;
} nir_loop;
/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
   nir_metadata_loop_analysis = 0x10,
} nir_metadata;
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}
static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}
NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
/**
 * Return true if this list of cf_nodes contains a single empty block.
 */
static inline bool
nir_cf_list_is_empty_block(struct exec_list *cf_list)
{
   if (exec_list_is_singular(cf_list)) {
      struct exec_node *head = exec_list_get_head(cf_list);
      nir_block *block =
         nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
      return exec_list_is_empty(&block->instr_list);
   }
   return false;
}
typedef struct {
   uint8_t num_components;
   uint8_t bit_size;
} nir_parameter;
typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;

   bool is_entrypoint;
} nir_function;
typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
   /** Lower all 64-bit umul_high and imul_high opcodes */
   nir_lower_imul_high64 = (1 << 3),
   nir_lower_mov64 = (1 << 4),
   nir_lower_icmp64 = (1 << 5),
   nir_lower_iadd64 = (1 << 6),
   nir_lower_iabs64 = (1 << 7),
   nir_lower_ineg64 = (1 << 8),
   nir_lower_logic64 = (1 << 9),
   nir_lower_minmax64 = (1 << 10),
   nir_lower_shift64 = (1 << 11),
   nir_lower_imul_2x32_64 = (1 << 12),
} nir_lower_int64_options;
typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8),
   nir_lower_fp64_full_software = (1 << 9),
} nir_lower_doubles_options;
typedef struct nir_shader_compiler_options {
   /** Lowers flrp when it does not support doubles */
   bool lower_flrp64;

   /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
   bool lower_bitfield_extract;
   /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
   bool lower_bitfield_extract_to_shifts;
   /** Lowers bitfield_insert to bfi/bfm */
   bool lower_bitfield_insert;
   /** Lowers bitfield_insert to compares, and shifts. */
   bool lower_bitfield_insert_to_shifts;
   /** Lowers bitfield_insert to bfm/bitfield_select. */
   bool lower_bitfield_insert_to_bitfield_select;
   /** Lowers bitfield_reverse to shifts. */
   bool lower_bitfield_reverse;
   /** Lowers bit_count to shifts. */
   bool lower_bit_count;
   /** Lowers ifind_msb to compare and ufind_msb */
   bool lower_ifind_msb;
   /** Lowers find_lsb to ufind_msb and logic ops */
   bool lower_find_lsb;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
   bool lower_mul_high;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /** enable rules to avoid bit shifts */
   bool lower_bitshift;

   /** enables rules to lower isign to imin+imax */
   bool lower_isign;

   /** enables rules to lower fsign to fsub and flt */
   bool lower_fsign;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffloor to fsub+ffract: */
   bool lower_ffloor;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   /** lowers fceil to fneg+ffloor+fneg: */
   bool lower_fceil;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   bool lower_all_io_to_temps;
   bool lower_all_io_to_elements;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   /**
    * If enabled, gl_BaseVertex will be lowered as:
    * is_indexed_draw (~0/0) & firstvertex
    */
   bool lower_base_vertex;

   /**
    * If enabled, gl_HelperInvocation will be lowered as:
    *
    *   !((1 << sample_id) & sample_mask_in))
    *
    * This depends on some possibly hw implementation details, which may
    * not be true for all hw.  In particular that the FS is only executed
    * for covered samples or for helper invocations.  So, do not blindly
    * enable this option.
    *
    * Note: See also issue #22 in ARB_shader_image_load_store
    */
   bool lower_helper_invocation;

   /**
    * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
    *
    *   gl_SampleMaskIn == 0 ---> gl_HelperInvocation
    *   gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
    */
   bool optimize_sample_mask_in;

   bool lower_cs_local_index_from_id;
   bool lower_cs_local_id_from_index;

   bool lower_device_index_to_zero;

   /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
   bool lower_wpos_pntc;

   /**
    * Should IO be re-vectorized?  Some scalar ISAs still operate on vec4's
    * for IO purposes and would prefer loads/stores be vectorized.
    */
   bool vectorize_io;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;

   /* Lowers when 32x32->64 bit multiplication is not supported */
   bool lower_mul_2x32_64;

   /* Lowers when rotate instruction is not supported */
   bool lower_rotate;

   unsigned max_unroll_iterations;

   nir_lower_int64_options lower_int64_options;
   nir_lower_doubles_options lower_doubles_options;
} nir_shader_compiler_options;
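
/* Illustrative sketch (not part of nir.h): drivers normally keep a single
 * static copy of their options (see nir_shader::options below).  The fields
 * chosen here are arbitrary examples. */
static const nir_shader_compiler_options example_compiler_options = {
   .lower_bitfield_extract = true,
   .lower_mul_high = true,
   .fdot_replicates = true,
   .max_unroll_iterations = 32,
};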
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** Size in bytes of required scratch space */
   unsigned scratch_size;

   /** Constant data associated with this shader.
    *
    * Constant data is loaded through load_constant intrinsics.  See also
    * nir_opt_large_constants.
    */
   void *constant_data;
   unsigned constant_data_size;
} nir_shader;
#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)

static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   nir_function *func = NULL;

   nir_foreach_function(function, shader) {
      assert(func == NULL);
      if (function->is_entrypoint) {
         func = function;
      }
   }

   if (!func)
      return NULL;

   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);
/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_function_temp);
   exec_list_push_tail(&impl->locals, &var->node);
}
/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);

/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);
nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
                                        nir_deref_type deref_type);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/*
 * NIR Cursors and Instruction Insertion API
 */

/**
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;

   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}
static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}
static inline nir_cursor
nir_before_src(nir_src *src, bool is_if_condition)
{
   if (is_if_condition) {
      nir_block *prev_block =
         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
      assert(!nir_block_ends_in_jump(prev_block));
      return nir_after_block(prev_block);
   } else if (src->parent_instr->type == nir_instr_type_phi) {
#ifndef NDEBUG
      /* The source comes from a phi; check that it really is one of that
       * phi's sources. */
      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
      bool found = false;
      nir_foreach_phi_src(phi_src, cond_phi) {
         if (phi_src->src.ssa == src->ssa) {
            found = true;
            break;
         }
      }
      assert(found);
#endif
      /* The LIST_ENTRY macro is a generic container-of macro, it just happens
       * to have a more specific name.
       */
      nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
      return nir_after_block_before_jump(phi_src->pred);
   } else {
      return nir_before_instr(src->parent_instr);
   }
}
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}
static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}
static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
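
/* Illustrative sketch (not part of nir.h): the helpers above are thin
 * wrappers over nir_instr_insert() with a cursor.  For example, appending an
 * instruction at the end of a block while keeping any trailing jump last can
 * be done directly with the cursor constructors. */
static inline void
nir_append_before_jump_example(nir_block *block, nir_instr *instr)
{
   nir_instr_insert(nir_after_block_before_jump(block), instr);
}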
void nir_instr_remove_v(nir_instr *instr);

static inline nir_cursor
nir_instr_remove(nir_instr *instr)
{
   nir_cursor cursor;
   nir_instr *prev = nir_instr_prev(instr);
   if (prev) {
      cursor = nir_after_instr(prev);
   } else {
      cursor = nir_before_block(instr->block);
   }
   nir_instr_remove_v(instr);
   return cursor;
}
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);
#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro)                \
static inline c_type *                                                  \
nir_src_as_ ## name (nir_src src)                                       \
{                                                                       \
    return src.is_ssa && src.ssa->parent_instr->type == type_enum       \
           ? cast_macro(src.ssa->parent_instr) : NULL;                  \
}

NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
            nir_instr_type_intrinsic, nir_instr_as_intrinsic)
NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
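
/* Illustrative sketch (not part of nir.h): the generated helpers return NULL
 * when the source is not SSA or comes from a different instruction type,
 * which makes chained checks compact. */
static inline bool
nir_src_is_load_ubo_example(nir_src src)
{
   nir_intrinsic_instr *intrin = nir_src_as_intrinsic(src);
   return intrin != NULL && intrin->intrinsic == nir_intrinsic_load_ubo;
}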
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
/*
 * finds the next basic block in source-code order, returns NULL if there is
 * none
 */

nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */

nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);

/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
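
/* Illustrative sketch (not part of nir.h): a typical walk over an impl with
 * the iterators above, here counting the non-phi instructions. */
static inline unsigned
nir_impl_count_instrs_example(nir_function_impl *impl)
{
   unsigned count = 0;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block) {
         if (instr->type != nir_instr_type_phi)
            count++;
      }
   }
   return count;
}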
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
/** Shallow clone of a single ALU instruction. */
nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(nir_shader *shader,
                                           const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);

void nir_shader_replace(nir_shader *dest, nir_shader *src);

void nir_shader_serialize_deserialize(nir_shader *s);
#ifndef NDEBUG
void nir_validate_shader(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_skip_nir(const char *name)
{
   static const char *list = NULL;
   if (!list) {
      /* Comma separated list of names to skip. */
      list = getenv("NIR_SKIP");
      if (!list)
         list = "";
   }

   if (!list[0])
      return false;

   return comma_separated_list_contains(list, name);
}

static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

static inline bool
should_serialize_deserialize_nir(void)
{
   static int test_serialize = -1;
   if (test_serialize < 0)
      test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);

   return test_serialize;
}

static inline bool
should_print_nir(void)
{
   static int should_print = -1;
   if (should_print < 0)
      should_print = env_var_as_boolean("NIR_PRINT", false);

   return should_print;
}
#else
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
#define _PASS(pass, nir, do_pass) do {                               \
   if (should_skip_nir(#pass)) {                                     \
      printf("skipping %s\n", #pass);                                \
      break;                                                         \
   }                                                                 \
   do_pass                                                           \
   nir_validate_shader(nir, "after " #pass);                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      nir_shader_replace(nir, clone);                                \
   }                                                                 \
   if (should_serialize_deserialize_nir()) {                         \
      nir_shader_serialize_deserialize(nir);                         \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir,          \
   nir_metadata_set_validation_flag(nir);                            \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      if (should_print_nir())                                        \
         nir_print_shader(nir, stdout);                              \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir,                  \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   pass(nir, ##__VA_ARGS__);                                         \
   if (should_print_nir())                                           \
      nir_print_shader(nir, stdout);                                 \
)

#define NIR_SKIP(name) should_skip_nir(#name)
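
/* Illustrative sketch (not part of nir.h): a typical fixed-point
 * optimization loop built on NIR_PASS.  The passes used are declared further
 * down in this header; the redundant declarations here just keep the example
 * self-contained. */
bool nir_copy_prop(nir_shader *shader);
bool nir_opt_cse(nir_shader *shader);
bool nir_opt_dce(nir_shader *shader);

static inline void
nir_optimize_loop_example(nir_shader *s)
{
   bool progress;
   do {
      /* Re-run the passes until none of them makes progress. */
      progress = false;
      NIR_PASS(progress, s, nir_copy_prop);
      NIR_PASS(progress, s, nir_opt_cse);
      NIR_PASS(progress, s, nir_opt_dce);
   } while (progress);
}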
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);
bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
bool nir_split_per_member_structs(nir_shader *shader);
bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params);
bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
void nir_lower_deref_copy_instr(struct nir_builder *b,
                                nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);

void nir_fixup_deref_modes(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);
typedef enum {
   nir_lower_direct_array_deref_of_vec_load    = (1 << 0),
   nir_lower_indirect_array_deref_of_vec_load  = (1 << 1),
   nir_lower_direct_array_deref_of_vec_store   = (1 << 2),
   nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
} nir_lower_array_deref_of_vec_options;

bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
                                  nir_lower_array_deref_of_vec_options options);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

bool nir_lower_vars_to_scratch(nir_shader *shader,
                               nir_variable_mode modes,
                               int size_threshold,
                               glsl_type_size_align_func size_align);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
void nir_gather_ssa_types(nir_function_impl *impl,
                          BITSET_WORD *float_types,
                          BITSET_WORD *int_types);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *, bool));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                               uint64_t *used_by_other_stage,
                               uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);

void nir_assign_io_var_locations(struct exec_list *var_list,
                                 unsigned *size,
                                 gl_shader_stage stage);
typedef enum {
   nir_lower_io_lower_64bit_to_32 = (1 << 0),

   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options);

bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode);
typedef enum {
   /**
    * An address format which is a simple 32-bit global GPU address.
    */
   nir_address_format_32bit_global,

   /**
    * An address format which is a simple 64-bit global GPU address.
    */
   nir_address_format_64bit_global,

   /**
    * An address format which is a bounds-checked 64-bit global GPU address.
    *
    * The address is comprised as a 32-bit vec4 where .xy are a uint64_t base
    * address stored with the low bits in .x and high bits in .y, .z is a
    * size, and .w is an offset.  When the final I/O operation is lowered, .w
    * is checked against .z and the operation is predicated on the result.
    */
   nir_address_format_64bit_bounded_global,

   /**
    * An address format which is comprised of a vec2 where the first
    * component is a buffer index and the second is an offset.
    */
   nir_address_format_32bit_index_offset,

   /**
    * An address format which is a simple 32-bit offset.
    */
   nir_address_format_32bit_offset,

   /**
    * An address format representing a purely logical addressing model.  In
    * this model, all deref chains must be complete from the dereference
    * operation to the variable.  Cast derefs are not allowed.  These
    * addresses will be 32-bit scalars but the format is immaterial because
    * you can always chase the chain.
    */
   nir_address_format_logical,
} nir_address_format;
static inline unsigned
nir_address_format_bit_size(nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:         return 32;
   case nir_address_format_64bit_global:         return 64;
   case nir_address_format_64bit_bounded_global: return 32;
   case nir_address_format_32bit_index_offset:   return 32;
   case nir_address_format_32bit_offset:         return 32;
   case nir_address_format_logical:              return 32;
   }
   unreachable("Invalid address format");
}

static inline unsigned
nir_address_format_num_components(nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:         return 1;
   case nir_address_format_64bit_global:         return 1;
   case nir_address_format_64bit_bounded_global: return 4;
   case nir_address_format_32bit_index_offset:   return 2;
   case nir_address_format_32bit_offset:         return 1;
   case nir_address_format_logical:              return 1;
   }
   unreachable("Invalid address format");
}

static inline const struct glsl_type *
nir_address_format_to_glsl_type(nir_address_format addr_format)
{
   unsigned bit_size = nir_address_format_bit_size(addr_format);
   assert(bit_size == 32 || bit_size == 64);
   return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
                           nir_address_format_num_components(addr_format));
}
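
/* Illustrative sketch (not part of nir.h): sizing storage for an address in
 * a given format from the two helpers above; this example helper is
 * hypothetical. */
static inline unsigned
nir_address_format_bytes_example(nir_address_format addr_format)
{
   return (nir_address_format_bit_size(addr_format) / 8) *
          nir_address_format_num_components(addr_format);
}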
const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);

nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                nir_address_format addr_format);

nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                 nir_address_format addr_format);

nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
                                                 nir_deref_instr *deref,
                                                 nir_ssa_def *base_addr,
                                                 nir_address_format addr_format);
void nir_lower_explicit_io_instr(struct nir_builder *b,
                                 nir_intrinsic_instr *io_instr,
                                 nir_ssa_def *addr,
                                 nir_address_format addr_format);

bool nir_lower_explicit_io(nir_shader *shader,
                           nir_variable_mode modes,
                           nir_address_format);

nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_derefs(nir_shader *shader);
bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);
bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);

bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
                    bool always_precise, bool have_ffma);
bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
                                                  bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);

void nir_lower_fragcoord_wtrans(nir_shader *shader);
void nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_vote_eq_to_ballot:1;
   bool lower_subgroup_masks:1;
   bool lower_shuffle:1;
   bool lower_shuffle_to_32bit:1;
   bool lower_quad:1;
} nir_lower_subgroups_options;

bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);
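
/* Illustrative sketch (not part of nir.h): a driver with 32-wide subgroups
 * and 32-bit ballots asking for scalar lowering.  The option values are
 * arbitrary examples. */
static inline bool
nir_lower_subgroups_example(nir_shader *s)
{
   const nir_lower_subgroups_options opts = {
      .subgroup_size = 32,
      .ballot_bit_size = 32,
      .lower_to_scalar = true,
      .lower_subgroup_masks = true,
   };
   return nir_lower_subgroups(s, &opts);
}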
bool nir_lower_system_values(nir_shader *shader);

enum PACKED nir_lower_tex_packing {
   nir_lower_tex_packing_none = 0,
   /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
    * or unsigned ints based on the sampler type
    */
   nir_lower_tex_packing_16,
   /* The sampler returns 1 32-bit word of 4x8 unorm */
   nir_lower_tex_packing_8,
};
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;
   unsigned lower_ayuv_external;
   unsigned lower_xyuv_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /* Can be used to scale sampled values in range required by the format. */
   float scale_factors[32];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_tex on shaders that doesn't support implicit
    * LODs to nir_texop_txl.
    */
   bool lower_tex_without_implicit_lod;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
    */
   bool lower_txd_3d;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;

   /**
    * If true, lower nir_texop_txb that try to use shadow compare and min_lod
    * at the same time to a nir_texop_lod, some math, and nir_texop_tex.
    */
   bool lower_txb_shadow_clamp;

   /**
    * If true, lower nir_texop_txd on shadow samplers when it uses min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_shadow_clamp;

   /**
    * If true, lower nir_texop_txd on when it uses both offset and min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_offset_clamp;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler is bindless.
    */
   bool lower_txd_clamp_bindless_sampler;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler index is not statically determinable to be less than 16.
    */
   bool lower_txd_clamp_if_sampler_index_not_lt_16;

   /**
    * If true, lower nir_texop_txs with a non-0-lod into nir_texop_txs with
    * 0-lod followed by a nir_ishr.
    */
   bool lower_txs_lod;

   /**
    * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
    * mixed-up tg4 locations.
    */
   bool lower_tg4_broadcom_swizzle;

   /**
    * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls
    */
   bool lower_tg4_offsets;

   enum nir_lower_tex_packing lower_tex_packing[32];
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
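
/* Illustrative sketch (not part of nir.h): saturate the s/t coordinates of
 * sampler 0 (per the saturate_s/saturate_t bitmask docs above) and lower
 * rect textures, then run the pass.  The values are arbitrary examples. */
static inline bool
nir_lower_tex_example(nir_shader *s)
{
   nir_lower_tex_options opts = {0};
   opts.lower_rect = true;
   opts.saturate_s = 1 << 0; /* clamp the s coord for sampler #0 */
   opts.saturate_t = 1 << 0; /* clamp the t coord for sampler #0 */
   return nir_lower_tex(s, &opts);
}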
enum nir_lower_non_uniform_access_type {
   nir_lower_non_uniform_ubo_access     = (1 << 0),
   nir_lower_non_uniform_ssbo_access    = (1 << 1),
   nir_lower_non_uniform_texture_access = (1 << 2),
   nir_lower_non_uniform_image_access   = (1 << 3),
};

bool nir_lower_non_uniform_access(nir_shader *shader,
                                  enum nir_lower_non_uniform_access_type);

bool nir_lower_idiv(nir_shader *shader);
bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);

bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);

bool nir_lower_frexp(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

bool nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
                              const gl_state_index16 *uniform_state_tokens);
typedef struct nir_lower_wpos_ytransform_options {
   gl_state_index16 state_tokens[STATE_LENGTH];

   bool fs_coord_origin_upper_left:1;
   bool fs_coord_origin_lower_left:1;
   bool fs_coord_pixel_center_integer:1;
   bool fs_coord_pixel_center_half_integer:1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
bool nir_lower_fb_read(nir_shader *shader);

typedef struct nir_lower_drawpixels_options {
   gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
   gl_state_index16 scale_state_tokens[STATE_LENGTH];
   gl_state_index16 bias_state_tokens[STATE_LENGTH];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps:1;
   bool scale_and_bias:1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);
typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);

bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
typedef enum {
   nir_lower_int_source_mods = 1 << 0,
   nir_lower_float_source_mods = 1 << 1,
   nir_lower_triop_abs = 1 << 2,
   nir_lower_all_source_mods = (1 << 3) - 1
} nir_lower_to_source_mods_flags;

bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);

bool nir_lower_gs_intrinsics(nir_shader *shader);
typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);

bool nir_lower_bit_size(nir_shader *shader,
                        nir_lower_bit_size_callback callback,
                        void *callback_data);

nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);

nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
                       nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
typedef enum {
   nir_lower_interpolation_at_sample = (1 << 1),
   nir_lower_interpolation_at_offset = (1 << 2),
   nir_lower_interpolation_centroid  = (1 << 3),
   nir_lower_interpolation_pixel     = (1 << 4),
   nir_lower_interpolation_sample    = (1 << 5),
} nir_lower_interpolation_options;

bool nir_lower_interpolation(nir_shader *shader,
                             nir_lower_interpolation_options options);

bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);

/* This is here for unit tests. */
bool nir_opt_comparison_pre_impl(nir_function_impl *impl);
bool nir_opt_comparison_pre(nir_shader *shader);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_dead_write_vars(nir_shader *shader);

bool nir_opt_deref_impl(nir_function_impl *impl);
bool nir_opt_deref(nir_shader *shader);

bool nir_opt_find_array_copies(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);

bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_large_constants(nir_shader *shader,
                             glsl_type_size_align_func size_align,
                             unsigned threshold);

bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);

bool nir_opt_move_comparisons(nir_shader *shader);

bool nir_opt_move_load_ubo(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
                             bool indirect_load_ok, bool expensive_alu_ok);

bool nir_opt_rematerialize_compares(nir_shader *shader);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_shrink_load(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_vectorize(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_strip(nir_shader *shader);

void nir_sweep(nir_shader *shader);
void nir_remap_dual_slot_attributes(nir_shader *shader,
                                    uint64_t *dual_slot_inputs);
uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);

bool nir_lower_sincos(nir_shader *shader);
static inline bool
nir_variable_is_in_ubo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ubo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_ssbo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ssbo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_block(const nir_variable *var)
{
   return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
}