/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include <stdio.h>

#ifndef NDEBUG
#include "util/debug.h"
#endif /* NDEBUG */

#include "nir_opcodes.h"
#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 4
typedef uint8_t nir_component_mask_t;
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
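
/* Usage sketch (hypothetical instruction type, for illustration only):
 * given a struct that embeds the in_type as a field,
 *
 *    NIR_DEFINE_CAST(nir_instr_as_fancy, nir_instr, nir_fancy_instr, instr,
 *                    type, nir_instr_type_fancy)
 *
 * expands to a checked downcast:
 *
 *    static inline nir_fancy_instr *
 *    nir_instr_as_fancy(const nir_instr *parent)
 *    {
 *       assert(parent && parent->type == nir_instr_type_fancy);
 *       return exec_node_data(nir_fancy_instr, parent, instr);
 *    }
 *
 * The real instantiations (nir_instr_as_alu, nir_cf_node_as_block, etc.)
 * appear later in this header.
 */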
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
   int swizzle;
} nir_state_slot;
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_global          = (1 << 2),
   nir_var_local           = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_shader_storage  = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_shared          = (1 << 8),
} nir_variable_mode;
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne  = 1, /* round to nearest even */
   nir_rounding_mode_ru    = 2, /* round up */
   nir_rounding_mode_rd    = 3, /* round down */
   nir_rounding_mode_rtz   = 4, /* round towards zero */
} nir_rounding_mode;
typedef union {
   float f32[NIR_MAX_VEC_COMPONENTS];
   double f64[NIR_MAX_VEC_COMPONENTS];
   int8_t i8[NIR_MAX_VEC_COMPONENTS];
   uint8_t u8[NIR_MAX_VEC_COMPONENTS];
   int16_t i16[NIR_MAX_VEC_COMPONENTS];
   uint16_t u16[NIR_MAX_VEC_COMPONENTS];
   int32_t i32[NIR_MAX_VEC_COMPONENTS];
   uint32_t u32[NIR_MAX_VEC_COMPONENTS];
   int64_t i64[NIR_MAX_VEC_COMPONENTS];
   uint64_t u64[NIR_MAX_VEC_COMPONENTS];
} nir_const_value;
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[NIR_MAX_VEC_COMPONENTS];

   /* We could get this from var->type, but not having to care about the
    * type makes clone *much* easier.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Enum keeping track of how a variable was declared.
 */
typedef enum {
   /**
    * Normal declaration.
    */
   nir_var_declared_normally = 0,

   /**
    * Variable is implicitly generated by the compiler and should not be
    * visible via the API.
    */
   nir_var_hidden,
} nir_var_declaration_type;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;
      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned centroid:1;
      unsigned sample:1;
      unsigned patch:1;
      unsigned invariant:1;

      /**
       * When separate shader programs are enabled, only input/outputs between
       * the stages of a multi-stage separate program can be safely removed
       * from the shader interface. Other input/outputs must remain active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       * @{
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;
      /*@}*/
      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components.  For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed.  In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * Non-zero if this variable is considered bindless as defined by
       * ARB_bindless_texture.
       */
      unsigned bindless:1;
      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * Was a transform feedback buffer set in the shader?
       */
      unsigned explicit_xfb_buffer:1;

      /**
       * Was a transform feedback stride set in the shader?
       */
      unsigned explicit_xfb_stride:1;

      /**
       * Was an explicit offset set in the shader?
       */
      unsigned explicit_offset:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;
      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
       * stream of the i-th component.
       */
      unsigned stream;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      int binding;

      /**
       * Location an atomic counter or transform feedback is stored at.
       */
      unsigned offset;

      /**
       * Transform feedback buffer.
       */
      unsigned xfb_buffer;

      /**
       * Transform feedback stride.
       */
      unsigned xfb_stride;
      /**
       * How the variable was declared.  See nir_var_declaration_type.
       *
       * This is used to detect variables generated by the compiler, so should
       * not be visible via the API.
       */
      unsigned how_declared:2;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         enum gl_access_qualifier access;

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;
   } data;
   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree.  In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   unsigned num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/
   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;

   /**
    * Description of per-member data for per-member struct variables
    *
    * This is used for variables which are actually an amalgamation of
    * multiple entities such as a struct of built-in values or a struct of
    * inputs each with their own layout specifier.  This is only allowed on
    * variables with a struct or array of array of struct type.
    */
   unsigned num_members;
   struct nir_variable_data *members;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local;
}
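
/* Iteration sketch (hypothetical pass code, not part of this header):
 * count the uniforms that were given an explicit binding in the shader.
 *
 *    static unsigned
 *    count_explicitly_bound_uniforms(nir_shader *shader)
 *    {
 *       unsigned count = 0;
 *       nir_foreach_variable(var, &shader->uniforms) {
 *          if (var->data.explicit_binding)
 *             count++;
 *       }
 *       return count;
 *    }
 */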
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components; /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_global;

   /**
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element.  This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends.  By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;
} nir_register;
#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)
typedef enum {
   nir_instr_type_alu,
   nir_instr_type_deref,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;
typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}
static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}
static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}
typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char *name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   /** Instruction which produces this SSA value. */
   nir_instr *parent_instr;

   /** set of nir_instrs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;
typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
typedef struct nir_src {
   union {
      /** Instruction that consumes this value as a source. */
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()
#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()
#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}
static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_src_num_components(nir_src src)
{
   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}
static inline bool
nir_src_is_const(nir_src src)
{
   return src.is_ssa &&
          src.ssa->parent_instr->type == nir_instr_type_load_const;
}

int64_t nir_src_as_int(nir_src src);
uint64_t nir_src_as_uint(nir_src src);
bool nir_src_as_bool(nir_src src);
double nir_src_as_float(nir_src src);
int64_t nir_src_comp_as_int(nir_src src, unsigned component);
uint64_t nir_src_comp_as_uint(nir_src src, unsigned component);
bool nir_src_comp_as_bool(nir_src src, unsigned component);
double nir_src_comp_as_float(nir_src src, unsigned component);
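
/* Sketch of the usual pattern (hypothetical helper, not part of this
 * header): check for a known constant before folding it.
 *
 *    static bool
 *    offset_is_const_multiple_of_16(nir_src src)
 *    {
 *       return nir_src_is_const(src) && nir_src_as_uint(src) % 16 == 0;
 *    }
 *
 * The nir_src_comp_as_* variants read a single component of a vector
 * constant instead of requiring a scalar.
 */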
static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

static inline unsigned
nir_dest_num_components(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
typedef struct {
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit.  For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers.  Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from.  Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;

typedef struct {
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers.  Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;
#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
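
/* For example, nir_type_float16 == (16 | nir_type_float), so
 *
 *    nir_alu_type_get_type_size(nir_type_float16) == 16
 *    nir_alu_type_get_base_type(nir_type_float16) == nir_type_float
 *
 * and a sized type can be rebuilt as (base_type | bit_size).
 */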
static inline nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
{
   switch (base_type) {
   case GLSL_TYPE_BOOL:
      return nir_type_bool32;

   case GLSL_TYPE_UINT:
      return nir_type_uint32;

   case GLSL_TYPE_INT:
      return nir_type_int32;

   case GLSL_TYPE_UINT16:
      return nir_type_uint16;

   case GLSL_TYPE_INT16:
      return nir_type_int16;

   case GLSL_TYPE_UINT8:
      return nir_type_uint8;

   case GLSL_TYPE_INT8:
      return nir_type_int8;

   case GLSL_TYPE_UINT64:
      return nir_type_uint64;

   case GLSL_TYPE_INT64:
      return nir_type_int64;

   case GLSL_TYPE_FLOAT:
      return nir_type_float32;

   case GLSL_TYPE_FLOAT16:
      return nir_type_float16;

   case GLSL_TYPE_DOUBLE:
      return nir_type_float64;

   default:
      unreachable("unknown type");
   }
}
static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);
typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs.  Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[NIR_MAX_VEC_COMPONENTS];

   /**
    * The type of vector that each input takes.  Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;
extern const nir_op_info nir_op_infos[nir_num_opcodes];
typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either.  This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;
void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}
static inline nir_component_mask_t
nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
{
   nir_component_mask_t read_mask = 0;
   for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
      if (!nir_alu_instr_channel_used(instr, src, c))
         continue;

      read_mask |= (1 << instr->src[src].swizzle[c]);
   }
   return read_mask;
}
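
/* Worked example: for a per-component opcode with write_mask == 0x5
 * (components x and z) and a source swizzle of {3, 0, 1, 2}, channels 0
 * and 2 are used, so the source's components 3 and 1 are read and the
 * returned mask is (1 << 3) | (1 << 1) == 0xa.
 */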
/**
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}
bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_array_wildcard,
   nir_deref_type_struct,
   nir_deref_type_cast,
} nir_deref_type;
typedef struct {
   nir_instr instr;

   /** The type of this deref instruction */
   nir_deref_type deref_type;

   /** The mode of the underlying variable */
   nir_variable_mode mode;

   /** The dereferenced type of the resulting pointer value */
   const struct glsl_type *type;

   union {
      /** Variable being dereferenced if deref_type is a deref_var */
      nir_variable *var;

      /** Parent deref if deref_type is not deref_var */
      nir_src parent;
   };

   /** Additional deref parameters */
   union {
      struct {
         nir_src index;
      } arr;

      struct {
         unsigned index;
      } strct;
   };

   /** Destination to store the resulting "pointer" */
   nir_dest dest;
} nir_deref_instr;
NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
                type, nir_instr_type_deref)
static inline nir_deref_instr *
nir_src_as_deref(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_deref)
      return NULL;

   return nir_instr_as_deref(src.ssa->parent_instr);
}
static inline nir_deref_instr *
nir_deref_instr_parent(const nir_deref_instr *instr)
{
   if (instr->deref_type == nir_deref_type_var)
      return NULL;
   else
      return nir_src_as_deref(instr->parent);
}
static inline nir_variable *
nir_deref_instr_get_variable(const nir_deref_instr *instr)
{
   while (instr->deref_type != nir_deref_type_var) {
      if (instr->deref_type == nir_deref_type_cast)
         return NULL;

      instr = nir_deref_instr_parent(instr);
   }

   return instr->var;
}
bool nir_deref_instr_has_indirect(nir_deref_instr *instr);

bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);
typedef struct {
   nir_instr instr;

   struct nir_function *callee;

   unsigned num_params;
   nir_src params[];
} nir_call_instr;

#include "nir_intrinsics.h"

#define NIR_INTRINSIC_MAX_CONST_INDEX 4
/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_src src[];
} nir_intrinsic_instr;
static inline nir_variable *
nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
}
/**
 * \name NIR intrinsics semantic flags
 *
 * Information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if none of its output
    * values are being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access.  This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT = 8,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE = 9,

   /**
    * A binary nir_op to use when performing a reduction or scan operation
    */
   NIR_INTRINSIC_REDUCTION_OP = 10,

   /**
    * Cluster size for reduction operations
    */
   NIR_INTRINSIC_CLUSTER_SIZE = 11,

   /**
    * Parameter index for a load_param intrinsic
    */
   NIR_INTRINSIC_PARAM_IDX = 12,

   /**
    * Image dimensionality for image intrinsics
    *
    * One of GLSL_SAMPLER_DIM_*
    */
   NIR_INTRINSIC_IMAGE_DIM = 13,

   /**
    * Non-zero if we are accessing an array image
    */
   NIR_INTRINSIC_IMAGE_ARRAY = 14,

   /**
    * Image format for image intrinsics
    */
   NIR_INTRINSIC_FORMAT = 15,

   /**
    * Access qualifiers for image intrinsics
    */
   NIR_INTRINSIC_ACCESS = 16,

   /**
    * Alignment for offsets and addresses
    *
    * These two parameters specify an alignment in terms of a multiplier and
    * an offset.  The offset or address parameter X of the intrinsic is
    * guaranteed to satisfy the following:
    *
    *                (X - align_offset) % align_mul == 0
    */
   NIR_INTRINSIC_ALIGN_MUL = 17,
   NIR_INTRINSIC_ALIGN_OFFSET = 18,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
#define NIR_INTRINSIC_MAX_INPUTS 5

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   /** does this intrinsic have a destination? */
   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;
extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
static inline unsigned
nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   assert(srcn < info->num_srcs);
   if (info->src_components[srcn])
      return info->src_components[srcn];
   else
      return intr->num_components;
}
static inline unsigned
nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   if (!info->has_dest)
      return 0;
   else if (info->dest_components)
      return info->dest_components;
   else
      return intr->num_components;
}
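
/* Illustration: for a vectorized intrinsic (dest_components == 0 in its
 * nir_intrinsic_info), a vec3 operation sets num_components = 3 and both
 * helpers above report 3 for the variable-sized sources and destination;
 * fixed-size sources and destinations report their fixed size regardless
 * of num_components.  See nir_intrinsics.h for the per-opcode tables.
 */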
#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                             \
static inline type                                                            \
nir_intrinsic_##name(const nir_intrinsic_instr *instr)                        \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
}                                                                             \
static inline void                                                            \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)                \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;       \
}
INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
INTRINSIC_IDX_ACCESSORS(format, FORMAT, unsigned)
INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
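
/* Builder sketch (hypothetical store setup; shader and drvloc are assumed
 * to exist in the caller): the accessors look up which const_index slot a
 * flag occupies for the given opcode, so passes never index const_index[]
 * directly.
 *
 *    nir_intrinsic_instr *store =
 *       nir_intrinsic_instr_create(shader, nir_intrinsic_store_output);
 *    store->num_components = 4;
 *    nir_intrinsic_set_base(store, drvloc);
 *    nir_intrinsic_set_write_mask(store, 0xf);
 */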
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
                        unsigned align_mul, unsigned align_offset)
{
   assert(util_is_power_of_two_nonzero(align_mul));
   assert(align_offset < align_mul);
   nir_intrinsic_set_align_mul(intrin, align_mul);
   nir_intrinsic_set_align_offset(intrin, align_offset);
}
/** Returns a simple alignment for a load/store intrinsic offset
 *
 * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
 * and ALIGN_OFFSET parameters, this helper takes both into account and
 * provides a single simple alignment parameter.  The offset X is guaranteed
 * to satisfy X % align == 0.
 */
static inline unsigned
nir_intrinsic_align(nir_intrinsic_instr *intrin)
{
   const unsigned align_mul = nir_intrinsic_align_mul(intrin);
   const unsigned align_offset = nir_intrinsic_align_offset(intrin);
   assert(align_offset < align_mul);
   return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
}
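
/* Worked example: align_mul == 16 and align_offset == 12 means the offset
 * X satisfies (X - 12) % 16 == 0.  Since 12 == 0b1100, the lowest set bit
 * gives 1 << (ffs(12) - 1) == 4, and indeed every such X is a multiple of
 * 4, so nir_intrinsic_align() returns 4.
 */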
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparator, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_min_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ms_mcs, /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_deref, /* < deref pointing to the texture */
   nir_tex_src_sampler_deref, /* < deref pointing to the sampler */
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_plane,          /* < selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;
;
1420 nir_texop_tex
, /**< Regular texture look-up */
1421 nir_texop_txb
, /**< Texture look-up with LOD bias */
1422 nir_texop_txl
, /**< Texture look-up with explicit LOD */
1423 nir_texop_txd
, /**< Texture look-up with partial derivatives */
1424 nir_texop_txf
, /**< Texel fetch with explicit LOD */
1425 nir_texop_txf_ms
, /**< Multisample texture fetch */
1426 nir_texop_txf_ms_mcs
, /**< Multisample compression value fetch */
1427 nir_texop_txs
, /**< Texture size */
1428 nir_texop_lod
, /**< Texture lod query */
1429 nir_texop_tg4
, /**< Texture gather */
1430 nir_texop_query_levels
, /**< Texture levels query */
1431 nir_texop_texture_samples
, /**< Texture samples query */
1432 nir_texop_samples_identical
, /**< Query whether all samples are definitely
1440 enum glsl_sampler_dim sampler_dim
;
1441 nir_alu_type dest_type
;
1446 unsigned num_srcs
, coord_components
;
1447 bool is_array
, is_shadow
;
1450 * If is_shadow is true, whether this is the old-style shadow that outputs 4
1451 * components or the new-style shadow that outputs 1 component.
1453 bool is_new_style_shadow
;
1455 /* gather component selector */
1456 unsigned component
: 2;
1458 /** The texture index
1460 * If this texture instruction has a nir_tex_src_texture_offset source,
1461 * then the texture index is given by texture_index + texture_offset.
1463 unsigned texture_index
;
1465 /** The size of the texture array or 0 if it's not an array */
1466 unsigned texture_array_size
;
1468 /** The sampler index
1470 * The following operations do not require a sampler and, as such, this
1471 * field should be ignored:
1473 * - nir_texop_txf_ms
1476 * - nir_texop_query_levels
1477 * - nir_texop_texture_samples
1478 * - nir_texop_samples_identical
1480 * If this texture instruction has a nir_tex_src_sampler_offset source,
1481 * then the sampler index is given by sampler_index + sampler_offset.
1483 unsigned sampler_index
;
static inline unsigned
nir_tex_instr_dest_size(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}
static inline bool
nir_alu_instr_is_comparison(const nir_alu_instr *instr)
{
   switch (instr->op) {
   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine:
   case nir_op_i2b:
   case nir_op_f2b:
   case nir_op_inot:
   case nir_op_fnot:
      return true;
   default:
      return false;
   }
}
static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}
static inline unsigned
nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
    * the offset, since a cube maps to a single face.
    */
   if (instr->src[src].src_type == nir_tex_src_offset) {
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
         return 2;
      else if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}
static inline int
nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}
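
/* Sketch (hypothetical helper, not part of this header):
 *
 *    static bool
 *    tex_is_projected(const nir_tex_instr *tex)
 *    {
 *       return nir_tex_instr_src_index(tex, nir_tex_src_projector) >= 0;
 *    }
 */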
void nir_tex_instr_add_src(nir_tex_instr *tex,
                           nir_tex_src_type src_type,
                           nir_src src);

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;

typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;

   nir_jump_type type;
} nir_jump_instr;

/* creates a new SSA variable in an undefined state */

typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;
typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;
typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entrys.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * of a and b are swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops.  The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish.  Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block).  Each function also has a start block and an end block which
 * all return statements point to (which is always empty).  Together, all the
 * blocks with their predecessors and successors make up the control flow
 * graph (CFG) of the function.  There are helpers that modify the tree of
 * control flow nodes while modifying the CFG appropriately; these should be
 * used instead of modifying the tree directly.
 */

typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;
typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;
typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /** < list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_blocks on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;
static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}
static inline bool
nir_block_ends_in_jump(nir_block *block)
{
   return !exec_list_is_empty(&block->instr_list) &&
          nir_block_last_instr(block)->type == nir_instr_type_jump;
}
#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
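
/* Traversal sketch (hypothetical pass code, not part of this header):
 * count the ALU instructions in a block.
 *
 *    static unsigned
 *    count_alu_instrs(nir_block *block)
 *    {
 *       unsigned count = 0;
 *       nir_foreach_instr(instr, block) {
 *          if (instr->type == nir_instr_type_alu)
 *             count++;
 *       }
 *       return count;
 *    }
 *
 * Use the _safe variants when removing the current instruction while
 * iterating.
 */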
typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;

typedef struct {
   nir_if *nif;

   nir_instr *conditional_instr;

   nir_block *break_block;
   nir_block *continue_from_block;

   bool continue_from_then;

   struct list_head loop_terminator_link;
} nir_loop_terminator;
typedef struct {
   /* Number of instructions in the loop */
   unsigned num_instructions;

   /* Maximum number of times the loop is run (if known) */
   unsigned max_trip_count;

   /* Do we know the exact number of times the loop will be run */
   bool exact_trip_count_known;

   /* Unroll the loop regardless of its size */
   bool force_unroll;

   /* Does the loop contain complex loop terminators, continues or other
    * complex behaviours?  If this is true we can't rely on
    * loop_terminator_list to be complete or accurate.
    */
   bool complex_loop;

   nir_loop_terminator *limiting_terminator;

   /* A list of loop_terminators terminating this loop. */
   struct list_head loop_terminator_list;
} nir_loop_info;
typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */

   nir_loop_info *info;
} nir_loop;
/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
   nir_metadata_loop_analysis = 0x10,
} nir_metadata;
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}
static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}
static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}
NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
typedef struct {
   uint8_t num_components;
   uint8_t bit_size;
} nir_parameter;
typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;
} nir_function;
typedef struct nir_shader_compiler_options {
   bool lower_fdiv;
   bool lower_ffma;
   bool fuse_ffma;
   bool lower_flrp32;
   /** Lowers flrp when it does not support doubles */
   bool lower_flrp64;
   bool lower_fpow;
   bool lower_fsat;
   bool lower_fsqrt;
   bool lower_fmod32;
   bool lower_fmod64;
   /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
   bool lower_bitfield_extract;
   /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
   bool lower_bitfield_extract_to_shifts;
   /** Lowers bitfield_insert to bfi/bfm */
   bool lower_bitfield_insert;
   /** Lowers bitfield_insert to bfm, compares, and shifts. */
   bool lower_bitfield_insert_to_shifts;
   /** Lowers bitfield_reverse to shifts. */
   bool lower_bitfield_reverse;
   /** Lowers bit_count to shifts. */
   bool lower_bit_count;
   /** Lowers bfm to shifts and subtracts. */
   bool lower_bfm;
   /** Lowers ifind_msb to compare and ufind_msb */
   bool lower_ifind_msb;
   /** Lowers find_lsb to ufind_msb and logic ops */
   bool lower_find_lsb;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
   bool lower_mul_high;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /* lower b2f to iand */
   bool lower_b2f;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffloor to fsub+ffract: */
   bool lower_ffloor;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   /** lowers fceil to fneg+ffloor+fneg: */
   bool lower_fceil;

   bool lower_ldexp;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   bool lower_all_io_to_temps;

   /**
    * Does the driver support real 32-bit integers?  (Otherwise, integers
    * are simulated by floats.)
    */
   bool native_integers;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   /**
    * If enabled, gl_BaseVertex will be lowered as:
    * is_indexed_draw (~0/0) & firstvertex
    */
   bool lower_base_vertex;

   /**
    * If enabled, gl_HelperInvocation will be lowered as:
    *
    *   !((1 << sample_id) & sample_mask_in)
    *
    * This depends on some possibly hw implementation details, which may
    * not be true for all hw.  In particular, it assumes that the FS is only
    * executed for covered samples or for helper invocations.  So, do not
    * blindly enable this option.
    *
    * Note: See also issue #22 in ARB_shader_image_load_store
    */
   bool lower_helper_invocation;

   bool lower_cs_local_index_from_id;
   bool lower_cs_local_id_from_index;

   bool lower_device_index_to_zero;

   /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
   bool lower_wpos_pntc;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;

   unsigned max_unroll_iterations;
} nir_shader_compiler_options;
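
/* Sketch of a driver-side options block (hypothetical values, no real
 * driver implied); nir_shader only stores a pointer to it, so it is kept
 * in a single static copy:
 *
 *    static const nir_shader_compiler_options my_hw_nir_options = {
 *       .lower_fsat = true,
 *       .lower_scmp = true,
 *       .fuse_ffma = true,
 *       .native_integers = true,
 *       .max_unroll_iterations = 32,
 *    };
 */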
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /** list of global register in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** Constant data associated with this shader.
    *
    * Constant data is loaded through load_constant intrinsics.  See also
    * nir_opt_large_constants.
    */
   void *constant_data;
   unsigned constant_data_size;
} nir_shader;
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}
#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);
/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_local);
   exec_list_push_tail(&impl->locals, &var->node);
}

/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);

/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
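/* Illustrative sketch: a pass that only touches instructions within blocks
 * (no CFG changes) would typically end with something like the following to
 * keep the block-index and dominance metadata valid.  The flag names are
 * real nir_metadata values; the surrounding pass and "impl" are assumed.
 *
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 */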
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
                                        nir_deref_type deref_type);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/*
 * NIR Cursors and Instruction Insertion API
 */

/**
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;
typedef struct {
   nir_cursor_option option;

   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;
static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}
bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}
static inline nir_cursor
nir_before_src(nir_src *src, bool is_if_condition)
{
   if (is_if_condition) {
      nir_block *prev_block =
         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
      assert(!nir_block_ends_in_jump(prev_block));
      return nir_after_block(prev_block);
   } else if (src->parent_instr->type == nir_instr_type_phi) {
#ifndef NDEBUG
      /* Debug check: the source must really be one of the phi's sources. */
      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
      bool found = false;
      nir_foreach_phi_src(phi_src, cond_phi) {
         if (phi_src->src.ssa == src->ssa) {
            found = true;
            break;
         }
      }
      assert(found);
#endif
      /* The LIST_ENTRY macro is a generic container-of macro, it just happens
       * to have a more specific name.
       */
      nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
      return nir_after_block_before_jump(phi_src->pred);
   } else {
      return nir_before_instr(src->parent_instr);
   }
}
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
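/* Illustrative sketch: creating a 32-bit scalar constant and inserting it
 * before an existing instruction.  "shader" and "instr" are assumed to be a
 * valid nir_shader and one of its instructions.
 *
 *    nir_load_const_instr *zero = nir_load_const_instr_create(shader, 1, 32);
 *    zero->value.u32[0] = 0;
 *    nir_instr_insert(nir_before_instr(instr), &zero->instr);
 */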
static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
void nir_instr_remove_v(nir_instr *instr);

static inline nir_cursor
nir_instr_remove(nir_instr *instr)
{
   nir_cursor cursor;
   nir_instr *prev = nir_instr_prev(instr);
   if (prev) {
      cursor = nir_after_instr(prev);
   } else {
      cursor = nir_before_block(instr->block);
   }
   nir_instr_remove_v(instr);
   return cursor;
}
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
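/* Illustrative sketch: a nir_foreach_src callback that checks whether every
 * source of an instruction is in SSA form.  Returning false from the
 * callback aborts the walk, so nir_foreach_src() itself returns false as
 * soon as a register source is seen.  The callback name is hypothetical.
 *
 *    static bool
 *    src_is_ssa_cb(nir_src *src, void *state)
 *    {
 *       (void) state;
 *       return src->is_ssa;
 *    }
 *
 *    bool all_ssa = nir_foreach_src(instr, src_is_ssa_cb, NULL);
 */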
nir_const_value *nir_src_as_const_value(nir_src src);

static inline struct nir_instr *
nir_src_instr(const struct nir_src *src)
{
   return src->is_ssa ? src->ssa->parent_instr : NULL;
}
#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro)             \
static inline c_type *                                               \
nir_src_as_ ## name (struct nir_src *src)                            \
{                                                                    \
   return src->is_ssa && src->ssa->parent_instr->type == type_enum   \
          ? cast_macro(src->ssa->parent_instr) : NULL;               \
}                                                                    \
static inline const c_type *                                         \
nir_src_as_ ## name ## _const(const struct nir_src *src)             \
{                                                                    \
   return src->is_ssa && src->ssa->parent_instr->type == type_enum   \
          ? cast_macro(src->ssa->parent_instr) : NULL;               \
}

NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
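/* Illustrative sketch: the instantiation above generates
 * nir_src_as_alu_instr(), which yields the defining ALU instruction of an
 * SSA source, or NULL otherwise.  "src" is an assumed nir_src.
 *
 *    nir_alu_instr *alu = nir_src_as_alu_instr(&src);
 *    if (alu != NULL && alu->op == nir_op_fneg) {
 *       // the source is the result of a float negate
 *    }
 */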
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
/*
 * finds the next basic block in source-code order, returns NULL if there is
 * none
 */
nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */
nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
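/* Illustrative sketch: counting the ALU instructions in a function
 * implementation with the iteration macros above.  "impl" is an assumed
 * nir_function_impl.
 *
 *    unsigned num_alu = 0;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block) {
 *          if (instr->type == nir_instr_type_alu)
 *             num_alu++;
 *       }
 *    }
 */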
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);

nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
#ifdef DEBUG
void nir_validate_shader(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

static inline bool
should_serialize_deserialize_nir(void)
{
   static int test_serialize = -1;
   if (test_serialize < 0)
      test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);

   return test_serialize;
}

static inline bool
should_print_nir(void)
{
   static int should_print = -1;
   if (should_print < 0)
      should_print = env_var_as_boolean("NIR_PRINT", false);

   return should_print;
}
#else
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* DEBUG */
#define _PASS(pass, nir, do_pass) do {                               \
   do_pass                                                           \
   nir_validate_shader(nir, "after " #pass);                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
   if (should_serialize_deserialize_nir()) {                         \
      void *mem_ctx = ralloc_parent(nir);                            \
      nir = nir_shader_serialize_deserialize(mem_ctx, nir);          \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir,          \
   nir_metadata_set_validation_flag(nir);                            \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      if (should_print_nir())                                        \
         nir_print_shader(nir, stdout);                              \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir,                  \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   pass(nir, ##__VA_ARGS__);                                         \
   if (should_print_nir())                                           \
      nir_print_shader(nir, stdout);                                 \
)
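/* Illustrative sketch: a typical driver optimization loop built on the pass
 * macros above, repeating until no pass reports progress.  The particular
 * pass selection is an example, not a recommendation.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *    } while (progress);
 *
 *    NIR_PASS_V(nir, nir_lower_vec_to_movs);
 */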
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);
bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
bool nir_split_per_member_structs(nir_shader *shader);
bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
void nir_lower_deref_copy_instr(struct nir_builder *b,
                                nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);

void nir_fixup_deref_modes(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                               uint64_t *used_by_other_stage,
                               uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_constant_varyings(nir_shader *producer, nir_shader *consumer);
typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *),
                  nir_lower_io_options);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_derefs(nir_shader *shader);
bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);

bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
                                                  bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_vote_eq_to_ballot:1;
   bool lower_subgroup_masks:1;
   bool lower_shuffle:1;
   bool lower_shuffle_to_32bit:1;
   bool lower_quad:1;
} nir_lower_subgroups_options;
bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);

bool nir_lower_system_values(nir_shader *shader);
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;
   unsigned lower_ayuv_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
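/* Illustrative sketch: lowering texture projectors for every sampler type
 * and saturating the s/t coordinates of sampler 0.  The field values are
 * example choices for a hypothetical driver, not defaults.
 *
 *    nir_lower_tex_options tex_options = {0};
 *    tex_options.lower_txp = ~0u;
 *    tex_options.saturate_s = 1 << 0;
 *    tex_options.saturate_t = 1 << 0;
 *    nir_lower_tex(shader, &tex_options);
 */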
bool nir_lower_idiv(nir_shader *shader);

bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

bool nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
                              const gl_state_index16 *uniform_state_tokens);
typedef struct nir_lower_wpos_ytransform_options {
   gl_state_index16 state_tokens[STATE_LENGTH];
   bool fs_coord_origin_upper_left:1;
   bool fs_coord_origin_lower_left:1;
   bool fs_coord_pixel_center_integer:1;
   bool fs_coord_pixel_center_half_integer:1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
typedef struct nir_lower_drawpixels_options {
   gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
   gl_state_index16 scale_state_tokens[STATE_LENGTH];
   gl_state_index16 bias_state_tokens[STATE_LENGTH];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps:1;
   bool scale_and_bias:1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);
typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);

typedef enum {
   nir_lower_int_source_mods = 1 << 0,
   nir_lower_float_source_mods = 1 << 1,
   nir_lower_all_source_mods = (1 << 2) - 1
} nir_lower_to_source_mods_flags;

bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);

bool nir_lower_gs_intrinsics(nir_shader *shader);

typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);

bool nir_lower_bit_size(nir_shader *shader,
                        nir_lower_bit_size_callback callback,
                        void *callback_data);

typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
} nir_lower_int64_options;

bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8)
} nir_lower_doubles_options;

bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_dead_write_vars(nir_shader *shader);

bool nir_opt_find_array_copies(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_if(nir_shader *shader);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_large_constants(nir_shader *shader,
                             glsl_type_size_align_func size_align,
                             unsigned threshold);

bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);

bool nir_opt_move_comparisons(nir_shader *shader);

bool nir_opt_move_load_ubo(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);

bool nir_opt_remove_phis_impl(nir_function_impl *impl);
bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_shrink_load(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_sweep(nir_shader *shader);

void nir_remap_dual_slot_attributes(nir_shader *shader,
                                    uint64_t *dual_slot_inputs);
uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);