/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include "util/debug.h"

#include "nir_opcodes.h"

#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
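/* Illustrative sketch (not part of the original header): given the
 * definitions below, NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr,
 * nir_alu_instr, instr, type, nir_instr_type_alu) expands to roughly:
 *
 *    static inline nir_alu_instr *
 *    nir_instr_as_alu(const nir_instr *parent)
 *    {
 *       assert(parent && parent->type == nir_instr_type_alu);
 *       return exec_node_data(nir_alu_instr, parent, instr);
 *    }
 *
 * i.e. a checked downcast from the embedded base struct to the containing
 * derived struct.
 */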
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
   int swizzle;
} nir_state_slot;
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_global          = (1 << 2),
   nir_var_local           = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_shader_storage  = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_param           = (1 << 7),
   nir_var_shared          = (1 << 8),
} nir_variable_mode;
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne  = 1, /* round to nearest even */
   nir_rounding_mode_ru    = 2, /* round up */
   nir_rounding_mode_rd    = 3, /* round down */
   nir_rounding_mode_rtz   = 4, /* round towards zero */
} nir_rounding_mode;
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable. Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[4];

   /* We could get this from var->type, but it makes clone *much* easier
    * not to have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned invariant:1;

      /**
       * When separate shader programs are enabled, only input/outputs between
       * the stages of a multi-stage separate program can be safely removed
       * from the shader interface. Other input/outputs must remain active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       * @{
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;
      /*@}*/

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components. For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * Non-zero if this variable is considered bindless as defined by
       * ARB_bindless_texture.
       */
      unsigned bindless:1;

      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
       * stream of the i-th component.
       */
      unsigned stream;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      int binding;

      /**
       * Location an atomic counter is stored at.
       */
      unsigned offset;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         bool read_only;  /**< "readonly" qualifier. */
         bool write_only; /**< "writeonly" qualifier. */
         bool coherent;
         bool _volatile;
         bool restrict_flag;

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;
   } data;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree. In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   unsigned num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/

   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)
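/* Illustrative sketch (not from the original header): counting the inputs
 * of a shader with nir_foreach_variable. `shader` is assumed to be a valid
 * nir_shader; `inputs` is its list of input variables declared in struct
 * nir_shader below.
 *
 *    unsigned num_inputs = 0;
 *    nir_foreach_variable(var, &shader->inputs) {
 *       if (var->data.mode == nir_var_shader_in)
 *          num_inputs++;
 *    }
 *
 * Use the _safe variant when the loop body may remove `var` from the list.
 */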
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
}
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components;  /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   unsigned bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_global;

   /*
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element. This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends. By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;
} nir_register;
#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)
typedef enum {
   nir_instr_type_alu,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;

typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags. For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}
typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char *name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   /** Instruction which produces this SSA value. */
   nir_instr *parent_instr;

   /** set of nir_instrs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;
struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
typedef struct nir_src {
   union {
      /** Instruction that consumes this value as a source. */
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;
static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()
#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_src_num_components(nir_src src)
{
   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

static inline unsigned
nir_dest_num_components(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
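/* Illustrative sketch (not from the original header): wrapping an SSA value
 * as a source and querying its shape. `def` is assumed to be a valid
 * nir_ssa_def produced by some instruction.
 *
 *    nir_src src = nir_src_for_ssa(def);
 *    unsigned bits  = nir_src_bit_size(src);       // == def->bit_size
 *    unsigned comps = nir_src_num_components(src); // == def->num_components
 *
 * The same helpers work transparently for register sources, reading the
 * shape from src.reg.reg instead.
 */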
typedef struct {
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[4];
} nir_alu_src;

typedef struct {
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;
#define NIR_ALU_TYPE_SIZE_MASK      0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
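/* Illustrative sketch (not from the original header): a nir_alu_type packs
 * the base type in the low three bits and the bit width in the remaining
 * bits, so the two getters are simple masks:
 *
 *    nir_alu_type_get_base_type(nir_type_float32); // == nir_type_float
 *    nir_alu_type_get_type_size(nir_type_float32); // == 32
 */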
static inline nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
{
   switch (base_type) {
   case GLSL_TYPE_BOOL:
      return nir_type_bool32;

   case GLSL_TYPE_UINT:
      return nir_type_uint32;

   case GLSL_TYPE_INT:
      return nir_type_int32;

   case GLSL_TYPE_UINT16:
      return nir_type_uint16;

   case GLSL_TYPE_INT16:
      return nir_type_int16;

   case GLSL_TYPE_UINT8:
      return nir_type_uint8;

   case GLSL_TYPE_INT8:
      return nir_type_int8;

   case GLSL_TYPE_UINT64:
      return nir_type_uint64;

   case GLSL_TYPE_INT64:
      return nir_type_int64;

   case GLSL_TYPE_FLOAT:
      return nir_type_float32;

   case GLSL_TYPE_FLOAT16:
      return nir_type_float16;

   case GLSL_TYPE_DOUBLE:
      return nir_type_float64;

   default:
      unreachable("unknown type");
   }
}

static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);

typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[4];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[4];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];
typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either. This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}
/**
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}
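/* Illustrative sketch (not from the original header): iterating the sources
 * of an SSA-destination ALU instruction. `alu` is assumed to be a valid
 * nir_alu_instr whose destination is SSA.
 *
 *    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
 *       unsigned comps = nir_ssa_alu_instr_src_components(alu, i);
 *       for (unsigned c = 0; c < comps; c++) {
 *          // alu->src[i].swizzle[c] selects the channel actually read
 *       }
 *    }
 */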
bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_struct
} nir_deref_type;

typedef struct nir_deref {
   nir_deref_type deref_type;
   struct nir_deref *child;
   const struct glsl_type *type;
} nir_deref;

typedef struct {
   nir_deref deref;

   nir_variable *var;
} nir_deref_var;

/* This enum describes how the array is referenced. If the deref is
 * direct then the base_offset is used. If the deref is indirect then
 * offset is given by base_offset + indirect. If the deref is a wildcard
 * then the deref refers to all of the elements of the array at the same
 * time. Wildcard dereferences are only ever allowed in copy_var
 * intrinsics and the source and destination derefs must have matching
 * wildcards.
 */
typedef enum {
   nir_deref_array_type_direct,
   nir_deref_array_type_indirect,
   nir_deref_array_type_wildcard,
} nir_deref_array_type;

typedef struct {
   nir_deref deref;

   nir_deref_array_type deref_array_type;
   unsigned base_offset;
   nir_src indirect;
} nir_deref_array;

typedef struct {
   nir_deref deref;

   unsigned index;
} nir_deref_struct;
NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
                deref_type, nir_deref_type_var)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
                deref_type, nir_deref_type_array)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
                deref_type, nir_deref_type_struct)
/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
   while (deref->child)
      deref = deref->child;

   return deref;
}
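/* Illustrative sketch (not from the original header): a deref chain is a
 * linked list rooted at a nir_deref_var. For an access like `arr[i].field`,
 * the chain is var -> array -> struct, and nir_deref_tail() returns the
 * final struct deref:
 *
 *    nir_deref *tail = nir_deref_tail(&deref_var->deref);
 *    if (tail->deref_type == nir_deref_type_struct) {
 *       unsigned field = nir_deref_as_struct(tail)->index;
 *    }
 */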
typedef struct {
   nir_instr instr;

   unsigned num_params;
   nir_deref_var **params;
   nir_deref_var *return_deref;

   struct nir_function *callee;
} nir_call_instr;

#include "nir_intrinsics.h"

#define NIR_INTRINSIC_MAX_CONST_INDEX 3
/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do. Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like. Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them. Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated. That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources. What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_deref_var *variables[2];

   nir_src src[];
} nir_intrinsic_instr;
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * whether the intrinsic can be safely eliminated if none of its output
    * values are being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access. This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   NIR_INTRINSIC_COMPONENT = 8,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE = 9,

   /**
    * A binary nir_op to use when performing a reduction or scan operation
    */
   NIR_INTRINSIC_REDUCTION_OP = 10,

   /**
    * Cluster size for reduction operations
    */
   NIR_INTRINSIC_CLUSTER_SIZE = 11,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
#define NIR_INTRINSIC_MAX_INPUTS 4

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   /** does this intrinsic have a destination? */
   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of inputs/outputs that are variables */
   unsigned num_variables;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
static inline unsigned
nir_intrinsic_src_components(nir_intrinsic_instr *intr, unsigned srcn)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   assert(srcn < info->num_srcs);
   if (info->src_components[srcn])
      return info->src_components[srcn];
   else
      return intr->num_components;
}

static inline unsigned
nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   if (!info->has_dest)
      return 0;
   else if (info->dest_components)
      return info->dest_components;
   else
      return intr->num_components;
}
#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                             \
static inline type                                                            \
nir_intrinsic_##name(const nir_intrinsic_instr *instr)                        \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1];      \
}                                                                             \
static inline void                                                            \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)                \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;       \
}
INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
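/* Illustrative sketch (not from the original header): each instantiation
 * above defines a typed getter/setter pair that indexes const_index[]
 * through the per-opcode index_map. For example, for a store intrinsic
 * `store` whose info maps WRMASK:
 *
 *    nir_intrinsic_set_write_mask(store, 0x5);          // write .x and .z
 *    unsigned wrmask = nir_intrinsic_write_mask(store);
 *
 * The assert in the accessor fires if the opcode does not use that index.
 */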
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparator, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ms_mcs, /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_plane,          /* < selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_src;
typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txf_ms_mcs,         /**< Multisample compression value fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  *   identical.
                                  */
} nir_texop;
typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs 4
    * components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The texture deref
    *
    * If this is null, use texture_index instead.
    */
   nir_deref_var *texture;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;

   /** The sampler deref
    *
    * If this is null, use sampler_index instead.
    */
   nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
nir_tex_instr_dest_size(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret += 1;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}
static inline bool
nir_alu_instr_is_comparison(const nir_alu_instr *instr)
{
   switch (instr->op) {
   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine:
   case nir_op_i2b:
   case nir_op_f2b:
   case nir_op_inot:
   case nir_op_fnot:
      return true;
   default:
      return false;
   }
}
static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}
static inline unsigned
nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
    * the offset, since a cube maps to a single face.
    */
   if (instr->src[src].src_type == nir_tex_src_offset) {
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
         return 2;
      else if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}
static inline int
nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}
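/* Illustrative sketch (not from the original header): finding a particular
 * source of a texture instruction. `tex` is assumed to be a valid
 * nir_tex_instr.
 *
 *    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
 *    if (lod_idx >= 0) {
 *       nir_src *lod = &tex->src[lod_idx].src;
 *       // ... inspect or rewrite the explicit LOD source ...
 *    }
 */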
void nir_tex_instr_add_src(nir_tex_instr *tex,
                           nir_tex_src_type src_type,
                           nir_src src);

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);

typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;
typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;
   nir_jump_type type;
} nir_jump_instr;

/* creates a new SSA variable in an undefined state */

typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;
typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;
typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)

typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entrys. The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * end up swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops. The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish. Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block). Each function also has a start block and an end block which
 * all return statements point to (which is always empty). Together, all the
 * blocks with their predecessors and successors make up the control flow
 * graph (CFG) of the function. There are helpers that modify the tree of
 * control flow nodes while modifying the CFG appropriately; these should be
 * used instead of modifying the tree directly.
 */

typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;

typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;
typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /** < list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_blocks on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;
static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}
#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
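/* Illustrative sketch (not from the original header): scanning a block for
 * ALU instructions. `block` is assumed to be a valid nir_block; the _safe
 * variant is used because the body may remove the current instruction.
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type != nir_instr_type_alu)
 *          continue;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       // ... inspect alu->op, possibly rewrite uses and remove it ...
 *    }
 */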
typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;
*conditional_instr
;
1697 nir_block
*break_block
;
1698 nir_block
*continue_from_block
;
1700 bool continue_from_then
;
1702 struct list_head loop_terminator_link
;
1703 } nir_loop_terminator
;
typedef struct {
   /* Number of instructions in the loop */
   unsigned num_instructions;

   /* How many times the loop is run (if known) */
   unsigned trip_count;
   bool is_trip_count_known;

   /* Unroll the loop regardless of its size */
   bool force_unroll;

   nir_loop_terminator *limiting_terminator;

   /* A list of loop_terminators terminating this loop. */
   struct list_head loop_terminator_list;
} nir_loop_info;
typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */

   nir_loop_info *info;
} nir_loop;
/**
 * Various bits of metadata that can be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
   nir_metadata_loop_analysis = 0x10,
} nir_metadata;
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** array of variables used as parameters */
   unsigned num_params;
   nir_variable **params;

   /** variable used to hold the result of the function */
   nir_variable *return_var;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}
static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}
NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
typedef enum {
   nir_parameter_in,
   nir_parameter_out,
   nir_parameter_inout,
} nir_parameter_type;

typedef struct {
   nir_parameter_type param_type;
   const struct glsl_type *type;
} nir_parameter;
typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;
   const struct glsl_type *return_type;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;
} nir_function;
typedef struct nir_shader_compiler_options {
   bool lower_fdiv;
   bool lower_ffma;
   bool fuse_ffma;
   bool lower_flrp32;
   /** Lowers flrp when it does not support doubles */
   bool lower_flrp64;
   bool lower_fpow;
   bool lower_fsat;
   bool lower_fsqrt;
   bool lower_fmod32;
   bool lower_fmod64;
   /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
   bool lower_bitfield_extract;
   /** Lowers ibitfield_extract/ubitfield_extract to bfm, compares, shifts. */
   bool lower_bitfield_extract_to_shifts;
   /** Lowers bitfield_insert to bfi/bfm */
   bool lower_bitfield_insert;
   /** Lowers bitfield_insert to bfm, compares, and shifts. */
   bool lower_bitfield_insert_to_shifts;
   /** Lowers bitfield_reverse to shifts. */
   bool lower_bitfield_reverse;
   /** Lowers bit_count to shifts. */
   bool lower_bit_count;
   /** Lowers bfm to shifts and subtracts. */
   bool lower_bfm;
   /** Lowers ifind_msb to compare and ufind_msb */
   bool lower_ifind_msb;
   /** Lowers find_lsb to ufind_msb and logic ops */
   bool lower_find_lsb;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
   bool lower_mul_high;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /* lower b2f to iand */
   bool lower_b2f;

   /* Does the native fdot instruction replicate its result for four
    * components? If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   bool lower_all_io_to_temps;

   /**
    * Does the driver support real 32-bit integers? (Otherwise, integers
    * are simulated by floats.)
    */
   bool native_integers;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   /**
    * If enabled, gl_BaseVertex will be lowered as:
    * is_indexed_draw (~0/0) & firstvertex
    */
   bool lower_base_vertex;

   bool lower_cs_local_index_from_id;

   bool lower_device_index_to_zero;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;

   /**
    * Do vertex shader double inputs use two locations? The Vulkan spec
    * requires two locations to be used, OpenGL allows a single location.
    */
   bool vs_inputs_dual_locations;

   unsigned max_unroll_iterations;
} nir_shader_compiler_options;
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /** list of global registers in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access, plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;
} nir_shader;
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   assert(func->return_type == glsl_void_type());
   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}
#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_local);
   exec_list_push_tail(&impl->locals, &var->node);
}
2094 nir_variable
*nir_variable_create(nir_shader
*shader
,
2095 nir_variable_mode mode
,
2096 const struct glsl_type
*type
,
2098 /** creates a local variable and adds it to the list */
2099 nir_variable
*nir_local_variable_create(nir_function_impl
*impl
,
2100 const struct glsl_type
*type
,
2103 /** creates a function and adds it to the shader's list of functions */
2104 nir_function
*nir_function_create(nir_shader
*shader
, const char *name
);
2106 nir_function_impl
*nir_function_impl_create(nir_function
*func
);
2107 /** creates a function_impl that isn't tied to any particular function */
2108 nir_function_impl
*nir_function_impl_create_bare(nir_shader
*shader
);
nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
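/* Illustrative sketch (not from the original header): typical metadata usage
 * in an optimization pass over a hypothetical `impl`.
 *
 *    nir_metadata_require(impl, nir_metadata_block_index |
 *                               nir_metadata_dominance);
 *
 *    // ... transform instructions without touching the CFG ...
 *
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 *
 * Passes that change control flow would instead preserve nir_metadata_none
 * so that stale block indices and dominance information get recomputed.
 */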
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);

typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
bool nir_deref_foreach_leaf(nir_deref_var *deref,
                            nir_deref_foreach_leaf_cb cb, void *state);

nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);

nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/*
 * NIR Cursors and Instruction Insertion API
 * @{
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes. Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;

   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;
static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}
static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}

static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}
static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}
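
/*
 * Illustrative sketch (not part of the API): phi instructions must stay
 * grouped at the top of their block, so code inserted at the start of a
 * block generally wants nir_after_phis() rather than nir_before_block()
 * when the new instruction consumes SSA values:
 *
 *    nir_cursor c = nir_after_phis(block);
 *    nir_instr_insert(c, &new_instr->instr);   // new_instr is hypothetical
 */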
static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
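
/*
 * Illustrative sketch (not part of the API): creating an instruction and
 * inserting it with a cursor.  Inserting a constant zero at the top of a
 * function might look like this; the helper name is hypothetical.
 */
static inline void
example_insert_zero_at_entry(nir_shader *shader, nir_function_impl *impl)
{
   nir_load_const_instr *zero = nir_load_const_instr_create(shader, 1, 32);
   zero->value.u32[0] = 0;
   nir_instr_insert(nir_before_block(nir_start_block(impl)), &zero->instr);
}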
void nir_instr_remove_v(nir_instr *instr);

static inline nir_cursor
nir_instr_remove(nir_instr *instr)
{
   nir_cursor cursor;
   nir_instr *prev = nir_instr_prev(instr);
   if (prev) {
      cursor = nir_after_instr(prev);
   } else {
      cursor = nir_before_block(instr->block);
   }
   nir_instr_remove_v(instr);
   return cursor;
}

/** @} */
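
/*
 * Illustrative sketch (hypothetical helper): because nir_instr_remove()
 * returns a cursor at the removal point, a pass can drop an instruction and
 * keep editing in its place.
 */
static inline void
example_swap_instr(nir_instr *old_instr, nir_instr *replacement)
{
   nir_cursor c = nir_instr_remove(old_instr);
   nir_instr_insert(c, replacement);
}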
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
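
/*
 * Illustrative sketch (hypothetical helpers): the foreach functions invoke
 * the callback once per SSA def/dest/src and stop early if it returns
 * false.  Counting an instruction's SSA defs could look like this.
 */
static inline bool
example_count_def_cb(nir_ssa_def *def, void *state)
{
   (void) def;
   (*(unsigned *) state)++;
   return true; /* keep iterating */
}

static inline unsigned
example_num_ssa_defs(nir_instr *instr)
{
   unsigned count = 0;
   nir_foreach_ssa_def(instr, example_count_def_cb, &count);
   return count;
}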
nir_const_value *nir_src_as_const_value(nir_src src);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);
void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
                             nir_deref_var *new_deref);
void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);
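
/*
 * Illustrative sketch (hypothetical helper): the usual way to replace an
 * instruction is to rewrite all uses of its SSA result to point at a new
 * def, then remove it.
 */
static inline void
example_replace_alu_result(nir_alu_instr *old_alu, nir_ssa_def *replacement)
{
   assert(old_alu->dest.dest.is_ssa);
   nir_ssa_def_rewrite_uses(&old_alu->dest.dest.ssa,
                            nir_src_for_ssa(replacement));
   nir_instr_remove(&old_alu->instr);
}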
/*
 * Finds the next basic block in source-code order; returns NULL if there is
 * none.
 */

nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */

nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
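
/*
 * Illustrative sketch (hypothetical helper): nir_foreach_block() visits
 * every block of a function in source-code order; use the _safe variant
 * when the loop body may remove the current block.
 */
static inline unsigned
example_count_blocks(nir_function_impl *impl)
{
   unsigned count = 0;
   nir_foreach_block(block, impl) {
      count++;
   }
   return count;
}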
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);
void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);

nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
#ifdef DEBUG
void nir_validate_shader(nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

static inline bool
should_serialize_deserialize_nir(void)
{
   static int test_serialize = -1;
   if (test_serialize < 0)
      test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);

   return test_serialize;
}

static inline bool
should_print_nir(void)
{
   static int should_print = -1;
   if (should_print < 0)
      should_print = env_var_as_boolean("NIR_PRINT", false);

   return should_print;
}
#else
static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* DEBUG */
#define _PASS(nir, do_pass) do {                                     \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
   if (should_serialize_deserialize_nir()) {                         \
      void *mem_ctx = ralloc_parent(nir);                            \
      nir = nir_shader_serialize_deserialize(mem_ctx, nir);          \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(nir,                \
   nir_metadata_set_validation_flag(nir);                            \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      if (should_print_nir())                                        \
         nir_print_shader(nir, stdout);                              \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(nir,                        \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   pass(nir, ##__VA_ARGS__);                                         \
   if (should_print_nir())                                           \
      nir_print_shader(nir, stdout);                                 \
)
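
/*
 * Illustrative sketch (in comment form, since the pass entrypoints are
 * declared further down): the typical driver idiom is to run optimization
 * passes through NIR_PASS in a loop until none of them reports progress.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, s, nir_copy_prop);
 *       NIR_PASS(progress, s, nir_opt_dce);
 *       NIR_PASS(progress, s, nir_opt_cse);
 *    } while (progress);
 */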
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);
bool nir_split_var_copies(nir_shader *shader);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
bool nir_lower_var_copies(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *));
/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
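
/*
 * Illustrative sketch (hypothetical helper): linking a producer/consumer
 * stage pair with these helpers.
 */
static inline void
example_link_stages(nir_shader *vs, nir_shader *fs)
{
   nir_remove_unused_varyings(vs, fs);
   nir_compact_varyings(vs, fs, false /* default_to_smooth_interp */);
}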
typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *),
                  nir_lower_io_options);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
void nir_lower_io_types(nir_shader *shader);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);

bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
                                                  bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_vote_eq_to_ballot:1;
   bool lower_subgroup_masks:1;
   bool lower_shuffle:1;
   bool lower_shuffle_to_32bit:1;
   bool lower_quad:1;
} nir_lower_subgroups_options;

bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);
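
/*
 * Illustrative sketch (hypothetical values): a driver with 64-wide
 * subgroups and no native shuffle support might configure the lowering as
 * follows.
 */
static inline bool
example_lower_subgroups(nir_shader *s)
{
   const nir_lower_subgroups_options opts = {
      .subgroup_size = 64,
      .ballot_bit_size = 64,
      .lower_shuffle = true,
      .lower_subgroup_masks = true,
   };
   return nir_lower_subgroups(s, &opts);
}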
bool nir_lower_system_values(nir_shader *shader);
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;
} nir_lower_tex_options;

bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
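
/*
 * Illustrative sketch (hypothetical choices): a driver without native rect
 * textures that also needs the s coordinate of sampler #0 clamped might
 * configure nir_lower_tex() like this.
 */
static inline bool
example_lower_tex(nir_shader *s)
{
   nir_lower_tex_options opts = {0};
   opts.lower_rect = true;
   opts.saturate_s = 1 << 0; /* clamp s for sampler #0 */
   return nir_lower_tex(s, &opts);
}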
bool nir_lower_idiv(nir_shader *shader);

bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

bool nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices);
typedef struct nir_lower_wpos_ytransform_options {
   gl_state_index16 state_tokens[STATE_LENGTH];
   bool fs_coord_origin_upper_left:1;
   bool fs_coord_origin_lower_left:1;
   bool fs_coord_pixel_center_integer:1;
   bool fs_coord_pixel_center_half_integer:1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
typedef struct nir_lower_drawpixels_options {
   gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
   gl_state_index16 scale_state_tokens[STATE_LENGTH];
   gl_state_index16 bias_state_tokens[STATE_LENGTH];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps:1;
   bool scale_and_bias:1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);
typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
bool nir_lower_to_source_mods(nir_shader *shader);

bool nir_lower_gs_intrinsics(nir_shader *shader);

typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);

bool nir_lower_bit_size(nir_shader *shader,
                        nir_lower_bit_size_callback callback,
                        void *callback_data);
typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
} nir_lower_int64_options;

bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8)
} nir_lower_doubles_options;

bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);
/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_if(nir_shader *shader);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);

bool nir_opt_move_comparisons(nir_shader *shader);

bool nir_opt_move_load_ubo(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_shrink_load(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_sweep(nir_shader *shader);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);