/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/bitset.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"

#include "nir_opcodes.h"
struct gl_shader_program;

#define NIR_TRUE (~0u)
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field)  \
static inline out_type *                                 \
name(const in_type *parent)                              \
{                                                        \
   return exec_node_data(out_type, parent, field);       \
}
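/* Usage sketch (illustrative, not part of the original header): given a
 * wrapper struct that embeds the base type in a field named "instr",
 * NIR_DEFINE_CAST generates a downcast helper.  The nir_foo_instr and
 * nir_instr_as_foo names below are hypothetical.
 *
 *    typedef struct {
 *       nir_instr instr;
 *       unsigned payload;
 *    } nir_foo_instr;
 *
 *    NIR_DEFINE_CAST(nir_instr_as_foo, nir_instr, nir_foo_instr, instr)
 *
 *    // later:  nir_foo_instr *foo = nir_instr_as_foo(some_instr);
 */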
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   int tokens[5];
   int swizzle;
} nir_state_slot;

typedef enum {
   nir_var_shader_in,
   nir_var_shader_out,
   nir_var_global,
   nir_var_local,
   nir_var_uniform,
   nir_var_shader_storage,
   nir_var_shared,
   nir_var_system_value,
   nir_var_param,
} nir_variable_mode;
/**
 * Data stored in an nir_constant
 */
union nir_constant_data {
   unsigned u[16];
   int i[16];
   float f[16];
   bool b[16];
   double d[16];
};

typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   union nir_constant_data value;

   /* we could get this from the var->type but makes clone *much* easier to
    * not have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned invariant:1;

      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode:5;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_qualifier
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;

      /**
       * Was the location explicitly set in the shader?
       *
       * If the location is explicitly set in the shader, it \b cannot be changed
       * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
       * no effect).
       */
      unsigned explicit_location:1;
      unsigned explicit_index:1;

      /**
       * Was an initial binding explicitly set in the shader?
       *
       * If so, constant_initializer contains an integer nir_constant
       * representing the initial binding point.
       */
      unsigned explicit_binding:1;

      /**
       * Does this variable have an initializer?
       *
       * This is used by the linker to cross-validate initializers of global
       * variables.
       */
      unsigned has_initializer:1;

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components.  For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c nir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;
      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      int binding;

      /**
       * Location an atomic counter is stored at.
       */
      unsigned offset;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         bool read_only;  /**< "readonly" qualifier. */
         bool write_only; /**< "writeonly" qualifier. */

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;

      /**
       * Highest element accessed with a constant expression array index
       *
       * Not used for non-array variables.
       */
      unsigned max_array_access;

   } data;
   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree.  In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   unsigned num_state_slots;     /**< Number of state slots used */
   nir_state_slot *state_slots;  /**< State descriptors. */

   /**
    * Constant expression assigned in the initializer of the variable
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;
} nir_variable;

#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)
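/* Usage sketch (illustrative, assuming a nir_shader *shader is in scope):
 * walking the shader's input variables with nir_foreach_variable and
 * recording the highest driver_location seen.  Only the iteration pattern
 * matters here; the bookkeeping is made up.
 *
 *    unsigned max_loc = 0;
 *    nir_foreach_variable(var, &shader->inputs) {
 *       if (var->data.driver_location > max_loc)
 *          max_loc = var->data.driver_location;
 *    }
 */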
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
}
/**
 * Returns the bits in the inputs_read, outputs_written, or
 * system_values_read bitfield corresponding to this variable.
 */
static inline uint64_t
nir_variable_get_io_mask(nir_variable *var, gl_shader_stage stage)
{
   assert(var->data.mode == nir_var_shader_in ||
          var->data.mode == nir_var_shader_out ||
          var->data.mode == nir_var_system_value);
   assert(var->data.location >= 0);

   const struct glsl_type *var_type = var->type;
   if (stage == MESA_SHADER_GEOMETRY && var->data.mode == nir_var_shader_in) {
      /* Most geometry shader inputs are per-vertex arrays */
      if (var->data.location >= VARYING_SLOT_VAR0)
         assert(glsl_type_is_array(var_type));

      if (glsl_type_is_array(var_type))
         var_type = glsl_get_array_element(var_type);
   }

   bool is_vertex_input = (var->data.mode == nir_var_shader_in &&
                           stage == MESA_SHADER_VERTEX);
   unsigned slots = glsl_count_attribute_slots(var_type, is_vertex_input);
   return ((1ull << slots) - 1) << var->data.location;
}
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components;  /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_global;

   /**
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element. This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends. By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_src's where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dest's where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_if's where this register is used as a condition */
   struct list_head if_uses;
} nir_register;
typedef enum {
   nir_instr_type_alu,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;

typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev(&instr->node));
}

static inline bool
nir_instr_is_last(nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next(&instr->node));
}
typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char *name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   nir_instr *parent_instr;

   /** set of nir_instr's where this register is used (read from) */
   struct list_head uses;

   /** set of nir_if's where this register is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;

struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
typedef struct nir_src {
   union {
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

#define NIR_SRC_INIT (nir_src) { { NULL } }

#define nir_foreach_use(reg_or_ssa_def, src) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(reg_or_ssa_def, src) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(reg_or_ssa_def, src) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(reg_or_ssa_def, src) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
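/* Usage sketch (illustrative): counting the uses of an SSA definition by
 * walking its use lists with the macros above.  Assumes a nir_ssa_def *def
 * is in scope.
 *
 *    unsigned num_uses = 0;
 *    nir_foreach_use(def, src)
 *       num_uses++;
 *    nir_foreach_if_use(def, src)
 *       num_uses++;
 */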
typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

#define NIR_DEST_INIT (nir_dest) { { { NULL } } }

#define nir_foreach_def(reg, dest) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(reg, dest) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
typedef struct {
   /** Base source */
   nir_src src;

   /**
    * \name input modifiers
    */

   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[4];
} nir_alu_src;

typedef struct {
   /** Base destination */
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 =   32 | nir_type_bool,
   nir_type_int8 =      8 | nir_type_int,
   nir_type_int16 =    16 | nir_type_int,
   nir_type_int32 =    32 | nir_type_int,
   nir_type_int64 =    64 | nir_type_int,
   nir_type_uint8 =     8 | nir_type_uint,
   nir_type_uint16 =   16 | nir_type_uint,
   nir_type_uint32 =   32 | nir_type_uint,
   nir_type_uint64 =   64 | nir_type_uint,
   nir_type_float16 =  16 | nir_type_float,
   nir_type_float32 =  32 | nir_type_float,
   nir_type_float64 =  64 | nir_type_float,
} nir_alu_type;
#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
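/* Worked example (illustrative): a nir_alu_type packs the base type into the
 * low three bits and the bit size into the remaining bits, so
 * nir_type_float32 == (32 | nir_type_float) and
 *
 *    nir_alu_type_get_type_size(nir_type_float32) == 32
 *    nir_alu_type_get_base_type(nir_type_float32) == nir_type_float
 */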
typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[4];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[4];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];
typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either.  This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(nir_alu_instr *instr, unsigned src, unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}
/**
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}
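/* Usage sketch (illustrative): for an ALU instruction with an SSA
 * destination, visit each source channel that feeds the result.  Assumes a
 * nir_alu_instr *alu is in scope.
 *
 *    for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
 *       unsigned channels = nir_ssa_alu_instr_src_components(alu, i);
 *       for (unsigned c = 0; c < channels; c++) {
 *          // alu->src[i].swizzle[c] is the component read for channel c
 *       }
 *    }
 */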
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_struct
} nir_deref_type;

typedef struct nir_deref {
   nir_deref_type deref_type;
   struct nir_deref *child;
   const struct glsl_type *type;
} nir_deref;

typedef struct {
   nir_deref deref;

   nir_variable *var;
} nir_deref_var;

/* This enum describes how the array is referenced.  If the deref is
 * direct then the base_offset is used.  If the deref is indirect then the
 * offset is given by base_offset + indirect.  If the deref is a wildcard
 * then the deref refers to all of the elements of the array at the same
 * time.  Wildcard dereferences are only ever allowed in copy_var
 * intrinsics and the source and destination derefs must have matching
 * wildcards.
 */
typedef enum {
   nir_deref_array_type_direct,
   nir_deref_array_type_indirect,
   nir_deref_array_type_wildcard,
} nir_deref_array_type;

typedef struct {
   nir_deref deref;

   nir_deref_array_type deref_array_type;
   unsigned base_offset;
   nir_src indirect;
} nir_deref_array;

typedef struct {
   nir_deref deref;

   unsigned index;
} nir_deref_struct;
NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref)
/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
   while (deref->child)
      deref = deref->child;

   return deref;
}
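/* Usage sketch (illustrative): walking a deref chain from the variable at
 * its head down to the tail, dispatching on deref_type.  Assumes a
 * nir_deref_var *head is in scope.
 *
 *    for (nir_deref *d = &head->deref; d; d = d->child) {
 *       if (d->deref_type == nir_deref_type_array) {
 *          nir_deref_array *arr = nir_deref_as_array(d);
 *          // arr->base_offset, plus arr->indirect for indirect derefs
 *       } else if (d->deref_type == nir_deref_type_struct) {
 *          nir_deref_struct *s = nir_deref_as_struct(d);
 *          // s->index is the struct member being accessed
 *       }
 *    }
 */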
typedef struct {
   nir_instr instr;

   unsigned num_params;
   nir_deref_var **params;
   nir_deref_var *return_deref;

   struct nir_function *callee;
} nir_call_instr;
#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
                  num_variables, num_indices, idx0, idx1, idx2, flags) \
   nir_intrinsic_##name,

#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,

typedef enum {
#include "nir_intrinsics.h"
   nir_num_intrinsics = nir_last_intrinsic + 1
} nir_intrinsic_op;

#undef INTRINSIC
#undef LAST_INTRINSIC

#define NIR_INTRINSIC_MAX_CONST_INDEX 3
/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_deref_var *variables[2];

   nir_src src[];
} nir_intrinsic_instr;
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if none of its output
    * values are being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The range of a load operation.  This specifies the maximum amount of
    * data starting at the base offset (if any) that can be accessed.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
#define NIR_INTRINSIC_MAX_INPUTS 4

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of inputs/outputs that are variables */
   unsigned num_variables;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                             \
static inline type                                                            \
nir_intrinsic_##name(nir_intrinsic_instr *instr)                              \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1];      \
}                                                                             \
static inline void                                                            \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)                \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;       \
}

INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
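/* Usage sketch (illustrative): the generated accessors read and write the
 * intrinsic's const_index slots by name.  For an intrinsic whose info maps
 * the BASE index (e.g. a load with a constant base offset), assuming a
 * nir_intrinsic_instr *intrin is in scope:
 *
 *    int base = nir_intrinsic_base(intrin);
 *    nir_intrinsic_set_base(intrin, base + 16);
 */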
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparitor, /* shadow comparitor */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_src;
typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  *   identical.
                                  */
} nir_texop;
typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs 4
    * components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The texture deref
    *
    * If this is null, use texture_index instead.
    */
   nir_deref_var *texture;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_lod
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;

   /** The sampler deref
    *
    * If this is null, use sampler_index instead.
    */
   nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
nir_tex_instr_dest_size(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
         case GLSL_SAMPLER_DIM_1D:
         case GLSL_SAMPLER_DIM_BUF:
            ret = 1;
            break;
         case GLSL_SAMPLER_DIM_2D:
         case GLSL_SAMPLER_DIM_CUBE:
         case GLSL_SAMPLER_DIM_MS:
         case GLSL_SAMPLER_DIM_RECT:
         case GLSL_SAMPLER_DIM_EXTERNAL:
            ret = 2;
            break;
         case GLSL_SAMPLER_DIM_3D:
            ret = 3;
            break;
         default:
            unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}
static inline unsigned
nir_tex_instr_src_size(nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   if (instr->src[src].src_type == nir_tex_src_offset ||
       instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}
static inline int
nir_tex_instr_src_index(nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}
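/* Usage sketch (illustrative): finding a particular source of a texture
 * instruction; a negative result means the source is not present.  Assumes
 * a nir_tex_instr *tex is in scope.
 *
 *    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
 *    if (lod_idx >= 0) {
 *       // tex->src[lod_idx].src is the explicit LOD source
 *    }
 */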
typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;

typedef struct {
   nir_instr instr;

   /* creates a new SSA variable in an undefined state */
   nir_ssa_def def;
} nir_ssa_undef_instr;
typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi, entry) \
   foreach_list_typed(nir_phi_src, entry, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi, entry) \
   foreach_list_typed_safe(nir_phi_src, entry, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;

typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(pcopy, entry) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entry's.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * get swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr)
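/* Usage sketch (illustrative): dispatching on nir_instr::type before
 * downcasting with the helpers above.  Assumes a nir_instr *instr is in
 * scope.
 *
 *    switch (instr->type) {
 *    case nir_instr_type_alu: {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       // inspect alu->op, alu->src[], alu->dest ...
 *       break;
 *    }
 *    case nir_instr_type_intrinsic: {
 *       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 *       // inspect intrin->intrinsic, intrin->src[] ...
 *       break;
 *    }
 *    default:
 *       break;
 *    }
 */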
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops. The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish. Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block). Each function also has a start block and an end block which
 * all return statements point to (which is always empty). Together, all the
 * blocks with their predecessors and successors make up the control flow
 * graph (CFG) of the function. There are helpers that modify the tree of
 * control flow nodes while modifying the CFG appropriately; these should be
 * used instead of modifying the tree directly.
 */

typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;

typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;
typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /** < list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_block's on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;
static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}

#define nir_foreach_instr(block, instr) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(block, instr) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(block, instr) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(block, instr) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
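/* Usage sketch (illustrative): removing instructions while iterating over a
 * block requires the _safe variant.  The should_remove() predicate below is
 * hypothetical; nir_instr_remove() is declared later in this header.
 *
 *    nir_foreach_instr_safe(block, instr) {
 *       if (should_remove(instr))
 *          nir_instr_remove(instr);
 *    }
 */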
typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;
static inline nir_cf_node *
nir_if_first_then_node(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return exec_node_data(nir_cf_node, head, node);
}

static inline nir_cf_node *
nir_if_last_then_node(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return exec_node_data(nir_cf_node, tail, node);
}

static inline nir_cf_node *
nir_if_first_else_node(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return exec_node_data(nir_cf_node, head, node);
}

static inline nir_cf_node *
nir_if_last_else_node(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return exec_node_data(nir_cf_node, tail, node);
}
typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */
} nir_loop;

static inline nir_cf_node *
nir_loop_first_cf_node(nir_loop *loop)
{
   return exec_node_data(nir_cf_node, exec_list_get_head(&loop->body), node);
}

static inline nir_cf_node *
nir_loop_last_cf_node(nir_loop *loop)
{
   return exec_node_data(nir_cf_node, exec_list_get_tail(&loop->body), node);
}
/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
} nir_metadata;
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** array of variables used as parameters */
   unsigned num_params;
   nir_variable **params;

   /** variable used to hold the result of the function */
   nir_variable *return_var;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) exec_list_get_head(&impl->body);
}

static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}
NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node, nir_function_impl, cf_node)
typedef enum {
   nir_parameter_in,
   nir_parameter_out,
   nir_parameter_inout,
} nir_parameter_type;

typedef struct {
   nir_parameter_type param_type;
   const struct glsl_type *type;
} nir_parameter;

typedef struct nir_function {
   struct exec_node node;

   const char *name;

   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;
   const struct glsl_type *return_type;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;
} nir_function;
typedef struct nir_shader_compiler_options {
   bool lower_bitfield_extract;
   bool lower_bitfield_insert;
   bool lower_uadd_carry;
   bool lower_usub_borrow;

   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;

   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   /**
    * Does the driver support real 32-bit integers?  (Otherwise, integers
    * are simulated by floats.)
    */
   bool native_integers;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;
} nir_shader_compiler_options;
typedef struct nir_shader_info {
   const char *name;

   /* Descriptive name provided by the client; may be NULL */
   const char *label;

   /* Number of textures used by this shader */
   unsigned num_textures;
   /* Number of uniform buffers used by this shader */
   unsigned num_ubos;
   /* Number of atomic buffers used by this shader */
   unsigned num_abos;
   /* Number of shader storage buffers used by this shader */
   unsigned num_ssbos;
   /* Number of images used by this shader */
   unsigned num_images;

   /* Which inputs are actually read */
   uint64_t inputs_read;
   /* Which outputs are actually written */
   uint64_t outputs_written;
   /* Which system values are actually read */
   uint64_t system_values_read;

   /* Which patch inputs are actually read */
   uint32_t patch_inputs_read;
   /* Which patch outputs are actually written */
   uint32_t patch_outputs_written;

   /* Whether or not this shader ever uses textureGather() */
   bool uses_texture_gather;

   /* Whether or not this shader uses the gl_ClipDistance output */
   bool uses_clip_distance_out;

   /* Whether or not separate shader objects were used */
   bool separate_shader;

   /** Was this shader linked with any transform feedback varyings? */
   bool has_transform_feedback_varyings;

   union {
      struct {
         /** The number of vertices received per input primitive */
         unsigned vertices_in;

         /** The output primitive type (GL enum value) */
         unsigned output_primitive;

         /** The maximum number of vertices the geometry shader might write. */
         unsigned vertices_out;

         /** 1 .. MAX_GEOMETRY_SHADER_INVOCATIONS */
         unsigned invocations;

         /** Whether or not this shader uses EndPrimitive */
         bool uses_end_primitive;

         /** Whether or not this shader uses non-zero streams */
         bool uses_streams;
      } gs;

      struct {
         /**
          * Whether early fragment tests are enabled as defined by
          * ARB_shader_image_load_store.
          */
         bool early_fragment_tests;

         /** gl_FragDepth layout for ARB_conservative_depth. */
         enum gl_frag_depth_layout depth_layout;
      } fs;

      struct {
         unsigned local_size[3];
      } cs;

      struct {
         /** The number of vertices in the TCS output patch. */
         unsigned vertices_out;
      } tess;
   };
} nir_shader_info;
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct nir_shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /** list of global register in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** The shader stage, such as MESA_SHADER_VERTEX. */
   gl_shader_stage stage;
} nir_shader;
static inline nir_function *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   return func;
}

#define nir_foreach_function(shader, func) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)
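/* Usage sketch (illustrative): iterating every function implementation in a
 * shader; declared-only functions have a NULL impl.  Assumes a nir_shader
 * *shader is in scope.
 *
 *    nir_foreach_function(shader, func) {
 *       if (func->impl == NULL)
 *          continue;
 *       // operate on func->impl
 *    }
 */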
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options);

/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_local);
   exec_list_push_tail(&impl->locals, &var->node);
}
/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);
/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components);

nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);

nir_deref *nir_copy_deref(void *mem_ctx, nir_deref *deref);

nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
/*
 * NIR Cursors and Instruction Insertion API
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;

   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}
static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}

static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
   assert(block->cf_node.type == nir_cf_node_block);

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
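/* Usage sketch (illustrative): creating an SSA-undef instruction and
 * inserting it at the top of a function body using a cursor.  Assumes a
 * nir_shader *shader and its nir_function_impl *impl are in scope.
 *
 *    nir_ssa_undef_instr *undef =
 *       nir_ssa_undef_instr_create(shader, 1);
 *    nir_instr_insert(nir_before_cf_list(&impl->body), &undef->instr);
 */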
void nir_instr_remove(nir_instr *instr);
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);
/* visits basic blocks in source-code order */
typedef bool (*nir_foreach_block_cb)(nir_block *block, void *state);
bool nir_foreach_block(nir_function_impl *impl, nir_foreach_block_cb cb,
                       void *state);
bool nir_foreach_block_reverse(nir_function_impl *impl, nir_foreach_block_cb cb,
                               void *state);
bool nir_foreach_block_in_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                                  void *state);

/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);
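/* Usage sketch (illustrative): nir_foreach_block in this version is
 * callback-based; the callback returns true to keep walking and false to
 * stop.  The count_blocks_cb name is hypothetical.
 *
 *    static bool
 *    count_blocks_cb(nir_block *block, void *state)
 *    {
 *       (*(unsigned *)state)++;
 *       return true;
 *    }
 *
 *    // later, with a nir_function_impl *impl in scope:
 *    unsigned count = 0;
 *    nir_foreach_block(impl, count_blocks_cb, &count);
 */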
void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_instr(const nir_instr *instr, FILE *fp);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
#ifdef DEBUG
void nir_validate_shader(nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

#include "util/debug.h"
static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}
#else
static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
#endif /* DEBUG */
#define _PASS(nir, do_pass) do {                                     \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(nir,                \
   nir_metadata_set_validation_flag(nir);                            \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(nir,                        \
   pass(nir, ##__VA_ARGS__);                                         \
)
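/* Usage sketch (illustrative): a typical driver-side optimization loop runs
 * passes through NIR_PASS until nothing reports progress.  The pass
 * selection below is arbitrary; all three passes are declared later in this
 * header.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *    } while (progress);
 */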
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);
bool nir_split_var_copies(nir_shader *shader);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
void nir_lower_var_copies(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, uint32_t mode_mask);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_outputs_to_temporaries(nir_shader *shader,
                                      nir_function *entrypoint);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list,
                              unsigned *size,
                              int (*type_size)(const struct glsl_type *));

void nir_lower_io(nir_shader *shader,
                  nir_variable_mode mode,
                  int (*type_size)(const struct glsl_type *));
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

void nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode mode);

void nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alu_to_scalar(nir_shader *shader);
void nir_lower_load_const_to_scalar(nir_shader *shader);

void nir_lower_phis_to_scalar(nir_shader *shader);

void nir_lower_samplers(nir_shader *shader,
                        const struct gl_shader_program *shader_program);

bool nir_lower_system_values(nir_shader *shader);
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];
} nir_lower_tex_options;

bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
void nir_lower_idiv(nir_shader *shader);

void nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);

void nir_lower_two_sided_color(nir_shader *shader);

void nir_lower_atomics(nir_shader *shader,
                       const struct gl_shader_program *shader_program);
void nir_lower_to_source_mods(nir_shader *shader);

bool nir_lower_gs_intrinsics(nir_shader *shader);

bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

void nir_convert_to_ssa_impl(nir_function_impl *impl);
void nir_convert_to_ssa(nir_shader *shader);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
void nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

void nir_opt_gcm(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

void nir_sweep(nir_shader *shader);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);