/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

#pragma once
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include <stdio.h>

#include "nir_opcodes.h"
struct gl_shader_program;

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
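/* Usage sketch (illustrative, not part of the API): an invocation such as
 * NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
 *                 type, nir_instr_type_alu)
 * (which appears later in this header) generates an assert-checked downcast,
 * so callers typically test the tag first:
 *
 *    if (instr->type == nir_instr_type_alu) {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       ...
 *    }
 */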
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   int tokens[5];
   int swizzle;
} nir_state_slot;
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_global          = (1 << 2),
   nir_var_local           = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_shader_storage  = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_param           = (1 << 7),
   nir_var_shared          = (1 << 8),
   nir_var_all             = ~0,
} nir_variable_mode;
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[4];

   /* we could get this from the var->type but it makes clone *much* easier
    * not to have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;
/**
 * Either a uniform, global variable, shader input, or shader output.  Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned invariant:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       * @{
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;
      /*@}*/

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components.  For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed.  In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      bool compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      int binding;

      /**
       * Location an atomic counter is stored at.
       */
      unsigned offset;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         bool read_only;  /**< "readonly" qualifier. */
         bool write_only; /**< "writeonly" qualifier. */

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;
   } data;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree.  In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   unsigned num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/

   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)
static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
}
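/* Usage sketch (illustrative): visiting one of a shader's variable lists
 * with the iterators above; "shader" is assumed to be a nir_shader (defined
 * later in this header) and "do_something" is a hypothetical helper.
 *
 *    nir_foreach_variable(var, &shader->globals) {
 *       if (nir_variable_is_global(var))
 *          do_something(var);
 *    }
 */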
typedef struct nir_register {
   struct exec_node node;

   unsigned num_components; /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_local;

   /**
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element.  This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends.  By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_src's where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dest's where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_if's where this register is used as a condition */
   struct list_head if_uses;
} nir_register;
typedef enum {
   nir_instr_type_alu,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;
typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;
static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev(&instr->node));
}

static inline bool
nir_instr_is_last(nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next(&instr->node));
}
typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char *name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   nir_instr *parent_instr;

   /** set of nir_instr's where this register is used (read from) */
   struct list_head uses;

   /** set of nir_if's where this register is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;
struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;
struct nir_if;

typedef struct nir_src {
   union {
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()
#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
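/* Usage sketch (illustrative): walking all instruction-level readers of an
 * SSA def.  The _safe variants should be used if uses are added or removed
 * while iterating.
 *
 *    nir_foreach_use(src, def) {
 *       nir_instr *reader = src->parent_instr;
 *       ...
 *    }
 */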
typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}
static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
typedef struct {
   /** base source */
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[4];
} nir_alu_src;

typedef struct {
   /** base destination */
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
} nir_alu_dest;
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;

#define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
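/* Example (illustrative): since the bit size is OR'd into the high bits of
 * the base type, nir_type_float32 decomposes as
 *
 *    nir_alu_type_get_type_size(nir_type_float32)  == 32
 *    nir_alu_type_get_base_type(nir_type_float32)  == nir_type_float
 */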
typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[4];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[4];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];
typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either.  This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(nir_alu_instr *instr, unsigned src, unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}
/*
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_struct
} nir_deref_type;

typedef struct nir_deref {
   nir_deref_type deref_type;
   struct nir_deref *child;
   const struct glsl_type *type;
} nir_deref;

typedef struct {
   nir_deref deref;

   nir_variable *var;
} nir_deref_var;

/* This enum describes how the array is referenced.  If the deref is
 * direct then the base_offset is used.  If the deref is indirect then
 * offset is given by base_offset + indirect.  If the deref is a wildcard
 * then the deref refers to all of the elements of the array at the same
 * time.  Wildcard dereferences are only ever allowed in copy_var
 * intrinsics and the source and destination derefs must have matching
 * wildcards.
 */
typedef enum {
   nir_deref_array_type_direct,
   nir_deref_array_type_indirect,
   nir_deref_array_type_wildcard,
} nir_deref_array_type;

typedef struct {
   nir_deref deref;

   nir_deref_array_type deref_array_type;
   unsigned base_offset;
   nir_src indirect;
} nir_deref_array;

typedef struct {
   nir_deref deref;

   unsigned index;
} nir_deref_struct;
NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
                deref_type, nir_deref_type_var)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
                deref_type, nir_deref_type_array)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
                deref_type, nir_deref_type_struct)
/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
   while (deref->child)
      deref = deref->child;

   return deref;
}
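/* Usage sketch (illustrative): walking a deref chain from the variable to
 * the final member, downcasting each link by its deref_type tag:
 *
 *    for (nir_deref *d = &deref_var->deref; d; d = d->child) {
 *       if (d->deref_type == nir_deref_type_array) {
 *          nir_deref_array *arr = nir_deref_as_array(d);
 *          ...
 *       }
 *    }
 */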
typedef struct {
   nir_instr instr;

   unsigned num_params;
   nir_deref_var **params;
   nir_deref_var *return_deref;

   struct nir_function *callee;
} nir_call_instr;
#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
                  num_variables, num_indices, idx0, idx1, idx2, flags) \
   nir_intrinsic_##name,

#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,

typedef enum {
#include "nir_intrinsics.h"
   nir_num_intrinsics = nir_last_intrinsic + 1
} nir_intrinsic_op;
#define NIR_INTRINSIC_MAX_CONST_INDEX 3

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_deref_var *variables[2];

   nir_src src[];
} nir_intrinsic_instr;
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * whether the intrinsic can be safely eliminated if none of its output
    * values are used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;
/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access.  This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT = 8,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE = 9,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,
} nir_intrinsic_index_flag;
#define NIR_INTRINSIC_MAX_INPUTS 4

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of inputs/outputs that are variables */
   unsigned num_variables;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                            \
static inline type                                                           \
nir_intrinsic_##name(nir_intrinsic_instr *instr)                             \
{                                                                            \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];  \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                        \
   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1];     \
}                                                                            \
static inline void                                                           \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)               \
{                                                                            \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];  \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                        \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;      \
}

INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
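/* Usage sketch (illustrative): the generated accessors map a semantic flag
 * to the right const_index slot for the given opcode.  "load" is assumed to
 * be a nir_intrinsic_instr* and "offset" a hypothetical adjustment:
 *
 *    unsigned base = nir_intrinsic_base(load);
 *    nir_intrinsic_set_base(load, base + offset);
 */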
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparator, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ms_mcs, /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_plane,          /* < selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_src;
typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txf_ms_mcs,         /**< Multisample compression value fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  *   identical.
                                  */
} nir_texop;
typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs 4
    * components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The texture deref
    *
    * If this is null, use texture_index instead.
    */
   nir_deref_var *texture;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_lod
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;

   /** The sampler deref
    *
    * If this is null, use sampler_index instead.
    */
   nir_deref_var *sampler;
} nir_tex_instr;
static inline unsigned
nir_tex_instr_dest_size(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}
static inline nir_alu_type
nir_tex_instr_src_type(nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}
static inline unsigned
nir_tex_instr_src_size(nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_offset ||
       instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}
static inline int
nir_tex_instr_src_index(nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
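/* Usage sketch (illustrative): locating a texture instruction's coordinate
 * source before modifying it:
 *
 *    int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 *    if (coord_idx >= 0) {
 *       nir_src *coord = &tex->src[coord_idx].src;
 *       ...
 *    }
 */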
typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;

typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;

   nir_jump_type type;
} nir_jump_instr;

/* creates a new SSA variable in an undefined state */
typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;
typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;
typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)

typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entry's.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * of a and b are swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops.  The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish.  Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block).  Each function also has a start block and an end block which
 * all return statements point to (which is always empty).  Together, all the
 * blocks with their predecessors and successors make up the control flow
 * graph (CFG) of the function.  There are helpers that modify the tree of
 * control flow nodes while modifying the CFG appropriately; these should be
 * used instead of modifying the tree directly.
 */
typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;

typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;
typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /** < list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_block's on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;
static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}
#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
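/* Usage sketch (illustrative): the _safe variant permits removal during
 * iteration, e.g. deleting every ALU instruction in a block
 * (nir_instr_remove is declared later in this header):
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type == nir_instr_type_alu)
 *          nir_instr_remove(instr);
 *    }
 */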
typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;

typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */
} nir_loop;
/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
} nir_metadata;
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** array of variables used as parameters */
   unsigned num_params;
   nir_variable **params;

   /** variable used to hold the result of the function */
   nir_variable *return_var;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}
static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}
NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
typedef enum {
   nir_parameter_in,
   nir_parameter_out,
   nir_parameter_inout,
} nir_parameter_type;

typedef struct {
   nir_parameter_type param_type;
   const struct glsl_type *type;
} nir_parameter;
typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;
   const struct glsl_type *return_type;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;
} nir_function;
typedef struct nir_shader_compiler_options {
   /** Lowers flrp when the driver does not support doubles */
   bool lower_flrp64;

   bool lower_bitfield_extract;
   bool lower_bitfield_insert;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   /**
    * Does the driver support real 32-bit integers?  (Otherwise, integers
    * are simulated by floats.)
    */
   bool native_integers;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   bool lower_cs_local_index_from_id;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;
} nir_shader_compiler_options;
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info *info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /** list of global registers in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** The shader stage, such as MESA_SHADER_VERTEX. */
   gl_shader_stage stage;
} nir_shader;
static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   assert(func->return_type == glsl_void_type());
   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}

#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);
/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_local);
   exec_list_push_tail(&impl->locals, &var->node);
}
/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);
/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
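/* Usage sketch (illustrative): a pass that only rewrites instructions and
 * leaves the CFG intact would typically end with
 *
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 *
 * so that block indices and dominance are not recomputed needlessly.
 */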
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);
nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);

nir_deref *nir_copy_deref(void *mem_ctx, nir_deref *deref);

typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
bool nir_deref_foreach_leaf(nir_deref_var *deref,
                            nir_deref_foreach_leaf_cb cb, void *state);

nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
/*
 * NIR Cursors and Instruction Insertion API
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;
   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}
static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}

static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
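/* Usage sketch (illustrative): building a constant and inserting it before
 * an existing instruction:
 *
 *    nir_load_const_instr *c = nir_load_const_instr_create(shader, 1, 32);
 *    c->value.u32[0] = 0;   // assumes nir_const_value exposes a u32 array
 *    nir_instr_insert(nir_before_instr(instr), &c->instr);
 */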
static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}

void nir_instr_remove(nir_instr *instr);
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

uint8_t nir_ssa_def_components_read(nir_ssa_def *def);
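/* Usage sketch (illustrative): replacing an SSA-defining instruction with a
 * previously computed value, then deleting it.  "instr_def" is assumed to be
 * the nir_ssa_def produced by "instr", and "replacement" any def that
 * dominates all of its uses:
 *
 *    nir_ssa_def_rewrite_uses(&instr_def, nir_src_for_ssa(replacement));
 *    nir_instr_remove(instr);
 */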
/*
 * finds the next basic block in source-code order, returns NULL if there is
 * none
 */
nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */
nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
2157 #define nir_foreach_block(block, impl) \
2158 for (nir_block *block = nir_start_block(impl); block != NULL; \
2159 block = nir_block_cf_tree_next(block))
2161 #define nir_foreach_block_safe(block, impl) \
2162 for (nir_block *block = nir_start_block(impl), \
2163 *next = nir_block_cf_tree_next(block); \
2165 block = next, next = nir_block_cf_tree_next(block))
2167 #define nir_foreach_block_reverse(block, impl) \
2168 for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
2169 block = nir_block_cf_tree_prev(block))
2171 #define nir_foreach_block_reverse_safe(block, impl) \
2172 for (nir_block *block = nir_impl_last_block(impl), \
2173 *prev = nir_block_cf_tree_prev(block); \
2175 block = prev, prev = nir_block_cf_tree_prev(block))
2177 #define nir_foreach_block_in_cf_node(block, node) \
2178 for (nir_block *block = nir_cf_node_cf_tree_first(node); \
2179 block != nir_cf_node_cf_tree_next(node); \
2180 block = nir_block_cf_tree_next(block))
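/* Usage sketch (illustrative): the canonical shape of a per-instruction
 * pass over a whole function implementation:
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          ...
 *       }
 *    }
 */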
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);
void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
#ifdef DEBUG
void nir_validate_shader(nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

#include "util/debug.h"
static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}
#else
static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
#endif /* DEBUG */
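/* Note (illustrative): in debug builds, setting the environment variable
 * NIR_TEST_CLONE to a true value (e.g. NIR_TEST_CLONE=true) makes the pass
 * macros below clone the shader after every pass, stress-testing
 * nir_shader_clone().
 */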
#define _PASS(nir, do_pass) do {                                     \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(nir,                \
   nir_metadata_set_validation_flag(nir);                            \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(nir,                        \
   pass(nir, ##__VA_ARGS__);                                         \
)
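/* Usage sketch (illustrative): drivers typically run optimization passes in
 * a loop until no pass reports progress; 'nir' here is the caller's
 * nir_shader pointer:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *    } while (progress);
 *
 * NIR_PASS_V is the variant for passes whose return value is void or
 * ignored, e.g. NIR_PASS_V(nir, nir_lower_vars_to_ssa);
 */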
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);
void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);
int nir_gs_count_vertices(const nir_shader *shader);

bool nir_split_var_copies(nir_shader *shader);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
void nir_lower_var_copies(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);
void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *));
typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
void nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *),
                  nir_lower_io_options);
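/* Usage sketch (illustrative): lowering variable-based I/O to load/store
 * intrinsics.  'my_type_size' is a hypothetical callback returning how many
 * locations a type occupies in the driver's layout:
 *
 *    static int
 *    my_type_size(const struct glsl_type *type)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 my_type_size, (nir_lower_io_options)0);
 */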
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(nir_variable *var, gl_shader_stage stage);

void nir_lower_io_types(nir_shader *shader);
void nir_lower_vars_to_ssa(nir_shader *shader);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);

void nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
void nir_lower_load_const_to_scalar(nir_shader *shader);

bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);

void nir_lower_samplers(nir_shader *shader,
                        const struct gl_shader_program *shader_program);

bool nir_lower_system_values(nir_shader *shader);
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, i.e. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;
} nir_lower_tex_options;
bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
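/* Usage sketch (illustrative, with made-up sampler/texture indices):
 * saturating the s/t coordinates of sampler 0 and replicating .x across the
 * result of texture 2 (swizzle value 0 selects the x component):
 *
 *    nir_lower_tex_options opts = { 0 };
 *    opts.saturate_s = 1 << 0;
 *    opts.saturate_t = 1 << 0;
 *    opts.swizzle_result = 1 << 2;
 *    opts.swizzles[2][0] = 0;
 *    opts.swizzles[2][1] = 0;
 *    opts.swizzles[2][2] = 0;
 *    opts.swizzles[2][3] = 0;
 *    nir_lower_tex(shader, &opts);
 */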
bool nir_lower_idiv(nir_shader *shader);

void nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_clip_cull_distance_arrays(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

void nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
typedef struct nir_lower_wpos_ytransform_options {
   int state_tokens[5];

   bool fs_coord_origin_upper_left :1;
   bool fs_coord_origin_lower_left :1;
   bool fs_coord_pixel_center_integer :1;
   bool fs_coord_pixel_center_half_integer :1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader);
typedef struct nir_lower_drawpixels_options {
   int texcoord_state_tokens[5];
   int scale_state_tokens[5];
   int bias_state_tokens[5];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps :1;
   bool scale_and_bias :1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);
typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx; /* swizzle over r001 if true */
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);

void nir_lower_atomics(nir_shader *shader,
                       const struct gl_shader_program *shader_program);
void nir_lower_to_source_mods(nir_shader *shader);

bool nir_lower_gs_intrinsics(nir_shader *shader);
typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8)
} nir_lower_doubles_options;

void nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
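/* Usage sketch (illustrative): the options form a bitmask, so a driver
 * without native double rcp/sqrt/rsq support would request, e.g.:
 *
 *    nir_lower_doubles(shader,
 *                      nir_lower_drcp | nir_lower_dsqrt | nir_lower_drsq);
 */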
void nir_lower_double_pack(nir_shader *shader);
bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

void nir_convert_to_ssa_impl(nir_function_impl *impl);
void nir_convert_to_ssa(nir_shader *shader);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);
/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
void nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
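/* Example (illustrative): a backend that register-allocates everything would
 * call nir_convert_from_ssa(shader, false), while one that consumes SSA
 * values directly and only needs phi webs eliminated would pass true.
 */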
bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_sweep(nir_shader *shader);
nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);