/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include "util/debug.h"
#include "nir_opcodes.h"

#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 4
#define NIR_MAX_MATRIX_COLUMNS 4
typedef uint8_t nir_component_mask_t;
/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field,   \
                        type_field, type_value)           \
static inline out_type *                                  \
name(const in_type *parent)                               \
{                                                          \
   assert(parent && parent->type_field == type_value);     \
   return exec_node_data(out_type, parent, field);         \
}
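/* Illustrative sketch (not part of the header): NIR_DEFINE_CAST is how the
 * nir_instr_as_*() and nir_cf_node_as_*() helpers further down are generated.
 * A hypothetical wrapper type would use it like this:
 *
 *    NIR_DEFINE_CAST(my_node_as_foo, my_node, my_foo, node, kind, MY_KIND_FOO)
 *
 * which expands to a `static inline my_foo *my_node_as_foo(const my_node *)`
 * that asserts on the kind field and then calls exec_node_data() to recover
 * the containing struct from the embedded field.
 */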
/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
} nir_state_slot;
typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_shader_temp     = (1 << 2),
   nir_var_function_temp   = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_mem_ubo         = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_mem_ssbo        = (1 << 7),
   nir_var_mem_shared      = (1 << 8),
   nir_var_mem_global      = (1 << 9),
} nir_variable_mode;
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne  = 1, /* round to nearest even */
   nir_rounding_mode_ru    = 2, /* round up */
   nir_rounding_mode_rd    = 3, /* round down */
   nir_rounding_mode_rtz   = 4, /* round towards zero */
} nir_rounding_mode;
#define nir_const_value_to_array(arr, c, components, m) \
{ \
   for (unsigned i = 0; i < components; ++i) \
      arr[i] = c[i].m; \
}

static inline nir_const_value
nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   switch (bit_size) {
   case 1:  v.b   = x;  break;
   case 8:  v.u8  = x;  break;
   case 16: v.u16 = x;  break;
   case 32: v.u32 = x;  break;
   case 64: v.u64 = x;  break;
   default:
      unreachable("Invalid bit size");
   }

   return v;
}
static inline nir_const_value
nir_const_value_for_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   assert(bit_size <= 64);
   if (bit_size < 64) {
      assert(i >= (-(1ll << (bit_size - 1))));
      assert(i < (1ll << (bit_size - 1)));
   }

   return nir_const_value_for_raw_uint(i, bit_size);
}

static inline nir_const_value
nir_const_value_for_uint(uint64_t u, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   assert(bit_size <= 64);
   if (bit_size < 64)
      assert(u < (1ull << bit_size));

   return nir_const_value_for_raw_uint(u, bit_size);
}

static inline nir_const_value
nir_const_value_for_bool(bool b, unsigned bit_size)
{
   /* Booleans use a 0/-1 convention */
   return nir_const_value_for_int(-(int)b, bit_size);
}

/* This one isn't inline because it requires half-float conversion */
nir_const_value nir_const_value_for_float(double b, unsigned bit_size);
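/* Illustrative sketch (not part of the header): these constructors are the
 * usual way to build an immediate of a given bit size, e.g.
 *
 *    nir_const_value five = nir_const_value_for_int(5, 32);    // five.i32 == 5
 *    nir_const_value yes  = nir_const_value_for_bool(true, 1); // yes.b == true
 *
 * The bool case stores -1 (all bits set) for true, per the 0/-1 convention
 * noted above.
 */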
static inline int64_t
nir_const_value_as_int(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1:  return -(int)value.b;
   case 8:  return value.i8;
   case 16: return value.i16;
   case 32: return value.i32;
   case 64: return value.i64;
   default:
      unreachable("Invalid bit size");
   }
}

static inline uint64_t
nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   case 1:  return value.b;
   case 8:  return value.u8;
   case 16: return value.u16;
   case 32: return value.u32;
   case 64: return value.u64;
   default:
      unreachable("Invalid bit size");
   }
}

static inline bool
nir_const_value_as_bool(nir_const_value value, unsigned bit_size)
{
   int64_t i = nir_const_value_as_int(value, bit_size);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}

/* This one isn't inline because it requires half-float conversion */
double nir_const_value_as_float(nir_const_value value, unsigned bit_size);
typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[NIR_MAX_VEC_COMPONENTS];

   /* We could get this from the var->type but it makes clone *much* easier
    * not to have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be
 * redeclared with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;

/**
 * Enum keeping track of how a variable was declared.
 */
typedef enum {
   /**
    * Normal declaration.
    */
   nir_var_declared_normally = 0,

   /**
    * Variable is implicitly generated by the compiler and should not be
    * visible via the API.
    */
   nir_var_hidden,
} nir_var_declaration_type;
/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */
typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;
      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned invariant:1;
331 * Can this variable be coalesced with another?
333 * This is set by nir_lower_io_to_temporaries to say that any
334 * copies involving this variable should stay put. Propagating it can
335 * duplicate the resulting load/store, which is not wanted, and may
336 * result in a load/store of the variable with an indirect offset which
337 * the backend may not be able to handle.
339 unsigned cannot_coalesce
:1;
      /**
       * When separate shader programs are enabled, only input/outputs between
       * the stages of a multi-stage separate program can be safely removed
       * from the shader interface. Other input/outputs must remain active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;
352 * Interpolation mode for shader inputs / outputs
354 * \sa glsl_interp_mode
356 unsigned interpolation
:2;
359 * If non-zero, then this variable may be packed along with other variables
360 * into a single varying slot, so this offset should be applied when
361 * accessing components. For example, an offset of 1 means that the x
362 * component of this variable is actually stored in component y of the
363 * location specified by \c location.
365 unsigned location_frac
:2;
368 * If true, this variable represents an array of scalars that should
369 * be tightly packed. In other words, consecutive array elements
370 * should be stored one component apart, rather than one slot apart.
375 * Whether this is a fragment shader output implicitly initialized with
376 * the previous contents of the specified render target at the
377 * framebuffer location corresponding to this shader invocation.
379 unsigned fb_fetch_output
:1;
382 * Non-zero if this variable is considered bindless as defined by
383 * ARB_bindless_texture.
      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * Was a transform feedback buffer set in the shader?
       */
      unsigned explicit_xfb_buffer:1;

      /**
       * Was a transform feedback stride set in the shader?
       */
      unsigned explicit_xfb_stride:1;

      /**
       * Was an explicit offset set in the shader?
       */
      unsigned explicit_offset:1;
408 * \brief Layout qualifier for gl_FragDepth.
410 * This is not equal to \c ir_depth_layout_none if and only if this
411 * variable is \c gl_FragDepth and a layout qualifier is specified.
413 nir_depth_layout depth_layout
;
416 * Storage location of the base of this variable
418 * The precise meaning of this field depends on the nature of the variable.
420 * - Vertex shader input: one of the values from \c gl_vert_attrib.
421 * - Vertex shader output: one of the values from \c gl_varying_slot.
422 * - Geometry shader input: one of the values from \c gl_varying_slot.
423 * - Geometry shader output: one of the values from \c gl_varying_slot.
424 * - Fragment shader input: one of the values from \c gl_varying_slot.
425 * - Fragment shader output: one of the values from \c gl_frag_result.
426 * - Uniforms: Per-stage uniform slot number for default uniform block.
427 * - Uniforms: Index within the uniform block definition for UBO members.
428 * - Non-UBO Uniforms: uniform slot number.
429 * - Other: This field is not currently used.
431 * If the variable is a uniform, shader input, or shader output, and the
432 * slot has not been assigned, the value will be -1.
437 * The actual location of the variable in the IR. Only valid for inputs
440 unsigned int driver_location
;
443 * Vertex stream output identifier.
445 * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
446 * stream of the i-th component.
451 * output index for dual source blending.
456 * Descriptor set binding for sampler or UBO.
461 * Initial binding point for a sampler or UBO.
463 * For array types, this represents the binding point for the first element.
468 * Location an atomic counter or transform feedback is stored at.
473 * Transform feedback buffer.
478 * Transform feedback stride.
483 * How the variable was declared. See nir_var_declaration_type.
485 * This is used to detect variables generated by the compiler, so should
486 * not be visible via the API.
488 unsigned how_declared
:2;
491 * ARB_shader_image_load_store qualifiers.
494 enum gl_access_qualifier access
;
496 /** Image internal format if specified explicitly, otherwise GL_NONE. */
502 * Built-in state that backs this uniform
504 * Once set at variable creation, \c state_slots must remain invariant.
505 * This is because, ideally, this array would be shared by all clones of
506 * this variable in the IR tree. In other words, we'd really like for it
507 * to be a fly-weight.
509 * If the variable is not a uniform, \c num_state_slots will be zero and
510 * \c state_slots will be \c NULL.
513 unsigned num_state_slots
; /**< Number of state slots used */
514 nir_state_slot
*state_slots
; /**< State descriptors. */
518 * Constant expression assigned in the initializer of the variable
520 * This field should only be used temporarily by creators of NIR shaders
521 * and then lower_constant_initializers can be used to get rid of them.
522 * Most of the rest of NIR ignores this field or asserts that it's NULL.
524 nir_constant
*constant_initializer
;
   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;

   /**
    * Description of per-member data for per-member struct variables
    *
    * This is used for variables which are actually an amalgamation of
    * multiple entities such as a struct of built-in values or a struct of
    * inputs each with their own layout specifier.  This is only allowed on
    * variables with a struct or array of array of struct type.
    */
   unsigned num_members;
   struct nir_variable_data *members;
} nir_variable;
#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)

static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_function_temp;
}
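/* Illustrative sketch (not part of the header): the foreach macros above are
 * the usual way to walk a variable list; assuming a shader whose input
 * variables live in an exec_list called sh->inputs (as in this era of NIR),
 * counting them looks like:
 *
 *    unsigned num_inputs = 0;
 *    nir_foreach_variable(var, &sh->inputs)
 *       num_inputs++;
 */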
558 typedef struct nir_register
{
559 struct exec_node node
;
561 unsigned num_components
; /** < number of vector components */
562 unsigned num_array_elems
; /** < size of array (0 for no array) */
564 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
567 /** generic register index. */
570 /** only for debug purposes, can be NULL */
573 /** set of nir_srcs where this register is used (read from) */
574 struct list_head uses
;
576 /** set of nir_dests where this register is defined (written to) */
577 struct list_head defs
;
579 /** set of nir_ifs where this register is used as a condition */
580 struct list_head if_uses
;
583 #define nir_foreach_register(reg, reg_list) \
584 foreach_list_typed(nir_register, reg, node, reg_list)
585 #define nir_foreach_register_safe(reg, reg_list) \
586 foreach_list_typed_safe(nir_register, reg, node, reg_list)
588 typedef enum PACKED
{
590 nir_instr_type_deref
,
593 nir_instr_type_intrinsic
,
594 nir_instr_type_load_const
,
596 nir_instr_type_ssa_undef
,
598 nir_instr_type_parallel_copy
,
601 typedef struct nir_instr
{
602 struct exec_node node
;
603 struct nir_block
*block
;
606 /* A temporary for optimization and analysis passes to use for storing
607 * flags. For instance, DCE uses this to store the "dead/live" info.
611 /** generic instruction index. */
615 static inline nir_instr
*
616 nir_instr_next(nir_instr
*instr
)
618 struct exec_node
*next
= exec_node_get_next(&instr
->node
);
619 if (exec_node_is_tail_sentinel(next
))
622 return exec_node_data(nir_instr
, next
, node
);
625 static inline nir_instr
*
626 nir_instr_prev(nir_instr
*instr
)
628 struct exec_node
*prev
= exec_node_get_prev(&instr
->node
);
629 if (exec_node_is_head_sentinel(prev
))
632 return exec_node_data(nir_instr
, prev
, node
);
636 nir_instr_is_first(const nir_instr
*instr
)
638 return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr
->node
));
642 nir_instr_is_last(const nir_instr
*instr
)
644 return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr
->node
));
647 typedef struct nir_ssa_def
{
648 /** for debugging only, can be NULL */
651 /** generic SSA definition index. */
654 /** Index into the live_in and live_out bitfields */
657 /** Instruction which produces this SSA value. */
658 nir_instr
*parent_instr
;
660 /** set of nir_instrs where this register is used (read from) */
661 struct list_head uses
;
663 /** set of nir_ifs where this register is used as a condition */
664 struct list_head if_uses
;
666 uint8_t num_components
;
668 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
676 struct nir_src
*indirect
; /** < NULL for no indirect offset */
677 unsigned base_offset
;
679 /* TODO use-def chain goes here */
683 nir_instr
*parent_instr
;
684 struct list_head def_link
;
687 struct nir_src
*indirect
; /** < NULL for no indirect offset */
688 unsigned base_offset
;
690 /* TODO def-use chain goes here */
695 typedef struct nir_src
{
697 /** Instruction that consumes this value as a source. */
698 nir_instr
*parent_instr
;
699 struct nir_if
*parent_if
;
702 struct list_head use_link
;
712 static inline nir_src
715 nir_src src
= { { NULL
} };
719 #define NIR_SRC_INIT nir_src_init()
721 #define nir_foreach_use(src, reg_or_ssa_def) \
722 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
724 #define nir_foreach_use_safe(src, reg_or_ssa_def) \
725 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
727 #define nir_foreach_if_use(src, reg_or_ssa_def) \
728 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
730 #define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
731 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
742 static inline nir_dest
745 nir_dest dest
= { { { NULL
} } };
749 #define NIR_DEST_INIT nir_dest_init()
751 #define nir_foreach_def(dest, reg) \
752 list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
754 #define nir_foreach_def_safe(dest, reg) \
755 list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
757 static inline nir_src
758 nir_src_for_ssa(nir_ssa_def
*def
)
760 nir_src src
= NIR_SRC_INIT
;
768 static inline nir_src
769 nir_src_for_reg(nir_register
*reg
)
771 nir_src src
= NIR_SRC_INIT
;
775 src
.reg
.indirect
= NULL
;
776 src
.reg
.base_offset
= 0;
781 static inline nir_dest
782 nir_dest_for_reg(nir_register
*reg
)
784 nir_dest dest
= NIR_DEST_INIT
;
791 static inline unsigned
792 nir_src_bit_size(nir_src src
)
794 return src
.is_ssa
? src
.ssa
->bit_size
: src
.reg
.reg
->bit_size
;
797 static inline unsigned
798 nir_src_num_components(nir_src src
)
800 return src
.is_ssa
? src
.ssa
->num_components
: src
.reg
.reg
->num_components
;
804 nir_src_is_const(nir_src src
)
807 src
.ssa
->parent_instr
->type
== nir_instr_type_load_const
;
810 static inline unsigned
811 nir_dest_bit_size(nir_dest dest
)
813 return dest
.is_ssa
? dest
.ssa
.bit_size
: dest
.reg
.reg
->bit_size
;
816 static inline unsigned
817 nir_dest_num_components(nir_dest dest
)
819 return dest
.is_ssa
? dest
.ssa
.num_components
: dest
.reg
.reg
->num_components
;
822 void nir_src_copy(nir_src
*dest
, const nir_src
*src
, void *instr_or_if
);
823 void nir_dest_copy(nir_dest
*dest
, const nir_dest
*src
, nir_instr
*instr
);
829 * \name input modifiers
833 * For inputs interpreted as floating point, flips the sign bit. For
834 * inputs interpreted as integers, performs the two's complement negation.
839 * Clears the sign bit for floating point values, and computes the integer
840 * absolute value for integers. Note that the negate modifier acts after
841 * the absolute value modifier, therefore if both are set then all inputs
842 * will become negative.
848 * For each input component, says which component of the register it is
849 * chosen from. Note that which elements of the swizzle are used and which
850 * are ignored are based on the write mask for most opcodes - for example,
851 * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
852 * a swizzle of {2, x, 1, 0} where x means "don't care."
854 uint8_t swizzle
[NIR_MAX_VEC_COMPONENTS
];
861 * \name saturate output modifier
863 * Only valid for opcodes that output floating-point numbers. Clamps the
864 * output to between 0.0 and 1.0 inclusive.
869 unsigned write_mask
: NIR_MAX_VEC_COMPONENTS
; /* ignored if dest.is_ssa is true */
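/* Worked example (illustrative, not part of the header): for the statement
 * "foo.xzw = bar.zyx" mentioned above, the ALU dest has write_mask = 1101b
 * (x, z, w written) and the source swizzle is {2, x, 1, 0}: dest component x
 * reads bar.z (2), dest component z reads bar.y (1), dest component w reads
 * bar.x (0), and the masked-out y slot is a don't-care.
 */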
/** NIR sized and unsized types
 *
 * The values in this enum are carefully chosen so that the sized type is
 * just the unsized type OR the number of bits.
 */
typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_int =       2,
   nir_type_uint =      4,
   nir_type_bool =      6,
   nir_type_float =     128,
   nir_type_bool1 =     1  | nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int1 =      1  | nir_type_int,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint1 =     1  | nir_type_uint,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;

#define NIR_ALU_TYPE_SIZE_MASK 0x79
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
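/* Worked example (illustrative, not part of the header): because a sized type
 * is just the unsized base type OR'd with its bit width,
 *
 *    nir_alu_type_get_type_size(nir_type_float16) == 16
 *    nir_alu_type_get_base_type(nir_type_float16) == nir_type_float
 *    (nir_type_uint | 64)                         == nir_type_uint64
 *
 * so converting between sized and unsized forms is just a pair of bit masks.
 */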
915 static inline nir_alu_type
916 nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type
)
920 return nir_type_bool1
;
923 return nir_type_uint32
;
926 return nir_type_int32
;
928 case GLSL_TYPE_UINT16
:
929 return nir_type_uint16
;
931 case GLSL_TYPE_INT16
:
932 return nir_type_int16
;
934 case GLSL_TYPE_UINT8
:
935 return nir_type_uint8
;
937 return nir_type_int8
;
938 case GLSL_TYPE_UINT64
:
939 return nir_type_uint64
;
941 case GLSL_TYPE_INT64
:
942 return nir_type_int64
;
944 case GLSL_TYPE_FLOAT
:
945 return nir_type_float32
;
947 case GLSL_TYPE_FLOAT16
:
948 return nir_type_float16
;
950 case GLSL_TYPE_DOUBLE
:
951 return nir_type_float64
;
954 case GLSL_TYPE_SAMPLER
:
955 case GLSL_TYPE_IMAGE
:
956 case GLSL_TYPE_ATOMIC_UINT
:
957 case GLSL_TYPE_STRUCT
:
958 case GLSL_TYPE_INTERFACE
:
959 case GLSL_TYPE_ARRAY
:
961 case GLSL_TYPE_SUBROUTINE
:
962 case GLSL_TYPE_FUNCTION
:
963 case GLSL_TYPE_ERROR
:
964 return nir_type_invalid
;
967 unreachable("unknown type");
970 static inline nir_alu_type
971 nir_get_nir_type_for_glsl_type(const struct glsl_type
*type
)
973 return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type
));
nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);

static inline nir_op
nir_op_vec(unsigned components)
{
   switch (components) {
   case 1: return nir_op_mov;
   case 2: return nir_op_vec2;
   case 3: return nir_op_vec3;
   case 4: return nir_op_vec4;
   default: unreachable("bad component count");
   }
}
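/* Illustrative sketch (not part of the header): nir_op_vec() picks the opcode
 * that collects N scalars into an N-component vector:
 *
 *    nir_op op = nir_op_vec(3);   // yields nir_op_vec3
 *
 * The single-component case degenerates to a plain nir_op_mov.
 */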
typedef enum {
   /**
    * Operation where the first two sources are commutative.
    *
    * For 2-source operations, this is just mathematical commutativity.  Some
    * 3-source operations, like ffma, are only commutative in the first two
    * sources.
    */
   NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;
1006 unsigned num_inputs
;
1009 * The number of components in the output
1011 * If non-zero, this is the size of the output and input sizes are
1012 * explicitly given; swizzle and writemask are still in effect, but if
1013 * the output component is masked out, then the input component may
1016 * If zero, the opcode acts in the standard, per-component manner; the
1017 * operation is performed on each component (except the ones that are
1018 * masked out) with the input being taken from the input swizzle for
1021 * The size of some of the inputs may be given (i.e. non-zero) even
1022 * though output_size is zero; in that case, the inputs with a zero
1023 * size act per-component, while the inputs with non-zero size don't.
1025 unsigned output_size
;
   /**
    * The type of vector that the instruction outputs.  Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;
1035 * The number of components in each input
1037 unsigned input_sizes
[NIR_MAX_VEC_COMPONENTS
];
1040 * The type of vector that each input takes. Note that negate and
1041 * absolute value are only allowed on inputs with int or float type and
1042 * behave differently on the two.
1044 nir_alu_type input_types
[NIR_MAX_VEC_COMPONENTS
];
1046 nir_op_algebraic_property algebraic_properties
;
1048 /* Whether this represents a numeric conversion opcode */
1052 extern const nir_op_info nir_op_infos
[nir_num_opcodes
];
1054 typedef struct nir_alu_instr
{
   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either.  This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact:1;

   /**
    * Indicates that this instruction does not cause wrapping to occur, in the
    * form of overflow or underflow.
    */
   bool no_signed_wrap:1;
   bool no_unsigned_wrap:1;
1079 void nir_alu_src_copy(nir_alu_src
*dest
, const nir_alu_src
*src
,
1080 nir_alu_instr
*instr
);
1081 void nir_alu_dest_copy(nir_alu_dest
*dest
, const nir_alu_dest
*src
,
1082 nir_alu_instr
*instr
);
/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}

static inline nir_component_mask_t
nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
{
   nir_component_mask_t read_mask = 0;
   for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
      if (!nir_alu_instr_channel_used(instr, src, c))
         continue;

      read_mask |= (1 << instr->src[src].swizzle[c]);
   }
   return read_mask;
}
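/* Worked example (illustrative, not part of the header): for a per-component
 * op whose dest write_mask is 0101b and whose source swizzle is {3, 0, 1, 2},
 * only dest channels 0 and 2 are used, so the read mask collects
 * swizzle[0] = 3 and swizzle[2] = 1, i.e. 1010b.
 */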
1109 * Get the number of channels used for a source
1111 static inline unsigned
1112 nir_ssa_alu_instr_src_components(const nir_alu_instr
*instr
, unsigned src
)
1114 if (nir_op_infos
[instr
->op
].input_sizes
[src
] > 0)
1115 return nir_op_infos
[instr
->op
].input_sizes
[src
];
1117 return nir_dest_num_components(instr
->dest
.dest
);
1120 bool nir_const_value_negative_equal(nir_const_value c1
, nir_const_value c2
,
1121 nir_alu_type full_type
);
1123 bool nir_alu_srcs_equal(const nir_alu_instr
*alu1
, const nir_alu_instr
*alu2
,
1124 unsigned src1
, unsigned src2
);
1126 bool nir_alu_srcs_negative_equal(const nir_alu_instr
*alu1
,
1127 const nir_alu_instr
*alu2
,
1128 unsigned src1
, unsigned src2
);
1132 nir_deref_type_array
,
1133 nir_deref_type_array_wildcard
,
1134 nir_deref_type_ptr_as_array
,
1135 nir_deref_type_struct
,
1136 nir_deref_type_cast
,
1142 /** The type of this deref instruction */
1143 nir_deref_type deref_type
;
1145 /** The mode of the underlying variable */
1146 nir_variable_mode mode
;
1148 /** The dereferenced type of the resulting pointer value */
1149 const struct glsl_type
*type
;
1152 /** Variable being dereferenced if deref_type is a deref_var */
1155 /** Parent deref if deref_type is not deref_var */
1159 /** Additional deref parameters */
1170 unsigned ptr_stride
;
1174 /** Destination to store the resulting "pointer" */
1178 static inline nir_deref_instr
*nir_src_as_deref(nir_src src
);
1180 static inline nir_deref_instr
*
1181 nir_deref_instr_parent(const nir_deref_instr
*instr
)
1183 if (instr
->deref_type
== nir_deref_type_var
)
1186 return nir_src_as_deref(instr
->parent
);
1189 static inline nir_variable
*
1190 nir_deref_instr_get_variable(const nir_deref_instr
*instr
)
1192 while (instr
->deref_type
!= nir_deref_type_var
) {
1193 if (instr
->deref_type
== nir_deref_type_cast
)
1196 instr
= nir_deref_instr_parent(instr
);
1202 bool nir_deref_instr_has_indirect(nir_deref_instr
*instr
);
1203 bool nir_deref_instr_has_complex_use(nir_deref_instr
*instr
);
1205 bool nir_deref_instr_remove_if_unused(nir_deref_instr
*instr
);
1207 unsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr
*instr
);
1212 struct nir_function
*callee
;
1214 unsigned num_params
;
1218 #include "nir_intrinsics.h"
1220 #define NIR_INTRINSIC_MAX_CONST_INDEX 4
/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
1251 nir_intrinsic_op intrinsic
;
1255 /** number of components if this is a vectorized intrinsic
1257 * Similarly to ALU operations, some intrinsics are vectorized.
1258 * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
1259 * For vectorized intrinsics, the num_components field specifies the
1260 * number of destination components and the number of source components
1261 * for all sources with nir_intrinsic_infos.src_components[i] == 0.
1263 uint8_t num_components
;
1265 int const_index
[NIR_INTRINSIC_MAX_CONST_INDEX
];
1268 } nir_intrinsic_instr
;
1270 static inline nir_variable
*
1271 nir_intrinsic_get_var(nir_intrinsic_instr
*intrin
, unsigned i
)
1273 return nir_deref_instr_get_variable(nir_src_as_deref(intrin
->src
[i
]));
/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * whether the intrinsic can be safely eliminated if none of its output
    * values are being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),
1291 * Whether the intrinsic can be reordered with respect to any other
1292 * intrinsic, i.e. whether the only reordering dependencies of the
1293 * intrinsic are due to the register reads/writes.
1295 NIR_INTRINSIC_CAN_REORDER
= (1 << 1),
1296 } nir_intrinsic_semantic_flag
;
1299 * \name NIR intrinsics const-index flag
1301 * Indicates the usage of a const_index slot.
1303 * \sa nir_intrinsic_info::index_map
   /**
    * Generally instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,
1313 * For store instructions, a writemask for the store.
1315 NIR_INTRINSIC_WRMASK
,
1318 * The stream-id for GS emit_vertex/end_primitive intrinsics.
1320 NIR_INTRINSIC_STREAM_ID
,
1323 * The clip-plane id for load_user_clip_plane intrinsic.
1325 NIR_INTRINSIC_UCP_ID
,
1328 * The amount of data, starting from BASE, that this instruction may
1329 * access. This is used to provide bounds if the offset is not constant.
1331 NIR_INTRINSIC_RANGE
,
1334 * The Vulkan descriptor set for vulkan_resource_index intrinsic.
1336 NIR_INTRINSIC_DESC_SET
,
1339 * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
1341 NIR_INTRINSIC_BINDING
,
1346 NIR_INTRINSIC_COMPONENT
,
1349 * Interpolation mode (only meaningful for FS inputs).
1351 NIR_INTRINSIC_INTERP_MODE
,
1354 * A binary nir_op to use when performing a reduction or scan operation
1356 NIR_INTRINSIC_REDUCTION_OP
,
1359 * Cluster size for reduction operations
1361 NIR_INTRINSIC_CLUSTER_SIZE
,
1364 * Parameter index for a load_param intrinsic
1366 NIR_INTRINSIC_PARAM_IDX
,
1369 * Image dimensionality for image intrinsics
1371 * One of GLSL_SAMPLER_DIM_*
1373 NIR_INTRINSIC_IMAGE_DIM
,
1376 * Non-zero if we are accessing an array image
1378 NIR_INTRINSIC_IMAGE_ARRAY
,
1381 * Image format for image intrinsics
1383 NIR_INTRINSIC_FORMAT
,
1386 * Access qualifiers for image and memory access intrinsics
1388 NIR_INTRINSIC_ACCESS
,
   /**
    * Alignment for offsets and addresses
    *
    * These two parameters specify an alignment in terms of a multiplier and
    * an offset.  The offset or address parameter X of the intrinsic is
    * guaranteed to satisfy the following:
    *
    *                (X - align_offset) % align_mul == 0
    */
   NIR_INTRINSIC_ALIGN_MUL,
   NIR_INTRINSIC_ALIGN_OFFSET,
1403 * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
1405 NIR_INTRINSIC_DESC_TYPE
,
1408 * The nir_alu_type of a uniform/input/output
1413 * The swizzle mask for the instructions
1414 * SwizzleInvocationsAMD and SwizzleInvocationsMaskedAMD
1416 NIR_INTRINSIC_SWIZZLE_MASK
,
1418 /* Separate source/dest access flags for copies */
1419 NIR_INTRINSIC_SRC_ACCESS
= 21,
1420 NIR_INTRINSIC_DST_ACCESS
= 22,
1422 NIR_INTRINSIC_NUM_INDEX_FLAGS
,
1424 } nir_intrinsic_index_flag
;
1426 #define NIR_INTRINSIC_MAX_INPUTS 5
1431 unsigned num_srcs
; /** < number of register/SSA inputs */
   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.  If this value is -1, the
    * intrinsic consumes however many components are provided and it is not
    * validated at all.
    */
   int src_components[NIR_INTRINSIC_MAX_INPUTS];
1444 /** number of components of the output register
1446 * If this value is 0, the number of components is given by the
1447 * num_components field of nir_intrinsic_instr.
1449 unsigned dest_components
;
1451 /** bitfield of legal bit sizes */
1452 unsigned dest_bit_sizes
;
1454 /** the number of constant indices used by the intrinsic */
1455 unsigned num_indices
;
1457 /** indicates the usage of intr->const_index[n] */
1458 unsigned index_map
[NIR_INTRINSIC_NUM_INDEX_FLAGS
];
1460 /** semantic flags for calls to this intrinsic */
1461 nir_intrinsic_semantic_flag flags
;
1462 } nir_intrinsic_info
;
1464 extern const nir_intrinsic_info nir_intrinsic_infos
[nir_num_intrinsics
];
1466 static inline unsigned
1467 nir_intrinsic_src_components(nir_intrinsic_instr
*intr
, unsigned srcn
)
1469 const nir_intrinsic_info
*info
= &nir_intrinsic_infos
[intr
->intrinsic
];
1470 assert(srcn
< info
->num_srcs
);
1471 if (info
->src_components
[srcn
] > 0)
1472 return info
->src_components
[srcn
];
1473 else if (info
->src_components
[srcn
] == 0)
1474 return intr
->num_components
;
1476 return nir_src_num_components(intr
->src
[srcn
]);
1479 static inline unsigned
1480 nir_intrinsic_dest_components(nir_intrinsic_instr
*intr
)
1482 const nir_intrinsic_info
*info
= &nir_intrinsic_infos
[intr
->intrinsic
];
1483 if (!info
->has_dest
)
1485 else if (info
->dest_components
)
1486 return info
->dest_components
;
1488 return intr
->num_components
;
1491 #define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
1492 static inline type \
1493 nir_intrinsic_##name(const nir_intrinsic_instr *instr) \
1495 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1496 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1497 return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
1499 static inline void \
1500 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
1502 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1503 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1504 instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
1507 INTRINSIC_IDX_ACCESSORS(write_mask
, WRMASK
, unsigned)
1508 INTRINSIC_IDX_ACCESSORS(base
, BASE
, int)
1509 INTRINSIC_IDX_ACCESSORS(stream_id
, STREAM_ID
, unsigned)
1510 INTRINSIC_IDX_ACCESSORS(ucp_id
, UCP_ID
, unsigned)
1511 INTRINSIC_IDX_ACCESSORS(range
, RANGE
, unsigned)
1512 INTRINSIC_IDX_ACCESSORS(desc_set
, DESC_SET
, unsigned)
1513 INTRINSIC_IDX_ACCESSORS(binding
, BINDING
, unsigned)
1514 INTRINSIC_IDX_ACCESSORS(component
, COMPONENT
, unsigned)
1515 INTRINSIC_IDX_ACCESSORS(interp_mode
, INTERP_MODE
, unsigned)
1516 INTRINSIC_IDX_ACCESSORS(reduction_op
, REDUCTION_OP
, unsigned)
1517 INTRINSIC_IDX_ACCESSORS(cluster_size
, CLUSTER_SIZE
, unsigned)
1518 INTRINSIC_IDX_ACCESSORS(param_idx
, PARAM_IDX
, unsigned)
1519 INTRINSIC_IDX_ACCESSORS(image_dim
, IMAGE_DIM
, enum glsl_sampler_dim
)
1520 INTRINSIC_IDX_ACCESSORS(image_array
, IMAGE_ARRAY
, bool)
1521 INTRINSIC_IDX_ACCESSORS(access
, ACCESS
, enum gl_access_qualifier
)
1522 INTRINSIC_IDX_ACCESSORS(src_access
, SRC_ACCESS
, enum gl_access_qualifier
)
1523 INTRINSIC_IDX_ACCESSORS(dst_access
, DST_ACCESS
, enum gl_access_qualifier
)
1524 INTRINSIC_IDX_ACCESSORS(format
, FORMAT
, unsigned)
1525 INTRINSIC_IDX_ACCESSORS(align_mul
, ALIGN_MUL
, unsigned)
1526 INTRINSIC_IDX_ACCESSORS(align_offset
, ALIGN_OFFSET
, unsigned)
1527 INTRINSIC_IDX_ACCESSORS(desc_type
, DESC_TYPE
, unsigned)
1528 INTRINSIC_IDX_ACCESSORS(type
, TYPE
, nir_alu_type
)
1529 INTRINSIC_IDX_ACCESSORS(swizzle_mask
, SWIZZLE_MASK
, unsigned)
static inline void
nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
                        unsigned align_mul, unsigned align_offset)
{
   assert(util_is_power_of_two_nonzero(align_mul));
   assert(align_offset < align_mul);
   nir_intrinsic_set_align_mul(intrin, align_mul);
   nir_intrinsic_set_align_offset(intrin, align_offset);
}

/** Returns a simple alignment for a load/store intrinsic offset
 *
 * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
 * and ALIGN_OFFSET parameters, this helper takes both into account and
 * provides a single simple alignment parameter.  The offset X is guaranteed
 * to satisfy X % align == 0.
 */
static inline unsigned
nir_intrinsic_align(const nir_intrinsic_instr *intrin)
{
   const unsigned align_mul = nir_intrinsic_align_mul(intrin);
   const unsigned align_offset = nir_intrinsic_align_offset(intrin);
   assert(align_offset < align_mul);
   return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
}
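/* Worked example (illustrative, not part of the header): a load with
 * align_mul = 16 and align_offset = 4 promises offsets of the form 16*k + 4.
 * nir_intrinsic_align() collapses that to the largest power of two dividing
 * every such offset: ffs(4) - 1 == 2, so it returns 1 << 2 == 4.  With
 * align_offset == 0 the full align_mul (16) is returned instead.
 */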
1557 /* Converts a image_deref_* intrinsic into a image_* one */
1558 void nir_rewrite_image_intrinsic(nir_intrinsic_instr
*instr
,
1559 nir_ssa_def
*handle
, bool bindless
);
1561 /* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
1563 nir_intrinsic_can_reorder(nir_intrinsic_instr
*instr
)
1565 if (instr
->intrinsic
== nir_intrinsic_load_deref
||
1566 instr
->intrinsic
== nir_intrinsic_load_ssbo
||
1567 instr
->intrinsic
== nir_intrinsic_bindless_image_load
||
1568 instr
->intrinsic
== nir_intrinsic_image_deref_load
||
1569 instr
->intrinsic
== nir_intrinsic_image_load
) {
1570 return nir_intrinsic_access(instr
) & ACCESS_CAN_REORDER
;
1572 const nir_intrinsic_info
*info
=
1573 &nir_intrinsic_infos
[instr
->intrinsic
];
1574 return (info
->flags
& NIR_INTRINSIC_CAN_ELIMINATE
) &&
1575 (info
->flags
& NIR_INTRINSIC_CAN_REORDER
);
1580 * \group texture information
1582 * This gives semantic information about textures which is useful to the
1583 * frontend, the backend, and lowering passes, but not the optimizer.
1588 nir_tex_src_projector
,
1589 nir_tex_src_comparator
, /* shadow comparator */
1593 nir_tex_src_min_lod
,
1594 nir_tex_src_ms_index
, /* MSAA sample index */
1595 nir_tex_src_ms_mcs
, /* MSAA compression value */
1598 nir_tex_src_texture_deref
, /* < deref pointing to the texture */
1599 nir_tex_src_sampler_deref
, /* < deref pointing to the sampler */
1600 nir_tex_src_texture_offset
, /* < dynamically uniform indirect offset */
1601 nir_tex_src_sampler_offset
, /* < dynamically uniform indirect offset */
1602 nir_tex_src_texture_handle
, /* < bindless texture handle */
1603 nir_tex_src_sampler_handle
, /* < bindless sampler handle */
1604 nir_tex_src_plane
, /* < selects plane for planar textures */
1605 nir_num_tex_src_types
1610 nir_tex_src_type src_type
;
1614 nir_texop_tex
, /**< Regular texture look-up */
1615 nir_texop_txb
, /**< Texture look-up with LOD bias */
1616 nir_texop_txl
, /**< Texture look-up with explicit LOD */
1617 nir_texop_txd
, /**< Texture look-up with partial derivatives */
1618 nir_texop_txf
, /**< Texel fetch with explicit LOD */
1619 nir_texop_txf_ms
, /**< Multisample texture fetch */
1620 nir_texop_txf_ms_fb
, /**< Multisample texture fetch from framebuffer */
1621 nir_texop_txf_ms_mcs
, /**< Multisample compression value fetch */
1622 nir_texop_txs
, /**< Texture size */
1623 nir_texop_lod
, /**< Texture lod query */
1624 nir_texop_tg4
, /**< Texture gather */
1625 nir_texop_query_levels
, /**< Texture levels query */
1626 nir_texop_texture_samples
, /**< Texture samples query */
1627 nir_texop_samples_identical
, /**< Query whether all samples are definitely
1635 enum glsl_sampler_dim sampler_dim
;
1636 nir_alu_type dest_type
;
1641 unsigned num_srcs
, coord_components
;
1642 bool is_array
, is_shadow
;
1645 * If is_shadow is true, whether this is the old-style shadow that outputs 4
1646 * components or the new-style shadow that outputs 1 component.
1648 bool is_new_style_shadow
;
1650 /* gather component selector */
1651 unsigned component
: 2;
1653 /* gather offsets */
1654 int8_t tg4_offsets
[4][2];
1656 /* True if the texture index or handle is not dynamically uniform */
1657 bool texture_non_uniform
;
1659 /* True if the sampler index or handle is not dynamically uniform */
1660 bool sampler_non_uniform
;
1662 /** The texture index
1664 * If this texture instruction has a nir_tex_src_texture_offset source,
1665 * then the texture index is given by texture_index + texture_offset.
1667 unsigned texture_index
;
1669 /** The size of the texture array or 0 if it's not an array */
1670 unsigned texture_array_size
;
1672 /** The sampler index
1674 * The following operations do not require a sampler and, as such, this
1675 * field should be ignored:
1677 * - nir_texop_txf_ms
1680 * - nir_texop_query_levels
1681 * - nir_texop_texture_samples
1682 * - nir_texop_samples_identical
1684 * If this texture instruction has a nir_tex_src_sampler_offset source,
1685 * then the sampler index is given by sampler_index + sampler_offset.
1687 unsigned sampler_index
;
1690 static inline unsigned
1691 nir_tex_instr_dest_size(const nir_tex_instr
*instr
)
1693 switch (instr
->op
) {
1694 case nir_texop_txs
: {
1696 switch (instr
->sampler_dim
) {
1697 case GLSL_SAMPLER_DIM_1D
:
1698 case GLSL_SAMPLER_DIM_BUF
:
1701 case GLSL_SAMPLER_DIM_2D
:
1702 case GLSL_SAMPLER_DIM_CUBE
:
1703 case GLSL_SAMPLER_DIM_MS
:
1704 case GLSL_SAMPLER_DIM_RECT
:
1705 case GLSL_SAMPLER_DIM_EXTERNAL
:
1706 case GLSL_SAMPLER_DIM_SUBPASS
:
1709 case GLSL_SAMPLER_DIM_3D
:
1713 unreachable("not reached");
1715 if (instr
->is_array
)
1723 case nir_texop_texture_samples
:
1724 case nir_texop_query_levels
:
1725 case nir_texop_samples_identical
:
1729 if (instr
->is_shadow
&& instr
->is_new_style_shadow
)
1736 /* Returns true if this texture operation queries something about the texture
1737 * rather than actually sampling it.
1740 nir_tex_instr_is_query(const nir_tex_instr
*instr
)
1742 switch (instr
->op
) {
1745 case nir_texop_texture_samples
:
1746 case nir_texop_query_levels
:
1747 case nir_texop_txf_ms_mcs
:
1754 case nir_texop_txf_ms
:
1755 case nir_texop_txf_ms_fb
:
1759 unreachable("Invalid texture opcode");
1764 nir_alu_instr_is_comparison(const nir_alu_instr
*instr
)
1766 switch (instr
->op
) {
1786 static inline nir_alu_type
1787 nir_tex_instr_src_type(const nir_tex_instr
*instr
, unsigned src
)
1789 switch (instr
->src
[src
].src_type
) {
1790 case nir_tex_src_coord
:
1791 switch (instr
->op
) {
1793 case nir_texop_txf_ms
:
1794 case nir_texop_txf_ms_fb
:
1795 case nir_texop_txf_ms_mcs
:
1796 case nir_texop_samples_identical
:
1797 return nir_type_int
;
1800 return nir_type_float
;
1803 case nir_tex_src_lod
:
1804 switch (instr
->op
) {
1807 return nir_type_int
;
1810 return nir_type_float
;
1813 case nir_tex_src_projector
:
1814 case nir_tex_src_comparator
:
1815 case nir_tex_src_bias
:
1816 case nir_tex_src_ddx
:
1817 case nir_tex_src_ddy
:
1818 return nir_type_float
;
1820 case nir_tex_src_offset
:
1821 case nir_tex_src_ms_index
:
1822 case nir_tex_src_texture_offset
:
1823 case nir_tex_src_sampler_offset
:
1824 return nir_type_int
;
1827 unreachable("Invalid texture source type");
1831 static inline unsigned
1832 nir_tex_instr_src_size(const nir_tex_instr
*instr
, unsigned src
)
1834 if (instr
->src
[src
].src_type
== nir_tex_src_coord
)
1835 return instr
->coord_components
;
1837 /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
1838 if (instr
->src
[src
].src_type
== nir_tex_src_ms_mcs
)
1841 if (instr
->src
[src
].src_type
== nir_tex_src_ddx
||
1842 instr
->src
[src
].src_type
== nir_tex_src_ddy
) {
1843 if (instr
->is_array
)
1844 return instr
->coord_components
- 1;
1846 return instr
->coord_components
;
1849 /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
1850 * the offset, since a cube maps to a single face.
1852 if (instr
->src
[src
].src_type
== nir_tex_src_offset
) {
1853 if (instr
->sampler_dim
== GLSL_SAMPLER_DIM_CUBE
)
1855 else if (instr
->is_array
)
1856 return instr
->coord_components
- 1;
1858 return instr
->coord_components
;
1865 nir_tex_instr_src_index(const nir_tex_instr
*instr
, nir_tex_src_type type
)
1867 for (unsigned i
= 0; i
< instr
->num_srcs
; i
++)
1868 if (instr
->src
[i
].src_type
== type
)
1874 void nir_tex_instr_add_src(nir_tex_instr
*tex
,
1875 nir_tex_src_type src_type
,
1878 void nir_tex_instr_remove_src(nir_tex_instr
*tex
, unsigned src_idx
);
1880 bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr
*tex
);
1887 nir_const_value value
[];
1888 } nir_load_const_instr
;
1890 #define nir_const_load_to_arr(arr, l, m) \
1892 nir_const_value_to_array(arr, l->value, l->def.num_components, m); \
1906 /* creates a new SSA variable in an undefined state */
1911 } nir_ssa_undef_instr
;
1914 struct exec_node node
;
1916 /* The predecessor block corresponding to this source */
1917 struct nir_block
*pred
;
1922 #define nir_foreach_phi_src(phi_src, phi) \
1923 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
1924 #define nir_foreach_phi_src_safe(phi_src, phi) \
1925 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
1930 struct exec_list srcs
; /** < list of nir_phi_src */
1936 struct exec_node node
;
1939 } nir_parallel_copy_entry
;
1941 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
1942 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
   /* A list of nir_parallel_copy_entrys.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * of a and b are swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;
1955 NIR_DEFINE_CAST(nir_instr_as_alu
, nir_instr
, nir_alu_instr
, instr
,
1956 type
, nir_instr_type_alu
)
1957 NIR_DEFINE_CAST(nir_instr_as_deref
, nir_instr
, nir_deref_instr
, instr
,
1958 type
, nir_instr_type_deref
)
1959 NIR_DEFINE_CAST(nir_instr_as_call
, nir_instr
, nir_call_instr
, instr
,
1960 type
, nir_instr_type_call
)
1961 NIR_DEFINE_CAST(nir_instr_as_jump
, nir_instr
, nir_jump_instr
, instr
,
1962 type
, nir_instr_type_jump
)
1963 NIR_DEFINE_CAST(nir_instr_as_tex
, nir_instr
, nir_tex_instr
, instr
,
1964 type
, nir_instr_type_tex
)
1965 NIR_DEFINE_CAST(nir_instr_as_intrinsic
, nir_instr
, nir_intrinsic_instr
, instr
,
1966 type
, nir_instr_type_intrinsic
)
1967 NIR_DEFINE_CAST(nir_instr_as_load_const
, nir_instr
, nir_load_const_instr
, instr
,
1968 type
, nir_instr_type_load_const
)
1969 NIR_DEFINE_CAST(nir_instr_as_ssa_undef
, nir_instr
, nir_ssa_undef_instr
, instr
,
1970 type
, nir_instr_type_ssa_undef
)
1971 NIR_DEFINE_CAST(nir_instr_as_phi
, nir_instr
, nir_phi_instr
, instr
,
1972 type
, nir_instr_type_phi
)
1973 NIR_DEFINE_CAST(nir_instr_as_parallel_copy
, nir_instr
,
1974 nir_parallel_copy_instr
, instr
,
1975 type
, nir_instr_type_parallel_copy
)
1978 #define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
1979 static inline type \
1980 nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
1982 assert(nir_src_is_const(src)); \
1983 nir_load_const_instr *load = \
1984 nir_instr_as_load_const(src.ssa->parent_instr); \
1985 assert(comp < load->def.num_components); \
1986 return nir_const_value_as_##suffix(load->value[comp], \
1987 load->def.bit_size); \
1990 static inline type \
1991 nir_src_as_##suffix(nir_src src) \
1993 assert(nir_src_num_components(src) == 1); \
1994 return nir_src_comp_as_##suffix(src, 0); \
1997 NIR_DEFINE_SRC_AS_CONST(int64_t, int)
1998 NIR_DEFINE_SRC_AS_CONST(uint64_t, uint
)
1999 NIR_DEFINE_SRC_AS_CONST(bool, bool)
2000 NIR_DEFINE_SRC_AS_CONST(double, float)
2002 #undef NIR_DEFINE_SRC_AS_CONST
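/* Illustrative sketch (not part of the header): the generated helpers are
 * typically used after a constness check, e.g.
 *
 *    if (nir_src_is_const(intrin->src[1])) {
 *       int64_t offset = nir_src_as_int(intrin->src[1]);
 *       ...
 *    }
 *
 * where intrin is some nir_intrinsic_instr whose second source may be an
 * immediate; the asserts inside the helper fire if the source is not a
 * scalar load_const.
 */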
2011 nir_ssa_scalar_is_const(nir_ssa_scalar s
)
2013 return s
.def
->parent_instr
->type
== nir_instr_type_load_const
;
2016 static inline nir_const_value
2017 nir_ssa_scalar_as_const_value(nir_ssa_scalar s
)
2019 assert(s
.comp
< s
.def
->num_components
);
2020 nir_load_const_instr
*load
= nir_instr_as_load_const(s
.def
->parent_instr
);
2021 return load
->value
[s
.comp
];
2024 #define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
2025 static inline type \
2026 nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
2028 return nir_const_value_as_##suffix( \
2029 nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
2032 NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
2033 NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint
)
2034 NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
2035 NIR_DEFINE_SCALAR_AS_CONST(double, float)
2037 #undef NIR_DEFINE_SCALAR_AS_CONST
2040 nir_ssa_scalar_is_alu(nir_ssa_scalar s
)
2042 return s
.def
->parent_instr
->type
== nir_instr_type_alu
;
2045 static inline nir_op
2046 nir_ssa_scalar_alu_op(nir_ssa_scalar s
)
2048 return nir_instr_as_alu(s
.def
->parent_instr
)->op
;
2051 static inline nir_ssa_scalar
2052 nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s
, unsigned alu_src_idx
)
2054 nir_ssa_scalar out
= { NULL
, 0 };
2056 nir_alu_instr
*alu
= nir_instr_as_alu(s
.def
->parent_instr
);
2057 assert(alu_src_idx
< nir_op_infos
[alu
->op
].num_inputs
);
2059 /* Our component must be written */
2060 assert(s
.comp
< s
.def
->num_components
);
2061 assert(alu
->dest
.write_mask
& (1u << s
.comp
));
2063 assert(alu
->src
[alu_src_idx
].src
.is_ssa
);
2064 out
.def
= alu
->src
[alu_src_idx
].src
.ssa
;
2066 if (nir_op_infos
[alu
->op
].input_sizes
[alu_src_idx
] == 0) {
2067 /* The ALU src is unsized so the source component follows the
2068 * destination component.
2070 out
.comp
= alu
->src
[alu_src_idx
].swizzle
[s
.comp
];
2072 /* This is a sized source so all source components work together to
2073 * produce all the destination components. Since we need to return a
2074 * scalar, this only works if the source is a scalar.
2076 assert(nir_op_infos
[alu
->op
].input_sizes
[alu_src_idx
] == 1);
2077 out
.comp
= alu
->src
[alu_src_idx
].swizzle
[0];
2079 assert(out
.comp
< out
.def
->num_components
);
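/* Illustrative sketch (not part of the header): chasing scalars through ALU
 * instructions lets passes walk backwards per component.  For the y component
 * of a hypothetical `a = fadd b.yx, c.zw`:
 *
 *    nir_ssa_scalar s = { a_def, 1 };   // component 1 of a (names assumed)
 *    nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(s, 0);
 *    // src0.def == b, src0.comp == 0  (swizzle "yx" maps dest.y to b.x)
 *
 * Since fadd is per-component (unsized inputs), the source component simply
 * follows the destination component through the swizzle.
 */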
2088 * Control flow consists of a tree of control flow nodes, which include
2089 * if-statements and loops. The leaves of the tree are basic blocks, lists of
2090 * instructions that always run start-to-finish. Each basic block also keeps
2091 * track of its successors (blocks which may run immediately after the current
2092 * block) and predecessors (blocks which could have run immediately before the
2093 * current block). Each function also has a start block and an end block which
2094 * all return statements point to (which is always empty). Together, all the
2095 * blocks with their predecessors and successors make up the control flow
2096 * graph (CFG) of the function. There are helpers that modify the tree of
2097 * control flow nodes while modifying the CFG appropriately; these should be
2098 * used instead of modifying the tree directly.
2105 nir_cf_node_function
2108 typedef struct nir_cf_node
{
2109 struct exec_node node
;
2110 nir_cf_node_type type
;
2111 struct nir_cf_node
*parent
;
2114 typedef struct nir_block
{
2115 nir_cf_node cf_node
;
2117 struct exec_list instr_list
; /** < list of nir_instr */
2119 /** generic block index; generated by nir_index_blocks */
2123 * Each block can only have up to 2 successors, so we put them in a simple
2124 * array - no need for anything more complicated.
2126 struct nir_block
*successors
[2];
2128 /* Set of nir_block predecessors in the CFG */
2129 struct set
*predecessors
;
2132 * this node's immediate dominator in the dominance tree - set to NULL for
2135 struct nir_block
*imm_dom
;
2137 /* This node's children in the dominance tree */
2138 unsigned num_dom_children
;
2139 struct nir_block
**dom_children
;
2141 /* Set of nir_blocks on the dominance frontier of this block */
2142 struct set
*dom_frontier
;
2145 * These two indices have the property that dom_{pre,post}_index for each
2146 * child of this block in the dominance tree will always be between
2147 * dom_pre_index and dom_post_index for this block, which makes testing if
2148 * a given block is dominated by another block an O(1) operation.
2150 unsigned dom_pre_index
, dom_post_index
;
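   /* Illustrative sketch (not part of the header): with the pre/post numbering
    * above, "does `parent` dominate `block`?" is an interval check along the
    * lines of
    *
    *    bool dominates = parent->dom_pre_index  <= block->dom_pre_index &&
    *                     parent->dom_post_index >= block->dom_post_index;
    *
    * (the real helper elsewhere in NIR is nir_block_dominates(); this is just
    * the idea behind the O(1) claim in the comment above.)
    */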
2152 /* live in and out for this block; used for liveness analysis */
2153 BITSET_WORD
*live_in
;
2154 BITSET_WORD
*live_out
;
2157 static inline nir_instr
*
2158 nir_block_first_instr(nir_block
*block
)
2160 struct exec_node
*head
= exec_list_get_head(&block
->instr_list
);
2161 return exec_node_data(nir_instr
, head
, node
);
2164 static inline nir_instr
*
2165 nir_block_last_instr(nir_block
*block
)
2167 struct exec_node
*tail
= exec_list_get_tail(&block
->instr_list
);
2168 return exec_node_data(nir_instr
, tail
, node
);
2172 nir_block_ends_in_jump(nir_block
*block
)
2174 return !exec_list_is_empty(&block
->instr_list
) &&
2175 nir_block_last_instr(block
)->type
== nir_instr_type_jump
;
2178 #define nir_foreach_instr(instr, block) \
2179 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
2180 #define nir_foreach_instr_reverse(instr, block) \
2181 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
2182 #define nir_foreach_instr_safe(instr, block) \
2183 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
2184 #define nir_foreach_instr_reverse_safe(instr, block) \
2185 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
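/* Illustrative sketch (not part of the header): a typical pass body walks
 * instructions with these macros, using the _safe variant when removing:
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type != nir_instr_type_alu)
 *          continue;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       // ... rewrite or remove alu ...
 *    }
 */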
2188 nir_selection_control_none
= 0x0,
2189 nir_selection_control_flatten
= 0x1,
2190 nir_selection_control_dont_flatten
= 0x2,
2191 } nir_selection_control
;
2193 typedef struct nir_if
{
2194 nir_cf_node cf_node
;
2196 nir_selection_control control
;
2198 struct exec_list then_list
; /** < list of nir_cf_node */
2199 struct exec_list else_list
; /** < list of nir_cf_node */
2205 /** Instruction that generates nif::condition. */
2206 nir_instr
*conditional_instr
;
2208 /** Block within ::nif that has the break instruction. */
2209 nir_block
*break_block
;
2211 /** Last block for the then- or else-path that does not contain the break. */
2212 nir_block
*continue_from_block
;
2214 /** True when ::break_block is in the else-path of ::nif. */
2215 bool continue_from_then
;
   /* This is true if the terminator's exact trip count is unknown. For
    * example:
    *
    *    for (int i = 0; i < imin(x, 4); i++)
    *       ...
    *
    * Here loop analysis would have set a max_trip_count of 4; however, we
    * don't know for sure that this is the exact trip count.
    */
   bool exact_trip_count_unknown;

   struct list_head loop_terminator_link;
} nir_loop_terminator;
2233 /* Estimated cost (in number of instructions) of the loop */
2234 unsigned instr_cost
;
2236 /* Guessed trip count based on array indexing */
2237 unsigned guessed_trip_count
;
2239 /* Maximum number of times the loop is run (if known) */
2240 unsigned max_trip_count
;
2242 /* Do we know the exact number of times the loop will be run */
2243 bool exact_trip_count_known
;
2245 /* Unroll the loop regardless of its size */
2248 /* Does the loop contain complex loop terminators, continues or other
2249 * complex behaviours? If this is true we can't rely on
2250 * loop_terminator_list to be complete or accurate.
2254 nir_loop_terminator
*limiting_terminator
;
2256 /* A list of loop_terminators terminating this loop. */
2257 struct list_head loop_terminator_list
;
2261 nir_loop_control_none
= 0x0,
2262 nir_loop_control_unroll
= 0x1,
2263 nir_loop_control_dont_unroll
= 0x2,
2267 nir_cf_node cf_node
;
2269 struct exec_list body
; /** < list of nir_cf_node */
2271 nir_loop_info
*info
;
2272 nir_loop_control control
;
2273 bool partially_unrolled
;
/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
2281 nir_metadata_none
= 0x0,
2282 nir_metadata_block_index
= 0x1,
2283 nir_metadata_dominance
= 0x2,
2284 nir_metadata_live_ssa_defs
= 0x4,
2285 nir_metadata_not_properly_reset
= 0x8,
2286 nir_metadata_loop_analysis
= 0x10,
typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;
ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}

static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}

NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
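/* Usage sketch (illustrative): dispatch on the type of a control-flow node
 * using the cast helpers above.  "node" is assumed to be a valid nir_cf_node;
 * handle_block()/handle_if()/handle_loop() are hypothetical callbacks.
 *
 *    switch (node->type) {
 *    case nir_cf_node_block:
 *       handle_block(nir_cf_node_as_block(node));
 *       break;
 *    case nir_cf_node_if:
 *       handle_if(nir_cf_node_as_if(node));
 *       break;
 *    case nir_cf_node_loop:
 *       handle_loop(nir_cf_node_as_loop(node));
 *       break;
 *    default:
 *       break;
 *    }
 *
 * Each cast asserts that node->type actually matches the requested type.
 */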
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

/**
 * Return true if this list of cf_nodes contains a single empty block.
 */
static inline bool
nir_cf_list_is_empty_block(struct exec_list *cf_list)
{
   if (exec_list_is_singular(cf_list)) {
      struct exec_node *head = exec_list_get_head(cf_list);
      nir_block *block =
         nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
      return exec_list_is_empty(&block->instr_list);
   }
   return false;
}
typedef struct {
   uint8_t num_components;
   uint8_t bit_size;
} nir_parameter;

typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;

   bool is_entrypoint;
} nir_function;
typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
   /** Lower all 64-bit umul_high and imul_high opcodes */
   nir_lower_imul_high64 = (1 << 3),
   nir_lower_mov64 = (1 << 4),
   nir_lower_icmp64 = (1 << 5),
   nir_lower_iadd64 = (1 << 6),
   nir_lower_iabs64 = (1 << 7),
   nir_lower_ineg64 = (1 << 8),
   nir_lower_logic64 = (1 << 9),
   nir_lower_minmax64 = (1 << 10),
   nir_lower_shift64 = (1 << 11),
   nir_lower_imul_2x32_64 = (1 << 12),
   nir_lower_extract64 = (1 << 13),
} nir_lower_int64_options;

typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8),
   nir_lower_fp64_full_software = (1 << 9),
} nir_lower_doubles_options;
typedef struct nir_shader_compiler_options {
   /** Lowers flrp when it does not support doubles */
   bool lower_flrp64;

   /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
   bool lower_bitfield_extract;
   /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
   bool lower_bitfield_extract_to_shifts;
   /** Lowers bitfield_insert to bfi/bfm */
   bool lower_bitfield_insert;
   /** Lowers bitfield_insert to compares, and shifts. */
   bool lower_bitfield_insert_to_shifts;
   /** Lowers bitfield_insert to bfm/bitfield_select. */
   bool lower_bitfield_insert_to_bitfield_select;
   /** Lowers bitfield_reverse to shifts. */
   bool lower_bitfield_reverse;
   /** Lowers bit_count to shifts. */
   bool lower_bit_count;
   /** Lowers ifind_msb to compare and ufind_msb */
   bool lower_ifind_msb;
   /** Lowers find_lsb to ufind_msb and logic ops */
   bool lower_find_lsb;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
   bool lower_mul_high;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /** enable rules to avoid bit shifts */
   bool lower_bitshift;

   /** enables rules to lower isign to imin+imax */
   bool lower_isign;

   /** enables rules to lower fsign to fsub and flt */
   bool lower_fsign;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffloor to fsub+ffract: */
   bool lower_ffloor;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   /** lowers fceil to fneg+ffloor+fneg: */
   bool lower_fceil;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   bool lower_all_io_to_temps;
   bool lower_all_io_to_elements;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   /**
    * If enabled, gl_BaseVertex will be lowered as:
    * is_indexed_draw (~0/0) & firstvertex
    */
   bool lower_base_vertex;

   /**
    * If enabled, gl_HelperInvocation will be lowered as:
    *
    *   !((1 << sample_id) & sample_mask_in))
    *
    * This depends on possibly hardware-specific implementation details
    * which may not hold for all hardware; in particular, it assumes the FS
    * is only executed for covered samples or for helper invocations.  So, do
    * not blindly enable this option.
    *
    * Note: See also issue #22 in ARB_shader_image_load_store
    */
   bool lower_helper_invocation;

   /**
    * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
    *
    *   gl_SampleMaskIn == 0 ---> gl_HelperInvocation
    *   gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
    */
   bool optimize_sample_mask_in;

   bool lower_cs_local_index_from_id;
   bool lower_cs_local_id_from_index;

   bool lower_device_index_to_zero;

   /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
   bool lower_wpos_pntc;

   /**
    * Should IO be re-vectorized?  Some scalar ISAs still operate on vec4's
    * for IO purposes and would prefer loads/stores be vectorized.
    */
   bool vectorize_io;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;

   /* Lowers when 32x32->64 bit multiplication is not supported */
   bool lower_mul_2x32_64;

   /* Lowers when rotate instruction is not supported */
   bool lower_rotate;

   /**
    * Is this the Intel vec4 backend?
    *
    * Used to inhibit algebraic optimizations that are known to be harmful on
    * the Intel vec4 backend.  This is generally applicable to any
    * optimization that might cause more immediate values to be used in
    * 3-source (e.g., ffma and flrp) instructions.
    */
   bool intel_vec4;

   unsigned max_unroll_iterations;

   nir_lower_int64_options lower_int64_options;
   nir_lower_doubles_options lower_doubles_options;
} nir_shader_compiler_options;
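/* Usage sketch (illustrative): a driver typically keeps a single static copy
 * of its compiler options and hands the same pointer to every shader it
 * creates.  The option values below are hypothetical, not a recommendation
 * for any particular piece of hardware.
 *
 *    static const nir_shader_compiler_options example_options = {
 *       .lower_fsign = true,
 *       .lower_extract_byte = true,
 *       .lower_extract_word = true,
 *       .max_unroll_iterations = 32,
 *       .lower_int64_options = nir_lower_imul64 | nir_lower_divmod64,
 *    };
 *
 * The pointer is then passed to nir_shader_create() (declared below) and ends
 * up in nir_shader::options.
 */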
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** Size in bytes of required scratch space */
   unsigned scratch_size;

   /** Constant data associated with this shader.
    *
    * Constant data is loaded through load_constant intrinsics. See also
    * nir_opt_large_constants.
    */
   void *constant_data;
   unsigned constant_data_size;
} nir_shader;
#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)

static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   nir_function *func = NULL;

   nir_foreach_function(function, shader) {
      assert(func == NULL);
      if (function->is_entrypoint) {
         func = function;
      }
   }

   if (!func)
      return NULL;

   assert(func->num_params == 0);
   assert(func->impl);

   return func->impl;
}
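/* Usage sketch (illustrative): grab the entry point of a shader and walk its
 * blocks in source-code order using the iteration helpers declared further
 * down in this header.  "shader" is assumed to be a valid nir_shader with an
 * entry point.
 *
 *    nir_function_impl *impl = nir_shader_get_entrypoint(shader);
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block) {
 *          // per-instruction work goes here
 *       }
 *    }
 */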
nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_function_temp);
   exec_list_push_tail(&impl->locals, &var->node);
}

/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);

/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
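/* Usage sketch (illustrative): a typical pass asks for the analysis it needs
 * up front and reports which metadata survives its edits.  This is only a
 * skeleton; example_pass_impl() is a hypothetical helper.
 *
 *    static bool
 *    example_pass_impl(nir_function_impl *impl)
 *    {
 *       bool progress = false;
 *       nir_metadata_require(impl, nir_metadata_block_index |
 *                                  nir_metadata_dominance);
 *
 *       // ... rewrite instructions, setting progress = true on changes ...
 *
 *       if (progress)
 *          nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                      nir_metadata_dominance);
 *       return progress;
 *    }
 */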
/** creates an instruction with default swizzle/writemask/etc. with NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
                                        nir_deref_type deref_type);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
/**
 * NIR Cursors and Instruction Insertion API
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;

   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   }
   return cursor.block;
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);
static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}
static inline nir_cursor
nir_before_src(nir_src *src, bool is_if_condition)
{
   if (is_if_condition) {
      nir_block *prev_block =
         nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
      assert(!nir_block_ends_in_jump(prev_block));
      return nir_after_block(prev_block);
   } else if (src->parent_instr->type == nir_instr_type_phi) {
#ifndef NDEBUG
      /* The source must be one of the phi's own sources. */
      nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
      bool found = false;
      nir_foreach_phi_src(phi_src, cond_phi) {
         if (phi_src->src.ssa == src->ssa) {
            found = true;
            break;
         }
      }
      assert(found);
#endif
      /* The LIST_ENTRY macro is a generic container-of macro, it just happens
       * to have a more specific name.
       */
      nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
      return nir_after_block_before_jump(phi_src->pred);
   } else {
      return nir_before_instr(src->parent_instr);
   }
}
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}
/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}
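/* Usage sketch (illustrative): create a load_const and insert it at the very
 * start of a function.  "impl" is assumed to be a valid nir_function_impl.
 *
 *    nir_load_const_instr *zero =
 *       nir_load_const_instr_create(impl->function->shader, 1, 32);
 *    zero->value[0].u32 = 0;
 *    nir_instr_insert(nir_before_block(nir_start_block(impl)), &zero->instr);
 *
 * Most code builds instructions through nir_builder instead, which wraps this
 * insertion API.
 */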
void nir_instr_remove_v(nir_instr *instr);

static inline nir_cursor
nir_instr_remove(nir_instr *instr)
{
   nir_cursor cursor;
   nir_instr *prev = nir_instr_prev(instr);
   if (prev) {
      cursor = nir_after_instr(prev);
   } else {
      cursor = nir_before_block(instr->block);
   }
   nir_instr_remove_v(instr);
   return cursor;
}
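/* Usage sketch (illustrative): removing instructions while iterating requires
 * the _safe iteration variant, since nir_instr_remove() unlinks the current
 * instruction.  is_dead() is a hypothetical predicate.
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          if (is_dead(instr))
 *             nir_instr_remove(instr);
 *       }
 *    }
 */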
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);

#define NIR_SRC_AS_(name, c_type, type_enum, cast_macro)                \
static inline c_type *                                                  \
nir_src_as_ ## name (nir_src src)                                       \
{                                                                       \
   return src.is_ssa && src.ssa->parent_instr->type == type_enum        \
          ? cast_macro(src.ssa->parent_instr) : NULL;                   \
}

NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
            nir_instr_type_intrinsic, nir_instr_as_intrinsic)
NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
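/* Usage sketch (illustrative): these helpers return NULL when the source is
 * not SSA or was not produced by the requested instruction type, so they can
 * be used directly in a condition.  "src" is assumed to be a valid nir_src.
 *
 *    nir_intrinsic_instr *intrin = nir_src_as_intrinsic(src);
 *    if (intrin != NULL) {
 *       // src is defined by an intrinsic; inspect intrin->intrinsic, etc.
 *    }
 */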
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
/*
 * finds the next basic block in source-code order, returns NULL if there is
 * none
 */
nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */
nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);

/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
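/* Usage sketch (illustrative): count the blocks that make up a loop body,
 * including blocks nested inside ifs within the loop.  "loop" is assumed to
 * be a valid nir_loop.
 *
 *    unsigned num_blocks = 0;
 *    nir_foreach_block_in_cf_node(block, &loop->cf_node) {
 *       num_blocks++;
 *    }
 */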
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);
void nir_print_deref(const nir_deref_instr *deref, FILE *fp);

/** Shallow clone of a single ALU instruction. */
nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(nir_shader *shader,
                                           const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);

void nir_shader_replace(nir_shader *dest, nir_shader *src);

void nir_shader_serialize_deserialize(nir_shader *s);
#ifndef NDEBUG
void nir_validate_shader(nir_shader *shader, const char *when);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_skip_nir(const char *name)
{
   static const char *list = NULL;
   if (!list) {
      /* Comma separated list of names to skip. */
      list = getenv("NIR_SKIP");
      if (!list)
         list = "";
   }

   if (!list[0])
      return false;

   return comma_separated_list_contains(list, name);
}

static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

static inline bool
should_serialize_deserialize_nir(void)
{
   static int test_serialize = -1;
   if (test_serialize < 0)
      test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);

   return test_serialize;
}

static inline bool
should_print_nir(void)
{
   static int should_print = -1;
   if (should_print < 0)
      should_print = env_var_as_boolean("NIR_PRINT", false);

   return should_print;
}
#else
static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */
#define _PASS(pass, nir, do_pass) do {                               \
   if (should_skip_nir(#pass)) {                                     \
      printf("skipping %s\n", #pass);                                \
      break;                                                         \
   }                                                                 \
   do_pass                                                           \
   nir_validate_shader(nir, "after " #pass);                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      nir_shader_replace(nir, clone);                                \
   }                                                                 \
   if (should_serialize_deserialize_nir()) {                         \
      nir_shader_serialize_deserialize(nir);                         \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir,          \
   nir_metadata_set_validation_flag(nir);                            \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      if (should_print_nir())                                        \
         nir_print_shader(nir, stdout);                              \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir,                  \
   if (should_print_nir())                                           \
      printf("%s\n", #pass);                                         \
   pass(nir, ##__VA_ARGS__);                                         \
   if (should_print_nir())                                           \
      nir_print_shader(nir, stdout);                                 \
)

#define NIR_SKIP(name) should_skip_nir(#name)
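/* Usage sketch (illustrative): a driver's optimization loop typically runs a
 * set of passes until they stop making progress, using NIR_PASS to get the
 * validation, NIR_SKIP and NIR_PRINT handling above for free.  The pass
 * selection here is arbitrary; NIR_PASS_V is for calls whose return value is
 * ignored.
 *
 *    NIR_PASS_V(nir, nir_lower_vars_to_ssa);
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *       NIR_PASS(progress, nir, nir_opt_algebraic);
 *    } while (progress);
 */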
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);

bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
bool nir_split_var_copies(nir_shader *shader);
bool nir_split_per_member_structs(nir_shader *shader);
bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);
void nir_inline_function_impl(struct nir_builder *b,
                              const nir_function_impl *impl,
                              nir_ssa_def **params);
bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
void nir_lower_deref_copy_instr(struct nir_builder *b,
                                nir_intrinsic_instr *copy);
bool nir_lower_var_copies(nir_shader *shader);

void nir_fixup_deref_modes(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

typedef enum {
   nir_lower_direct_array_deref_of_vec_load     = (1 << 0),
   nir_lower_indirect_array_deref_of_vec_load   = (1 << 1),
   nir_lower_direct_array_deref_of_vec_store    = (1 << 2),
   nir_lower_indirect_array_deref_of_vec_store  = (1 << 3),
} nir_lower_array_deref_of_vec_options;

bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
                                  nir_lower_array_deref_of_vec_options options);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);
void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

bool nir_lower_vars_to_scratch(nir_shader *shader,
                               nir_variable_mode modes,
                               int size_threshold,
                               glsl_type_size_align_func size_align);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_gather_ssa_types(nir_function_impl *impl,
                          BITSET_WORD *float_types,
                          BITSET_WORD *int_types);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *, bool));

/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
                               uint64_t *used_by_other_stage,
                               uint64_t *used_by_other_stage_patches);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);

void nir_assign_io_var_locations(struct exec_list *var_list,
                                 unsigned *size,
                                 gl_shader_stage stage);
typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *, bool),
                  nir_lower_io_options);

bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode);

typedef enum {
   /**
    * An address format which is a simple 32-bit global GPU address.
    */
   nir_address_format_32bit_global,

   /**
    * An address format which is a simple 64-bit global GPU address.
    */
   nir_address_format_64bit_global,

   /**
    * An address format which is a bounds-checked 64-bit global GPU address.
    *
    * The address is comprised as a 32-bit vec4 where .xy are a uint64_t base
    * address stored with the low bits in .x and high bits in .y, .z is a
    * size, and .w is an offset.  When the final I/O operation is lowered, .w
    * is checked against .z and the operation is predicated on the result.
    */
   nir_address_format_64bit_bounded_global,

   /**
    * An address format which is comprised of a vec2 where the first
    * component is a buffer index and the second is an offset.
    */
   nir_address_format_32bit_index_offset,

   /**
    * An address format which is a simple 32-bit offset.
    */
   nir_address_format_32bit_offset,

   /**
    * An address format representing a purely logical addressing model.  In
    * this model, all deref chains must be complete from the dereference
    * operation to the variable.  Cast derefs are not allowed.  These
    * addresses will be 32-bit scalars but the format is immaterial because
    * you can always chase the chain.
    */
   nir_address_format_logical,
} nir_address_format;
static inline unsigned
nir_address_format_bit_size(nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:         return 32;
   case nir_address_format_64bit_global:         return 64;
   case nir_address_format_64bit_bounded_global: return 32;
   case nir_address_format_32bit_index_offset:   return 32;
   case nir_address_format_32bit_offset:         return 32;
   case nir_address_format_logical:              return 32;
   }
   unreachable("Invalid address format");
}

static inline unsigned
nir_address_format_num_components(nir_address_format addr_format)
{
   switch (addr_format) {
   case nir_address_format_32bit_global:         return 1;
   case nir_address_format_64bit_global:         return 1;
   case nir_address_format_64bit_bounded_global: return 4;
   case nir_address_format_32bit_index_offset:   return 2;
   case nir_address_format_32bit_offset:         return 1;
   case nir_address_format_logical:              return 1;
   }
   unreachable("Invalid address format");
}

static inline const struct glsl_type *
nir_address_format_to_glsl_type(nir_address_format addr_format)
{
   unsigned bit_size = nir_address_format_bit_size(addr_format);
   assert(bit_size == 32 || bit_size == 64);
   return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
                           nir_address_format_num_components(addr_format));
}
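/* For example, nir_address_format_64bit_bounded_global describes addresses as
 * a vec4 of 32-bit components, so:
 *
 *    nir_address_format_bit_size(nir_address_format_64bit_bounded_global)       == 32
 *    nir_address_format_num_components(nir_address_format_64bit_bounded_global) == 4
 *
 * and nir_address_format_to_glsl_type() returns a uvec4 type for it.
 */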
const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);

nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                nir_address_format addr_format);

nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
                                 nir_address_format addr_format);

nir_ssa_def *nir_explicit_io_address_from_deref(struct nir_builder *b,
                                                nir_deref_instr *deref,
                                                nir_ssa_def *base_addr,
                                                nir_address_format addr_format);
void nir_lower_explicit_io_instr(struct nir_builder *b,
                                 nir_intrinsic_instr *io_instr,
                                 nir_ssa_def *addr,
                                 nir_address_format addr_format);

bool nir_lower_explicit_io(nir_shader *shader,
                           nir_variable_mode modes,
                           nir_address_format);

nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_derefs(nir_shader *shader);
bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);

bool nir_move_load_const(nir_shader *shader);
bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one);
bool nir_lower_alu(nir_shader *shader);

bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
                    bool always_precise, bool have_ffma);

bool nir_lower_alu_to_scalar(nir_shader *shader, BITSET_WORD *lower_set);
bool nir_lower_bool_to_float(nir_shader *shader);
bool nir_lower_bool_to_int32(nir_shader *shader);
bool nir_lower_int_to_float(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
                                                  bool outputs_only);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);

void nir_lower_fragcoord_wtrans(nir_shader *shader);
void nir_lower_viewport_transform(nir_shader *shader);
bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_vote_eq_to_ballot:1;
   bool lower_subgroup_masks:1;
   bool lower_shuffle:1;
   bool lower_shuffle_to_32bit:1;
} nir_lower_subgroups_options;

bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);
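/* Usage sketch (illustrative): lowering subgroup operations for a hypothetical
 * 32-wide GPU whose backend wants scalar ballot code.  The exact option
 * values depend entirely on the hardware.
 *
 *    const nir_lower_subgroups_options subgroup_options = {
 *       .subgroup_size = 32,
 *       .ballot_bit_size = 32,
 *       .lower_to_scalar = true,
 *       .lower_subgroup_masks = true,
 *    };
 *    NIR_PASS_V(nir, nir_lower_subgroups, &subgroup_options);
 */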
bool nir_lower_system_values(nir_shader *shader);

enum PACKED nir_lower_tex_packing {
   nir_lower_tex_packing_none = 0,
   /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
    * or unsigned ints based on the sampler type
    */
   nir_lower_tex_packing_16,
   /* The sampler returns 1 32-bit word of 4x8 unorm */
   nir_lower_tex_packing_8,
};
typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * If true, convert yuv to rgb.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;
   unsigned lower_ayuv_external;
   unsigned lower_xyuv_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, i.e. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /* Can be used to scale sampled values in range required by the format. */
   float scale_factors[32];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_tex on shaders that don't support implicit
    * LODs to nir_texop_txl.
    */
   bool lower_tex_without_implicit_lod;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on 3D surfaces with nir_texop_txl.
    */
   bool lower_txd_3d;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;

   /**
    * If true, lower nir_texop_txb that try to use shadow compare and min_lod
    * at the same time to a nir_texop_lod, some math, and nir_texop_tex.
    */
   bool lower_txb_shadow_clamp;

   /**
    * If true, lower nir_texop_txd on shadow samplers when it uses min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_shadow_clamp;

   /**
    * If true, lower nir_texop_txd on when it uses both offset and min_lod
    * with nir_texop_txl.  This includes cube maps.
    */
   bool lower_txd_offset_clamp;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler is bindless.
    */
   bool lower_txd_clamp_bindless_sampler;

   /**
    * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
    * sampler index is not statically determinable to be less than 16.
    */
   bool lower_txd_clamp_if_sampler_index_not_lt_16;

   /**
    * If true, lower nir_texop_txs with a non-0-lod into nir_texop_txs with
    * 0-lod followed by a nir_ishr.
    */
   bool lower_txs_lod;

   /**
    * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
    * mixed-up tg4 locations.
    */
   bool lower_tg4_broadcom_swizzle;

   /**
    * If true, lowers tg4 with 4 constant offsets to 4 tg4 calls
    */
   bool lower_tg4_offsets;

   enum nir_lower_tex_packing lower_tex_packing[32];
} nir_lower_tex_options;

bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
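/* Usage sketch (illustrative): lowering texture projectors for rect samplers
 * and clamping the s/t coordinates of sampler 0.  The values are arbitrary
 * and purely for demonstration.
 *
 *    nir_lower_tex_options tex_options = {
 *       .lower_txp = (1 << GLSL_SAMPLER_DIM_RECT),
 *       .saturate_s = (1 << 0),
 *       .saturate_t = (1 << 0),
 *    };
 *    NIR_PASS_V(nir, nir_lower_tex, &tex_options);
 */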
enum nir_lower_non_uniform_access_type {
   nir_lower_non_uniform_ubo_access     = (1 << 0),
   nir_lower_non_uniform_ssbo_access    = (1 << 1),
   nir_lower_non_uniform_texture_access = (1 << 2),
   nir_lower_non_uniform_image_access   = (1 << 3),
};

bool nir_lower_non_uniform_access(nir_shader *shader,
                                  enum nir_lower_non_uniform_access_type);

bool nir_lower_idiv(nir_shader *shader);

bool nir_lower_input_attachments(nir_shader *shader, bool use_fragcoord_sysval);

bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables, bool use_vars);
bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);

bool nir_lower_frexp(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

bool nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);
bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
                              const gl_state_index16 *uniform_state_tokens);

typedef struct nir_lower_wpos_ytransform_options {
   gl_state_index16 state_tokens[STATE_LENGTH];

   bool fs_coord_origin_upper_left:1;
   bool fs_coord_origin_lower_left:1;
   bool fs_coord_pixel_center_integer:1;
   bool fs_coord_pixel_center_half_integer:1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);

bool nir_lower_fb_read(nir_shader *shader);
typedef struct nir_lower_drawpixels_options {
   gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
   gl_state_index16 scale_state_tokens[STATE_LENGTH];
   gl_state_index16 bias_state_tokens[STATE_LENGTH];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool scale_and_bias:1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);

typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);

bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);

typedef enum {
   nir_lower_int_source_mods = 1 << 0,
   nir_lower_float_source_mods = 1 << 1,
   nir_lower_triop_abs = 1 << 2,
   nir_lower_all_source_mods = (1 << 3) - 1
} nir_lower_to_source_mods_flags;

bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);

bool nir_lower_gs_intrinsics(nir_shader *shader);
typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);

bool nir_lower_bit_size(nir_shader *shader,
                        nir_lower_bit_size_callback callback,
                        void *callback_data);

nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);

nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
                       nir_lower_doubles_options options);
bool nir_lower_pack(nir_shader *shader);

typedef enum {
   nir_lower_interpolation_at_sample = (1 << 1),
   nir_lower_interpolation_at_offset = (1 << 2),
   nir_lower_interpolation_centroid  = (1 << 3),
   nir_lower_interpolation_pixel     = (1 << 4),
   nir_lower_interpolation_sample    = (1 << 5),
} nir_lower_interpolation_options;

bool nir_lower_interpolation(nir_shader *shader,
                             nir_lower_interpolation_options options);

bool nir_normalize_cubemap_coords(nir_shader *shader);
void nir_live_ssa_defs_impl(nir_function_impl *impl);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
/* This is here for unit tests. */
bool nir_opt_comparison_pre_impl(nir_function_impl *impl);

bool nir_opt_comparison_pre(nir_shader *shader);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_dead_write_vars(nir_shader *shader);

bool nir_opt_deref_impl(nir_function_impl *impl);
bool nir_opt_deref(nir_shader *shader);

bool nir_opt_find_array_copies(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);

bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_large_constants(nir_shader *shader,
                             glsl_type_size_align_func size_align,
                             unsigned threshold);

bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);

bool nir_opt_move_comparisons(nir_shader *shader);

bool nir_opt_move_load_ubo(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
                             bool indirect_load_ok, bool expensive_alu_ok);

bool nir_opt_rematerialize_compares(nir_shader *shader);

bool nir_opt_remove_phis(nir_shader *shader);
bool nir_opt_remove_phis_block(nir_block *block);

bool nir_opt_shrink_load(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_vectorize(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_strip(nir_shader *shader);

void nir_sweep(nir_shader *shader);

void nir_remap_dual_slot_attributes(nir_shader *shader,
                                    uint64_t *dual_slot_inputs);
uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);

bool nir_lower_sincos(nir_shader *shader);
static inline bool
nir_variable_is_in_ubo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ubo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_ssbo(const nir_variable *var)
{
   return (var->data.mode == nir_var_mem_ssbo &&
           var->interface_type != NULL);
}

static inline bool
nir_variable_is_in_block(const nir_variable *var)
{
   return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
}