1 /*
2 * Copyright © 2014 Connor Abbott
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #ifndef NIR_H
29 #define NIR_H
30
31 #include "util/hash_table.h"
32 #include "compiler/glsl/list.h"
33 #include "GL/gl.h" /* GLenum */
34 #include "util/list.h"
35 #include "util/ralloc.h"
36 #include "util/set.h"
37 #include "util/bitset.h"
38 #include "util/macros.h"
39 #include "compiler/nir_types.h"
40 #include "compiler/shader_enums.h"
41 #include "compiler/shader_info.h"
42 #include <stdio.h>
43
44 #ifndef NDEBUG
45 #include "util/debug.h"
46 #endif /* NDEBUG */
47
48 #include "nir_opcodes.h"
49
50 #if defined(_WIN32) && !defined(snprintf)
51 #define snprintf _snprintf
52 #endif
53
54 #ifdef __cplusplus
55 extern "C" {
56 #endif
57
58 struct gl_program;
59 struct gl_shader_program;
60
61 #define NIR_FALSE 0u
62 #define NIR_TRUE (~0u)
63
64 /** Defines a cast function
65 *
66 * This macro defines a cast function from in_type to out_type where
67 * out_type is some structure type that contains a field of type in_type.
68 *
69 * Note that you have to be a bit careful as the generated cast function
70 * destroys constness.
71 */
72 #define NIR_DEFINE_CAST(name, in_type, out_type, field, \
73 type_field, type_value) \
74 static inline out_type * \
75 name(const in_type *parent) \
76 { \
77 assert(parent && parent->type_field == type_value); \
78 return exec_node_data(out_type, parent, field); \
79 }
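/*
 * Illustrative sketch (editor's note, not part of the original header): the
 * casts defined further below, e.g.
 *
 *    NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
 *                    type, nir_instr_type_alu)
 *
 * expand to roughly
 *
 *    static inline nir_alu_instr *
 *    nir_instr_as_alu(const nir_instr *parent)
 *    {
 *       assert(parent && parent->type == nir_instr_type_alu);
 *       return exec_node_data(nir_alu_instr, parent, instr);
 *    }
 *
 * i.e. they assert the type tag and then recover the containing structure
 * from the embedded field.
 */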
80
81 struct nir_function;
82 struct nir_shader;
83 struct nir_instr;
84
85
86 /**
87 * Description of built-in state associated with a uniform
88 *
89 * \sa nir_variable::state_slots
90 */
91 typedef struct {
92 gl_state_index16 tokens[STATE_LENGTH];
93 int swizzle;
94 } nir_state_slot;
95
96 typedef enum {
97 nir_var_shader_in = (1 << 0),
98 nir_var_shader_out = (1 << 1),
99 nir_var_global = (1 << 2),
100 nir_var_local = (1 << 3),
101 nir_var_uniform = (1 << 4),
102 nir_var_shader_storage = (1 << 5),
103 nir_var_system_value = (1 << 6),
104 nir_var_param = (1 << 7),
105 nir_var_shared = (1 << 8),
106 nir_var_all = ~0,
107 } nir_variable_mode;
108
109 /**
110 * Rounding modes.
111 */
112 typedef enum {
113 nir_rounding_mode_undef = 0,
114 nir_rounding_mode_rtne = 1, /* round to nearest even */
115 nir_rounding_mode_ru = 2, /* round up */
116 nir_rounding_mode_rd = 3, /* round down */
117 nir_rounding_mode_rtz = 4, /* round towards zero */
118 } nir_rounding_mode;
119
120 typedef union {
121 float f32[4];
122 double f64[4];
123 int8_t i8[4];
124 uint8_t u8[4];
125 int16_t i16[4];
126 uint16_t u16[4];
127 int32_t i32[4];
128 uint32_t u32[4];
129 int64_t i64[4];
130 uint64_t u64[4];
131 } nir_const_value;
132
133 typedef struct nir_constant {
134 /**
135 * Value of the constant.
136 *
137 * The field used to back the values supplied by the constant is determined
138 * by the type associated with the \c nir_variable. Constants may be
139 * scalars, vectors, or matrices.
140 */
141 nir_const_value values[4];
142
143 /* We could get this from var->type, but it makes clone *much* easier
144 * not to have to care about the type.
145 */
146 unsigned num_elements;
147
148 /* Array elements / Structure Fields */
149 struct nir_constant **elements;
150 } nir_constant;
151
152 /**
153 * \brief Layout qualifiers for gl_FragDepth.
154 *
155 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
156 * with a layout qualifier.
157 */
158 typedef enum {
159 nir_depth_layout_none, /**< No depth layout is specified. */
160 nir_depth_layout_any,
161 nir_depth_layout_greater,
162 nir_depth_layout_less,
163 nir_depth_layout_unchanged
164 } nir_depth_layout;
165
166 /**
167 * Either a uniform, global variable, shader input, or shader output. Based on
168 * ir_variable - it should be easy to translate between the two.
169 */
170
171 typedef struct nir_variable {
172 struct exec_node node;
173
174 /**
175 * Declared type of the variable
176 */
177 const struct glsl_type *type;
178
179 /**
180 * Declared name of the variable
181 */
182 char *name;
183
184 struct nir_variable_data {
185 /**
186 * Storage class of the variable.
187 *
188 * \sa nir_variable_mode
189 */
190 nir_variable_mode mode;
191
192 /**
193 * Is the variable read-only?
194 *
195 * This is set for variables declared as \c const, shader inputs,
196 * and uniforms.
197 */
198 unsigned read_only:1;
199 unsigned centroid:1;
200 unsigned sample:1;
201 unsigned patch:1;
202 unsigned invariant:1;
203
204 /**
205 * When separate shader programs are enabled, only input/outputs between
206 * the stages of a multi-stage separate program can be safely removed
207 * from the shader interface. Other inputs/outputs must remain active.
208 *
209 * This is also used to make sure xfb varyings that are unused by the
210 * fragment shader are not removed.
211 */
212 unsigned always_active_io:1;
213
214 /**
215 * Interpolation mode for shader inputs / outputs
216 *
217 * \sa glsl_interp_mode
218 */
219 unsigned interpolation:2;
220
221 /**
222 * \name ARB_fragment_coord_conventions
223 * @{
224 */
225 unsigned origin_upper_left:1;
226 unsigned pixel_center_integer:1;
227 /*@}*/
228
229 /**
230 * If non-zero, then this variable may be packed along with other variables
231 * into a single varying slot, so this offset should be applied when
232 * accessing components. For example, an offset of 1 means that the x
233 * component of this variable is actually stored in component y of the
234 * location specified by \c location.
235 */
236 unsigned location_frac:2;
237
238 /**
239 * If true, this variable represents an array of scalars that should
240 * be tightly packed. In other words, consecutive array elements
241 * should be stored one component apart, rather than one slot apart.
242 */
243 unsigned compact:1;
244
245 /**
246 * Whether this is a fragment shader output implicitly initialized with
247 * the previous contents of the specified render target at the
248 * framebuffer location corresponding to this shader invocation.
249 */
250 unsigned fb_fetch_output:1;
251
252 /**
253 * \brief Layout qualifier for gl_FragDepth.
254 *
255 * This is not equal to \c nir_depth_layout_none if and only if this
256 * variable is \c gl_FragDepth and a layout qualifier is specified.
257 */
258 nir_depth_layout depth_layout;
259
260 /**
261 * Storage location of the base of this variable
262 *
263 * The precise meaning of this field depends on the nature of the variable.
264 *
265 * - Vertex shader input: one of the values from \c gl_vert_attrib.
266 * - Vertex shader output: one of the values from \c gl_varying_slot.
267 * - Geometry shader input: one of the values from \c gl_varying_slot.
268 * - Geometry shader output: one of the values from \c gl_varying_slot.
269 * - Fragment shader input: one of the values from \c gl_varying_slot.
270 * - Fragment shader output: one of the values from \c gl_frag_result.
271 * - Uniforms: Per-stage uniform slot number for default uniform block.
272 * - Uniforms: Index within the uniform block definition for UBO members.
273 * - Non-UBO Uniforms: uniform slot number.
274 * - Other: This field is not currently used.
275 *
276 * If the variable is a uniform, shader input, or shader output, and the
277 * slot has not been assigned, the value will be -1.
278 */
279 int location;
280
281 /**
282 * The actual location of the variable in the IR. Only valid for inputs
283 * and outputs.
284 */
285 unsigned int driver_location;
286
287 /**
288 * Vertex stream output identifier.
289 *
290 * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
291 * stream of the i-th component.
292 */
293 unsigned stream;
294
295 /**
296 * Output index for dual source blending.
297 */
298 int index;
299
300 /**
301 * Descriptor set binding for sampler or UBO.
302 */
303 int descriptor_set;
304
305 /**
306 * Initial binding point for a sampler or UBO.
307 *
308 * For array types, this represents the binding point for the first element.
309 */
310 int binding;
311
312 /**
313 * Location an atomic counter is stored at.
314 */
315 unsigned offset;
316
317 /**
318 * ARB_shader_image_load_store qualifiers.
319 */
320 struct {
321 bool read_only; /**< "readonly" qualifier. */
322 bool write_only; /**< "writeonly" qualifier. */
323 bool coherent;
324 bool _volatile;
325 bool restrict_flag;
326
327 /** Image internal format if specified explicitly, otherwise GL_NONE. */
328 GLenum format;
329 } image;
330 } data;
331
332 /**
333 * Built-in state that backs this uniform
334 *
335 * Once set at variable creation, \c state_slots must remain invariant.
336 * This is because, ideally, this array would be shared by all clones of
337 * this variable in the IR tree. In other words, we'd really like for it
338 * to be a fly-weight.
339 *
340 * If the variable is not a uniform, \c num_state_slots will be zero and
341 * \c state_slots will be \c NULL.
342 */
343 /*@{*/
344 unsigned num_state_slots; /**< Number of state slots used */
345 nir_state_slot *state_slots; /**< State descriptors. */
346 /*@}*/
347
348 /**
349 * Constant expression assigned in the initializer of the variable
350 *
351 * This field should only be used temporarily by creators of NIR shaders
352 * and then lower_constant_initializers can be used to get rid of them.
353 * Most of the rest of NIR ignores this field or asserts that it's NULL.
354 */
355 nir_constant *constant_initializer;
356
357 /**
358 * For variables that are in an interface block or are an instance of an
359 * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
360 *
361 * \sa ir_variable::location
362 */
363 const struct glsl_type *interface_type;
364 } nir_variable;
365
366 #define nir_foreach_variable(var, var_list) \
367 foreach_list_typed(nir_variable, var, node, var_list)
368
369 #define nir_foreach_variable_safe(var, var_list) \
370 foreach_list_typed_safe(nir_variable, var, node, var_list)
371
372 static inline bool
373 nir_variable_is_global(const nir_variable *var)
374 {
375 return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
376 }
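/*
 * A minimal usage sketch (assumes a valid nir_shader *shader): the
 * nir_foreach_variable macro above iterates the typed variable exec_lists
 * declared on nir_shader further below, e.g.
 *
 *    nir_foreach_variable(var, &shader->uniforms) {
 *       if (var->data.mode == nir_var_uniform)
 *          printf("uniform %s at location %d\n", var->name, var->data.location);
 *    }
 */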
377
378 typedef struct nir_register {
379 struct exec_node node;
380
381 unsigned num_components; /** < number of vector components */
382 unsigned num_array_elems; /** < size of array (0 for no array) */
383
384 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
385 uint8_t bit_size;
386
387 /** generic register index. */
388 unsigned index;
389
390 /** only for debug purposes, can be NULL */
391 const char *name;
392
393 /** whether this register is local (per-function) or global (per-shader) */
394 bool is_global;
395
396 /**
397 * If this flag is set to true, then accessing channels >= num_components
398 * is well-defined, and simply spills over to the next array element. This
399 * is useful for backends that can do per-component accessing, in
400 * particular scalar backends. By setting this flag and making
401 * num_components equal to 1, structures can be packed tightly into
402 * registers and then registers can be accessed per-component to get to
403 * each structure member, even if it crosses vec4 boundaries.
404 */
405 bool is_packed;
406
407 /** set of nir_srcs where this register is used (read from) */
408 struct list_head uses;
409
410 /** set of nir_dests where this register is defined (written to) */
411 struct list_head defs;
412
413 /** set of nir_ifs where this register is used as a condition */
414 struct list_head if_uses;
415 } nir_register;
416
417 #define nir_foreach_register(reg, reg_list) \
418 foreach_list_typed(nir_register, reg, node, reg_list)
419 #define nir_foreach_register_safe(reg, reg_list) \
420 foreach_list_typed_safe(nir_register, reg, node, reg_list)
421
422 typedef enum {
423 nir_instr_type_alu,
424 nir_instr_type_call,
425 nir_instr_type_tex,
426 nir_instr_type_intrinsic,
427 nir_instr_type_load_const,
428 nir_instr_type_jump,
429 nir_instr_type_ssa_undef,
430 nir_instr_type_phi,
431 nir_instr_type_parallel_copy,
432 } nir_instr_type;
433
434 typedef struct nir_instr {
435 struct exec_node node;
436 nir_instr_type type;
437 struct nir_block *block;
438
439 /** generic instruction index. */
440 unsigned index;
441
442 /* A temporary for optimization and analysis passes to use for storing
443 * flags. For instance, DCE uses this to store the "dead/live" info.
444 */
445 uint8_t pass_flags;
446 } nir_instr;
447
448 static inline nir_instr *
449 nir_instr_next(nir_instr *instr)
450 {
451 struct exec_node *next = exec_node_get_next(&instr->node);
452 if (exec_node_is_tail_sentinel(next))
453 return NULL;
454 else
455 return exec_node_data(nir_instr, next, node);
456 }
457
458 static inline nir_instr *
459 nir_instr_prev(nir_instr *instr)
460 {
461 struct exec_node *prev = exec_node_get_prev(&instr->node);
462 if (exec_node_is_head_sentinel(prev))
463 return NULL;
464 else
465 return exec_node_data(nir_instr, prev, node);
466 }
467
468 static inline bool
469 nir_instr_is_first(const nir_instr *instr)
470 {
471 return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
472 }
473
474 static inline bool
475 nir_instr_is_last(const nir_instr *instr)
476 {
477 return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
478 }
479
480 typedef struct nir_ssa_def {
481 /** for debugging only, can be NULL */
482 const char* name;
483
484 /** generic SSA definition index. */
485 unsigned index;
486
487 /** Index into the live_in and live_out bitfields */
488 unsigned live_index;
489
490 nir_instr *parent_instr;
491
492 /** set of nir_srcs where this SSA def is used (read from) */
493 struct list_head uses;
494
495 /** set of nir_ifs where this SSA def is used as a condition */
496 struct list_head if_uses;
497
498 uint8_t num_components;
499
500 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
501 uint8_t bit_size;
502 } nir_ssa_def;
503
504 struct nir_src;
505
506 typedef struct {
507 nir_register *reg;
508 struct nir_src *indirect; /** < NULL for no indirect offset */
509 unsigned base_offset;
510
511 /* TODO use-def chain goes here */
512 } nir_reg_src;
513
514 typedef struct {
515 nir_instr *parent_instr;
516 struct list_head def_link;
517
518 nir_register *reg;
519 struct nir_src *indirect; /** < NULL for no indirect offset */
520 unsigned base_offset;
521
522 /* TODO def-use chain goes here */
523 } nir_reg_dest;
524
525 struct nir_if;
526
527 typedef struct nir_src {
528 union {
529 nir_instr *parent_instr;
530 struct nir_if *parent_if;
531 };
532
533 struct list_head use_link;
534
535 union {
536 nir_reg_src reg;
537 nir_ssa_def *ssa;
538 };
539
540 bool is_ssa;
541 } nir_src;
542
543 static inline nir_src
544 nir_src_init(void)
545 {
546 nir_src src = { { NULL } };
547 return src;
548 }
549
550 #define NIR_SRC_INIT nir_src_init()
551
552 #define nir_foreach_use(src, reg_or_ssa_def) \
553 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
554
555 #define nir_foreach_use_safe(src, reg_or_ssa_def) \
556 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
557
558 #define nir_foreach_if_use(src, reg_or_ssa_def) \
559 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
560
561 #define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
562 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
563
564 typedef struct {
565 union {
566 nir_reg_dest reg;
567 nir_ssa_def ssa;
568 };
569
570 bool is_ssa;
571 } nir_dest;
572
573 static inline nir_dest
574 nir_dest_init(void)
575 {
576 nir_dest dest = { { { NULL } } };
577 return dest;
578 }
579
580 #define NIR_DEST_INIT nir_dest_init()
581
582 #define nir_foreach_def(dest, reg) \
583 list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
584
585 #define nir_foreach_def_safe(dest, reg) \
586 list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
587
588 static inline nir_src
589 nir_src_for_ssa(nir_ssa_def *def)
590 {
591 nir_src src = NIR_SRC_INIT;
592
593 src.is_ssa = true;
594 src.ssa = def;
595
596 return src;
597 }
598
599 static inline nir_src
600 nir_src_for_reg(nir_register *reg)
601 {
602 nir_src src = NIR_SRC_INIT;
603
604 src.is_ssa = false;
605 src.reg.reg = reg;
606 src.reg.indirect = NULL;
607 src.reg.base_offset = 0;
608
609 return src;
610 }
611
612 static inline nir_dest
613 nir_dest_for_reg(nir_register *reg)
614 {
615 nir_dest dest = NIR_DEST_INIT;
616
617 dest.reg.reg = reg;
618
619 return dest;
620 }
621
622 static inline unsigned
623 nir_src_bit_size(nir_src src)
624 {
625 return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
626 }
627
628 static inline unsigned
629 nir_dest_bit_size(nir_dest dest)
630 {
631 return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
632 }
633
634 void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
635 void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
636
637 typedef struct {
638 nir_src src;
639
640 /**
641 * \name input modifiers
642 */
643 /*@{*/
644 /**
645 * For inputs interpreted as floating point, flips the sign bit. For
646 * inputs interpreted as integers, performs the two's complement negation.
647 */
648 bool negate;
649
650 /**
651 * Clears the sign bit for floating point values, and computes the integer
652 * absolute value for integers. Note that the negate modifier acts after
653 * the absolute value modifier, therefore if both are set then all inputs
654 * will become negative.
655 */
656 bool abs;
657 /*@}*/
658
659 /**
660 * For each input component, says which component of the register it is
661 * chosen from. Note that which elements of the swizzle are used and which
662 * are ignored are based on the write mask for most opcodes - for example,
663 * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
664 * a swizzle of {2, x, 1, 0} where x means "don't care."
665 */
666 uint8_t swizzle[4];
667 } nir_alu_src;
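/*
 * Worked example for the swizzle/write-mask interaction described above
 * (illustrative only, for a register destination since write_mask is
 * ignored for SSA destinations): lowering "foo.xzw = bar.zyx" would use
 *
 *    instr->dest.write_mask = 0xd;   // 1101b: x, z, w written
 *    instr->src[0].swizzle[0] = 2;   // foo.x <- bar.z
 *    instr->src[0].swizzle[1] = 0;   // don't care (y is not written)
 *    instr->src[0].swizzle[2] = 1;   // foo.z <- bar.y
 *    instr->src[0].swizzle[3] = 0;   // foo.w <- bar.x
 */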
668
669 typedef struct {
670 nir_dest dest;
671
672 /**
673 * \name saturate output modifier
674 *
675 * Only valid for opcodes that output floating-point numbers. Clamps the
676 * output to between 0.0 and 1.0 inclusive.
677 */
678
679 bool saturate;
680
681 unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
682 } nir_alu_dest;
683
684 typedef enum {
685 nir_type_invalid = 0, /* Not a valid type */
686 nir_type_float,
687 nir_type_int,
688 nir_type_uint,
689 nir_type_bool,
690 nir_type_bool32 = 32 | nir_type_bool,
691 nir_type_int8 = 8 | nir_type_int,
692 nir_type_int16 = 16 | nir_type_int,
693 nir_type_int32 = 32 | nir_type_int,
694 nir_type_int64 = 64 | nir_type_int,
695 nir_type_uint8 = 8 | nir_type_uint,
696 nir_type_uint16 = 16 | nir_type_uint,
697 nir_type_uint32 = 32 | nir_type_uint,
698 nir_type_uint64 = 64 | nir_type_uint,
699 nir_type_float16 = 16 | nir_type_float,
700 nir_type_float32 = 32 | nir_type_float,
701 nir_type_float64 = 64 | nir_type_float,
702 } nir_alu_type;
703
704 #define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
705 #define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
706
707 static inline unsigned
708 nir_alu_type_get_type_size(nir_alu_type type)
709 {
710 return type & NIR_ALU_TYPE_SIZE_MASK;
711 }
712
713 static inline unsigned
714 nir_alu_type_get_base_type(nir_alu_type type)
715 {
716 return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
717 }
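/*
 * For example (a sketch of the encoding above): nir_type_float32 is
 * (32 | nir_type_float), so
 *
 *    nir_alu_type_get_type_size(nir_type_float32) == 32
 *    nir_alu_type_get_base_type(nir_type_float32) == nir_type_float
 *
 * and a sized type can be reassembled from an unsized base type with
 * something like (nir_alu_type)(nir_type_float | nir_src_bit_size(src)).
 */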
718
719 static inline nir_alu_type
720 nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
721 {
722 switch (base_type) {
723 case GLSL_TYPE_BOOL:
724 return nir_type_bool32;
725 break;
726 case GLSL_TYPE_UINT:
727 return nir_type_uint32;
728 break;
729 case GLSL_TYPE_INT:
730 return nir_type_int32;
731 break;
732 case GLSL_TYPE_UINT16:
733 return nir_type_uint16;
734 break;
735 case GLSL_TYPE_INT16:
736 return nir_type_int16;
737 break;
738 case GLSL_TYPE_UINT64:
739 return nir_type_uint64;
740 break;
741 case GLSL_TYPE_INT64:
742 return nir_type_int64;
743 break;
744 case GLSL_TYPE_FLOAT:
745 return nir_type_float32;
746 break;
747 case GLSL_TYPE_FLOAT16:
748 return nir_type_float16;
749 break;
750 case GLSL_TYPE_DOUBLE:
751 return nir_type_float64;
752 break;
753 default:
754 unreachable("unknown type");
755 }
756 }
757
758 static inline nir_alu_type
759 nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
760 {
761 return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
762 }
763
764 nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
765 nir_rounding_mode rnd);
766
767 typedef enum {
768 NIR_OP_IS_COMMUTATIVE = (1 << 0),
769 NIR_OP_IS_ASSOCIATIVE = (1 << 1),
770 } nir_op_algebraic_property;
771
772 typedef struct {
773 const char *name;
774
775 unsigned num_inputs;
776
777 /**
778 * The number of components in the output
779 *
780 * If non-zero, this is the size of the output and input sizes are
781 * explicitly given; swizzle and writemask are still in effect, but if
782 * the output component is masked out, then the input component may
783 * still be in use.
784 *
785 * If zero, the opcode acts in the standard, per-component manner; the
786 * operation is performed on each component (except the ones that are
787 * masked out) with the input being taken from the input swizzle for
788 * that component.
789 *
790 * The size of some of the inputs may be given (i.e. non-zero) even
791 * though output_size is zero; in that case, the inputs with a zero
792 * size act per-component, while the inputs with non-zero size don't.
793 */
794 unsigned output_size;
795
796 /**
797 * The type of vector that the instruction outputs. Note that the
798 * saturate modifier is only allowed on outputs with the float type.
799 */
800
801 nir_alu_type output_type;
802
803 /**
804 * The number of components in each input
805 */
806 unsigned input_sizes[4];
807
808 /**
809 * The type of vector that each input takes. Note that negate and
810 * absolute value are only allowed on inputs with int or float type and
811 * behave differently on the two.
812 */
813 nir_alu_type input_types[4];
814
815 nir_op_algebraic_property algebraic_properties;
816 } nir_op_info;
817
818 extern const nir_op_info nir_op_infos[nir_num_opcodes];
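/*
 * Illustrative sketch of how output_size is used (the exact table entries
 * are generated from the opcode definitions, so treat the specific values
 * as assumptions): a per-component opcode such as fadd has output_size == 0
 * and input_sizes {0, 0}, while a horizontal opcode such as fdot4 has
 * output_size == 1 and input_sizes {4, 4}. A pass working on an ALU
 * instruction with an SSA destination might query the effective output
 * width as
 *
 *    const nir_op_info *info = &nir_op_infos[alu->op];
 *    unsigned n = info->output_size == 0 ? alu->dest.dest.ssa.num_components
 *                                        : info->output_size;
 */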
819
820 typedef struct nir_alu_instr {
821 nir_instr instr;
822 nir_op op;
823
824 /** Indicates that this ALU instruction generates an exact value
825 *
826 * This is kind of a mixture of GLSL "precise" and "invariant" and not
827 * really equivalent to either. This indicates that the value generated by
828 * this operation is high-precision and any code transformations that touch
829 * it must ensure that the resulting value is bit-for-bit identical to the
830 * original.
831 */
832 bool exact;
833
834 nir_alu_dest dest;
835 nir_alu_src src[];
836 } nir_alu_instr;
837
838 void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
839 nir_alu_instr *instr);
840 void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
841 nir_alu_instr *instr);
842
843 /* is this source channel used? */
844 static inline bool
845 nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
846 unsigned channel)
847 {
848 if (nir_op_infos[instr->op].input_sizes[src] > 0)
849 return channel < nir_op_infos[instr->op].input_sizes[src];
850
851 return (instr->dest.write_mask >> channel) & 1;
852 }
853
854 /*
855 * For instructions whose destinations are SSA, get the number of channels
856 * used for a source
857 */
858 static inline unsigned
859 nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
860 {
861 assert(instr->dest.dest.is_ssa);
862
863 if (nir_op_infos[instr->op].input_sizes[src] > 0)
864 return nir_op_infos[instr->op].input_sizes[src];
865
866 return instr->dest.dest.ssa.num_components;
867 }
868
869 bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
870 unsigned src1, unsigned src2);
871
872 typedef enum {
873 nir_deref_type_var,
874 nir_deref_type_array,
875 nir_deref_type_struct
876 } nir_deref_type;
877
878 typedef struct nir_deref {
879 nir_deref_type deref_type;
880 struct nir_deref *child;
881 const struct glsl_type *type;
882 } nir_deref;
883
884 typedef struct {
885 nir_deref deref;
886
887 nir_variable *var;
888 } nir_deref_var;
889
890 /* This enum describes how the array is referenced. If the deref is
891 * direct then the base_offset is used. If the deref is indirect then
892 * offset is given by base_offset + indirect. If the deref is a wildcard
893 * then the deref refers to all of the elements of the array at the same
894 * time. Wildcard dereferences are only ever allowed in copy_var
895 * intrinsics and the source and destination derefs must have matching
896 * wildcards.
897 */
898 typedef enum {
899 nir_deref_array_type_direct,
900 nir_deref_array_type_indirect,
901 nir_deref_array_type_wildcard,
902 } nir_deref_array_type;
903
904 typedef struct {
905 nir_deref deref;
906
907 nir_deref_array_type deref_array_type;
908 unsigned base_offset;
909 nir_src indirect;
910 } nir_deref_array;
911
912 typedef struct {
913 nir_deref deref;
914
915 unsigned index;
916 } nir_deref_struct;
917
918 NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
919 deref_type, nir_deref_type_var)
920 NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
921 deref_type, nir_deref_type_array)
922 NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
923 deref_type, nir_deref_type_struct)
924
925 /* Returns the last deref in the chain. */
926 static inline nir_deref *
927 nir_deref_tail(nir_deref *deref)
928 {
929 while (deref->child)
930 deref = deref->child;
931 return deref;
932 }
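/*
 * Deref chains, sketched for a GLSL access like "s.member[2]" (assumes a
 * nir_variable *var of struct-of-array type; the field index 0 below is
 * hypothetical):
 *
 *    nir_deref_var    head   : head.var = var
 *      -> nir_deref_struct   : .index = 0                       ("member")
 *        -> nir_deref_array  : .deref_array_type = nir_deref_array_type_direct,
 *                              .base_offset = 2                 ("[2]")
 *
 * nir_deref_tail(&head.deref) returns the trailing array deref, since each
 * deref links to the next one through its child pointer.
 */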
933
934 typedef struct {
935 nir_instr instr;
936
937 unsigned num_params;
938 nir_deref_var **params;
939 nir_deref_var *return_deref;
940
941 struct nir_function *callee;
942 } nir_call_instr;
943
944 #define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
945 num_variables, num_indices, idx0, idx1, idx2, flags) \
946 nir_intrinsic_##name,
947
948 #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
949
950 typedef enum {
951 #include "nir_intrinsics.h"
952 nir_num_intrinsics = nir_last_intrinsic + 1
953 } nir_intrinsic_op;
954
955 #define NIR_INTRINSIC_MAX_CONST_INDEX 3
956
957 /** Represents an intrinsic
958 *
959 * An intrinsic is an instruction type for handling things that are
960 * more-or-less regular operations but don't just consume and produce SSA
961 * values like ALU operations do. Intrinsics are not for things that have
962 * special semantic meaning such as phi nodes and parallel copies.
963 * Examples of intrinsics include variable load/store operations, system
964 * value loads, and the like. Even though texturing more-or-less falls
965 * under this category, texturing is its own instruction type because
966 * trying to represent texturing with intrinsics would lead to a
967 * combinatorial explosion of intrinsic opcodes.
968 *
969 * By having a single instruction type for handling a lot of different
970 * cases, optimization passes can look for intrinsics and, for the most
971 * part, completely ignore them. Each intrinsic type also has a few
972 * possible flags that govern whether or not they can be reordered or
973 * eliminated. That way passes like dead code elimination can still work
974 * on intrinsics without understanding the meaning of each.
975 *
976 * Each intrinsic has some number of constant indices, some number of
977 * variables, and some number of sources. What these sources, variables,
978 * and indices mean depends on the intrinsic and is documented with the
979 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
980 * instructions are the only types of instruction that can operate on
981 * variables.
982 */
983 typedef struct {
984 nir_instr instr;
985
986 nir_intrinsic_op intrinsic;
987
988 nir_dest dest;
989
990 /** number of components if this is a vectorized intrinsic
991 *
992 * Similarly to ALU operations, some intrinsics are vectorized.
993 * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
994 * For vectorized intrinsics, the num_components field specifies the
995 * number of destination components and the number of source components
996 * for all sources with nir_intrinsic_infos.src_components[i] == 0.
997 */
998 uint8_t num_components;
999
1000 int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
1001
1002 nir_deref_var *variables[2];
1003
1004 nir_src src[];
1005 } nir_intrinsic_instr;
1006
1007 /**
1008 * \name NIR intrinsics semantic flags
1009 *
1010 * Information about what the compiler can do with the intrinsics.
1011 *
1012 * \sa nir_intrinsic_info::flags
1013 */
1014 typedef enum {
1015 /**
1016 * Whether the intrinsic can be safely eliminated if none of its output
1017 * values are being used.
1018 */
1019 NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),
1020
1021 /**
1022 * Whether the intrinsic can be reordered with respect to any other
1023 * intrinsic, i.e. whether the only reordering dependencies of the
1024 * intrinsic are due to the register reads/writes.
1025 */
1026 NIR_INTRINSIC_CAN_REORDER = (1 << 1),
1027 } nir_intrinsic_semantic_flag;
1028
1029 /**
1030 * \name NIR intrinsics const-index flag
1031 *
1032 * Indicates the usage of a const_index slot.
1033 *
1034 * \sa nir_intrinsic_info::index_map
1035 */
1036 typedef enum {
1037 /**
1038 * Generally, instructions that take an offset src argument can encode
1039 * a constant 'base' value which is added to the offset.
1040 */
1041 NIR_INTRINSIC_BASE = 1,
1042
1043 /**
1044 * For store instructions, a writemask for the store.
1045 */
1046 NIR_INTRINSIC_WRMASK = 2,
1047
1048 /**
1049 * The stream-id for GS emit_vertex/end_primitive intrinsics.
1050 */
1051 NIR_INTRINSIC_STREAM_ID = 3,
1052
1053 /**
1054 * The clip-plane id for load_user_clip_plane intrinsic.
1055 */
1056 NIR_INTRINSIC_UCP_ID = 4,
1057
1058 /**
1059 * The amount of data, starting from BASE, that this instruction may
1060 * access. This is used to provide bounds if the offset is not constant.
1061 */
1062 NIR_INTRINSIC_RANGE = 5,
1063
1064 /**
1065 * The Vulkan descriptor set for vulkan_resource_index intrinsic.
1066 */
1067 NIR_INTRINSIC_DESC_SET = 6,
1068
1069 /**
1070 * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
1071 */
1072 NIR_INTRINSIC_BINDING = 7,
1073
1074 /**
1075 * Component offset.
1076 */
1077 NIR_INTRINSIC_COMPONENT = 8,
1078
1079 /**
1080 * Interpolation mode (only meaningful for FS inputs).
1081 */
1082 NIR_INTRINSIC_INTERP_MODE = 9,
1083
1084 /**
1085 * A binary nir_op to use when performing a reduction or scan operation
1086 */
1087 NIR_INTRINSIC_REDUCTION_OP = 10,
1088
1089 /**
1090 * Cluster size for reduction operations
1091 */
1092 NIR_INTRINSIC_CLUSTER_SIZE = 11,
1093
1094 NIR_INTRINSIC_NUM_INDEX_FLAGS,
1095
1096 } nir_intrinsic_index_flag;
1097
1098 #define NIR_INTRINSIC_MAX_INPUTS 4
1099
1100 typedef struct {
1101 const char *name;
1102
1103 unsigned num_srcs; /** < number of register/SSA inputs */
1104
1105 /** number of components of each input register
1106 *
1107 * If this value is 0, the number of components is given by the
1108 * num_components field of nir_intrinsic_instr.
1109 */
1110 unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];
1111
1112 bool has_dest;
1113
1114 /** number of components of the output register
1115 *
1116 * If this value is 0, the number of components is given by the
1117 * num_components field of nir_intrinsic_instr.
1118 */
1119 unsigned dest_components;
1120
1121 /** the number of inputs/outputs that are variables */
1122 unsigned num_variables;
1123
1124 /** the number of constant indices used by the intrinsic */
1125 unsigned num_indices;
1126
1127 /** indicates the usage of intr->const_index[n] */
1128 unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];
1129
1130 /** semantic flags for calls to this intrinsic */
1131 nir_intrinsic_semantic_flag flags;
1132 } nir_intrinsic_info;
1133
1134 extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
1135
1136
1137 #define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
1138 static inline type \
1139 nir_intrinsic_##name(const nir_intrinsic_instr *instr) \
1140 { \
1141 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1142 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1143 return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
1144 } \
1145 static inline void \
1146 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
1147 { \
1148 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1149 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1150 instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
1151 }
1152
1153 INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
1154 INTRINSIC_IDX_ACCESSORS(base, BASE, int)
1155 INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
1156 INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
1157 INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
1158 INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
1159 INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
1160 INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
1161 INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
1162 INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
1163 INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
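/*
 * Usage sketch for the generated accessors above (assumes a
 * nir_intrinsic_instr *store whose opcode, e.g. store_output, declares BASE
 * and WRMASK indices in nir_intrinsics.h):
 *
 *    int base = nir_intrinsic_base(store);
 *    nir_intrinsic_set_write_mask(store, 0x1);
 *
 * Each accessor asserts that the intrinsic actually uses the corresponding
 * const_index slot, so calling the wrong accessor on an intrinsic is caught
 * in debug builds.
 */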
1164
1165 /**
1166 * \group texture information
1167 *
1168 * This gives semantic information about textures which is useful to the
1169 * frontend, the backend, and lowering passes, but not the optimizer.
1170 */
1171
1172 typedef enum {
1173 nir_tex_src_coord,
1174 nir_tex_src_projector,
1175 nir_tex_src_comparator, /* shadow comparator */
1176 nir_tex_src_offset,
1177 nir_tex_src_bias,
1178 nir_tex_src_lod,
1179 nir_tex_src_ms_index, /* MSAA sample index */
1180 nir_tex_src_ms_mcs, /* MSAA compression value */
1181 nir_tex_src_ddx,
1182 nir_tex_src_ddy,
1183 nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
1184 nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
1185 nir_tex_src_plane, /* < selects plane for planar textures */
1186 nir_num_tex_src_types
1187 } nir_tex_src_type;
1188
1189 typedef struct {
1190 nir_src src;
1191 nir_tex_src_type src_type;
1192 } nir_tex_src;
1193
1194 typedef enum {
1195 nir_texop_tex, /**< Regular texture look-up */
1196 nir_texop_txb, /**< Texture look-up with LOD bias */
1197 nir_texop_txl, /**< Texture look-up with explicit LOD */
1198 nir_texop_txd, /**< Texture look-up with partial derivatives */
1199 nir_texop_txf, /**< Texel fetch with explicit LOD */
1200 nir_texop_txf_ms, /**< Multisample texture fetch */
1201 nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
1202 nir_texop_txs, /**< Texture size */
1203 nir_texop_lod, /**< Texture lod query */
1204 nir_texop_tg4, /**< Texture gather */
1205 nir_texop_query_levels, /**< Texture levels query */
1206 nir_texop_texture_samples, /**< Texture samples query */
1207 nir_texop_samples_identical, /**< Query whether all samples are definitely
1208 * identical.
1209 */
1210 } nir_texop;
1211
1212 typedef struct {
1213 nir_instr instr;
1214
1215 enum glsl_sampler_dim sampler_dim;
1216 nir_alu_type dest_type;
1217
1218 nir_texop op;
1219 nir_dest dest;
1220 nir_tex_src *src;
1221 unsigned num_srcs, coord_components;
1222 bool is_array, is_shadow;
1223
1224 /**
1225 * If is_shadow is true, whether this is the old-style shadow that outputs 4
1226 * components or the new-style shadow that outputs 1 component.
1227 */
1228 bool is_new_style_shadow;
1229
1230 /* gather component selector */
1231 unsigned component : 2;
1232
1233 /** The texture index
1234 *
1235 * If this texture instruction has a nir_tex_src_texture_offset source,
1236 * then the texture index is given by texture_index + texture_offset.
1237 */
1238 unsigned texture_index;
1239
1240 /** The size of the texture array or 0 if it's not an array */
1241 unsigned texture_array_size;
1242
1243 /** The texture deref
1244 *
1245 * If this is null, use texture_index instead.
1246 */
1247 nir_deref_var *texture;
1248
1249 /** The sampler index
1250 *
1251 * The following operations do not require a sampler and, as such, this
1252 * field should be ignored:
1253 * - nir_texop_txf
1254 * - nir_texop_txf_ms
1255 * - nir_texop_txs
1256 * - nir_texop_lod
1257 * - nir_texop_query_levels
1258 * - nir_texop_texture_samples
1259 * - nir_texop_samples_identical
1260 *
1261 * If this texture instruction has a nir_tex_src_sampler_offset source,
1262 * then the sampler index is given by sampler_index + sampler_offset.
1263 */
1264 unsigned sampler_index;
1265
1266 /** The sampler deref
1267 *
1268 * If this is null, use sampler_index instead.
1269 */
1270 nir_deref_var *sampler;
1271 } nir_tex_instr;
1272
1273 static inline unsigned
1274 nir_tex_instr_dest_size(const nir_tex_instr *instr)
1275 {
1276 switch (instr->op) {
1277 case nir_texop_txs: {
1278 unsigned ret;
1279 switch (instr->sampler_dim) {
1280 case GLSL_SAMPLER_DIM_1D:
1281 case GLSL_SAMPLER_DIM_BUF:
1282 ret = 1;
1283 break;
1284 case GLSL_SAMPLER_DIM_2D:
1285 case GLSL_SAMPLER_DIM_CUBE:
1286 case GLSL_SAMPLER_DIM_MS:
1287 case GLSL_SAMPLER_DIM_RECT:
1288 case GLSL_SAMPLER_DIM_EXTERNAL:
1289 case GLSL_SAMPLER_DIM_SUBPASS:
1290 ret = 2;
1291 break;
1292 case GLSL_SAMPLER_DIM_3D:
1293 ret = 3;
1294 break;
1295 default:
1296 unreachable("not reached");
1297 }
1298 if (instr->is_array)
1299 ret++;
1300 return ret;
1301 }
1302
1303 case nir_texop_lod:
1304 return 2;
1305
1306 case nir_texop_texture_samples:
1307 case nir_texop_query_levels:
1308 case nir_texop_samples_identical:
1309 return 1;
1310
1311 default:
1312 if (instr->is_shadow && instr->is_new_style_shadow)
1313 return 1;
1314
1315 return 4;
1316 }
1317 }
1318
1319 /* Returns true if this texture operation queries something about the texture
1320 * rather than actually sampling it.
1321 */
1322 static inline bool
1323 nir_tex_instr_is_query(const nir_tex_instr *instr)
1324 {
1325 switch (instr->op) {
1326 case nir_texop_txs:
1327 case nir_texop_lod:
1328 case nir_texop_texture_samples:
1329 case nir_texop_query_levels:
1330 case nir_texop_txf_ms_mcs:
1331 return true;
1332 case nir_texop_tex:
1333 case nir_texop_txb:
1334 case nir_texop_txl:
1335 case nir_texop_txd:
1336 case nir_texop_txf:
1337 case nir_texop_txf_ms:
1338 case nir_texop_tg4:
1339 return false;
1340 default:
1341 unreachable("Invalid texture opcode");
1342 }
1343 }
1344
1345 static inline nir_alu_type
1346 nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
1347 {
1348 switch (instr->src[src].src_type) {
1349 case nir_tex_src_coord:
1350 switch (instr->op) {
1351 case nir_texop_txf:
1352 case nir_texop_txf_ms:
1353 case nir_texop_txf_ms_mcs:
1354 case nir_texop_samples_identical:
1355 return nir_type_int;
1356
1357 default:
1358 return nir_type_float;
1359 }
1360
1361 case nir_tex_src_lod:
1362 switch (instr->op) {
1363 case nir_texop_txs:
1364 case nir_texop_txf:
1365 return nir_type_int;
1366
1367 default:
1368 return nir_type_float;
1369 }
1370
1371 case nir_tex_src_projector:
1372 case nir_tex_src_comparator:
1373 case nir_tex_src_bias:
1374 case nir_tex_src_ddx:
1375 case nir_tex_src_ddy:
1376 return nir_type_float;
1377
1378 case nir_tex_src_offset:
1379 case nir_tex_src_ms_index:
1380 case nir_tex_src_texture_offset:
1381 case nir_tex_src_sampler_offset:
1382 return nir_type_int;
1383
1384 default:
1385 unreachable("Invalid texture source type");
1386 }
1387 }
1388
1389 static inline unsigned
1390 nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
1391 {
1392 if (instr->src[src].src_type == nir_tex_src_coord)
1393 return instr->coord_components;
1394
1395 /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
1396 if (instr->src[src].src_type == nir_tex_src_ms_mcs)
1397 return 4;
1398
1399 if (instr->src[src].src_type == nir_tex_src_ddx ||
1400 instr->src[src].src_type == nir_tex_src_ddy) {
1401 if (instr->is_array)
1402 return instr->coord_components - 1;
1403 else
1404 return instr->coord_components;
1405 }
1406
1407 /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
1408 * the offset, since a cube maps to a single face.
1409 */
1410 if (instr->src[src].src_type == nir_tex_src_offset) {
1411 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
1412 return 2;
1413 else if (instr->is_array)
1414 return instr->coord_components - 1;
1415 else
1416 return instr->coord_components;
1417 }
1418
1419 return 1;
1420 }
1421
1422 static inline int
1423 nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
1424 {
1425 for (unsigned i = 0; i < instr->num_srcs; i++)
1426 if (instr->src[i].src_type == type)
1427 return (int) i;
1428
1429 return -1;
1430 }
1431
1432 void nir_tex_instr_add_src(nir_tex_instr *tex,
1433 nir_tex_src_type src_type,
1434 nir_src src);
1435
1436 void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
1437
1438 typedef struct {
1439 nir_instr instr;
1440
1441 nir_const_value value;
1442
1443 nir_ssa_def def;
1444 } nir_load_const_instr;
1445
1446 typedef enum {
1447 nir_jump_return,
1448 nir_jump_break,
1449 nir_jump_continue,
1450 } nir_jump_type;
1451
1452 typedef struct {
1453 nir_instr instr;
1454 nir_jump_type type;
1455 } nir_jump_instr;
1456
1457 /* creates a new SSA variable in an undefined state */
1458
1459 typedef struct {
1460 nir_instr instr;
1461 nir_ssa_def def;
1462 } nir_ssa_undef_instr;
1463
1464 typedef struct {
1465 struct exec_node node;
1466
1467 /* The predecessor block corresponding to this source */
1468 struct nir_block *pred;
1469
1470 nir_src src;
1471 } nir_phi_src;
1472
1473 #define nir_foreach_phi_src(phi_src, phi) \
1474 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
1475 #define nir_foreach_phi_src_safe(phi_src, phi) \
1476 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
1477
1478 typedef struct {
1479 nir_instr instr;
1480
1481 struct exec_list srcs; /** < list of nir_phi_src */
1482
1483 nir_dest dest;
1484 } nir_phi_instr;
1485
1486 typedef struct {
1487 struct exec_node node;
1488 nir_src src;
1489 nir_dest dest;
1490 } nir_parallel_copy_entry;
1491
1492 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
1493 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
1494
1495 typedef struct {
1496 nir_instr instr;
1497
1498 /* A list of nir_parallel_copy_entrys. The sources of all of the
1499 * entries are copied to the corresponding destinations "in parallel".
1500 * In other words, if we have two entries: a -> b and b -> a, the values
1501 * get swapped.
1502 */
1503 struct exec_list entries;
1504 } nir_parallel_copy_instr;
1505
1506 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
1507 type, nir_instr_type_alu)
1508 NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
1509 type, nir_instr_type_call)
1510 NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
1511 type, nir_instr_type_jump)
1512 NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
1513 type, nir_instr_type_tex)
1514 NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
1515 type, nir_instr_type_intrinsic)
1516 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
1517 type, nir_instr_type_load_const)
1518 NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
1519 type, nir_instr_type_ssa_undef)
1520 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
1521 type, nir_instr_type_phi)
1522 NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
1523 nir_parallel_copy_instr, instr,
1524 type, nir_instr_type_parallel_copy)
1525
1526 /*
1527 * Control flow
1528 *
1529 * Control flow consists of a tree of control flow nodes, which include
1530 * if-statements and loops. The leaves of the tree are basic blocks, lists of
1531 * instructions that always run start-to-finish. Each basic block also keeps
1532 * track of its successors (blocks which may run immediately after the current
1533 * block) and predecessors (blocks which could have run immediately before the
1534 * current block). Each function also has a start block and an end block; all
1535 * return statements point to the end block, which is always empty. Together, all the
1536 * blocks with their predecessors and successors make up the control flow
1537 * graph (CFG) of the function. There are helpers that modify the tree of
1538 * control flow nodes while modifying the CFG appropriately; these should be
1539 * used instead of modifying the tree directly.
1540 */
1541
1542 typedef enum {
1543 nir_cf_node_block,
1544 nir_cf_node_if,
1545 nir_cf_node_loop,
1546 nir_cf_node_function
1547 } nir_cf_node_type;
1548
1549 typedef struct nir_cf_node {
1550 struct exec_node node;
1551 nir_cf_node_type type;
1552 struct nir_cf_node *parent;
1553 } nir_cf_node;
1554
1555 typedef struct nir_block {
1556 nir_cf_node cf_node;
1557
1558 struct exec_list instr_list; /** < list of nir_instr */
1559
1560 /** generic block index; generated by nir_index_blocks */
1561 unsigned index;
1562
1563 /*
1564 * Each block can only have up to 2 successors, so we put them in a simple
1565 * array - no need for anything more complicated.
1566 */
1567 struct nir_block *successors[2];
1568
1569 /* Set of nir_block predecessors in the CFG */
1570 struct set *predecessors;
1571
1572 /*
1573 * this node's immediate dominator in the dominance tree - set to NULL for
1574 * the start block.
1575 */
1576 struct nir_block *imm_dom;
1577
1578 /* This node's children in the dominance tree */
1579 unsigned num_dom_children;
1580 struct nir_block **dom_children;
1581
1582 /* Set of nir_blocks on the dominance frontier of this block */
1583 struct set *dom_frontier;
1584
1585 /*
1586 * These two indices have the property that dom_{pre,post}_index for each
1587 * child of this block in the dominance tree will always be between
1588 * dom_pre_index and dom_post_index for this block, which makes testing if
1589 * a given block is dominated by another block an O(1) operation.
1590 */
1591 unsigned dom_pre_index, dom_post_index;
1592
1593 /* live in and out for this block; used for liveness analysis */
1594 BITSET_WORD *live_in;
1595 BITSET_WORD *live_out;
1596 } nir_block;
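/*
 * A sketch of the O(1) dominance test enabled by dom_pre_index /
 * dom_post_index (assumes the dominance metadata is up to date; NIR's real
 * helper lives in its dominance code):
 *
 *    static inline bool
 *    block_dominates(const nir_block *parent, const nir_block *child)
 *    {
 *       return parent->dom_pre_index <= child->dom_pre_index &&
 *              child->dom_post_index <= parent->dom_post_index;
 *    }
 */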
1597
1598 static inline nir_instr *
1599 nir_block_first_instr(nir_block *block)
1600 {
1601 struct exec_node *head = exec_list_get_head(&block->instr_list);
1602 return exec_node_data(nir_instr, head, node);
1603 }
1604
1605 static inline nir_instr *
1606 nir_block_last_instr(nir_block *block)
1607 {
1608 struct exec_node *tail = exec_list_get_tail(&block->instr_list);
1609 return exec_node_data(nir_instr, tail, node);
1610 }
1611
1612 #define nir_foreach_instr(instr, block) \
1613 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
1614 #define nir_foreach_instr_reverse(instr, block) \
1615 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
1616 #define nir_foreach_instr_safe(instr, block) \
1617 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
1618 #define nir_foreach_instr_reverse_safe(instr, block) \
1619 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
1620
1621 typedef struct nir_if {
1622 nir_cf_node cf_node;
1623 nir_src condition;
1624
1625 struct exec_list then_list; /** < list of nir_cf_node */
1626 struct exec_list else_list; /** < list of nir_cf_node */
1627 } nir_if;
1628
1629 typedef struct {
1630 nir_if *nif;
1631
1632 nir_instr *conditional_instr;
1633
1634 nir_block *break_block;
1635 nir_block *continue_from_block;
1636
1637 bool continue_from_then;
1638
1639 struct list_head loop_terminator_link;
1640 } nir_loop_terminator;
1641
1642 typedef struct {
1643 /* Number of instructions in the loop */
1644 unsigned num_instructions;
1645
1646 /* How many times the loop is run (if known) */
1647 unsigned trip_count;
1648 bool is_trip_count_known;
1649
1650 /* Unroll the loop regardless of its size */
1651 bool force_unroll;
1652
1653 nir_loop_terminator *limiting_terminator;
1654
1655 /* A list of loop_terminators terminating this loop. */
1656 struct list_head loop_terminator_list;
1657 } nir_loop_info;
1658
1659 typedef struct {
1660 nir_cf_node cf_node;
1661
1662 struct exec_list body; /** < list of nir_cf_node */
1663
1664 nir_loop_info *info;
1665 } nir_loop;
1666
1667 /**
1668 * Various bits of metadata that may be created or required by
1669 * optimization and analysis passes
1670 */
1671 typedef enum {
1672 nir_metadata_none = 0x0,
1673 nir_metadata_block_index = 0x1,
1674 nir_metadata_dominance = 0x2,
1675 nir_metadata_live_ssa_defs = 0x4,
1676 nir_metadata_not_properly_reset = 0x8,
1677 nir_metadata_loop_analysis = 0x10,
1678 } nir_metadata;
1679
1680 typedef struct {
1681 nir_cf_node cf_node;
1682
1683 /** pointer to the function of which this is an implementation */
1684 struct nir_function *function;
1685
1686 struct exec_list body; /** < list of nir_cf_node */
1687
1688 nir_block *end_block;
1689
1690 /** list for all local variables in the function */
1691 struct exec_list locals;
1692
1693 /** array of variables used as parameters */
1694 unsigned num_params;
1695 nir_variable **params;
1696
1697 /** variable used to hold the result of the function */
1698 nir_variable *return_var;
1699
1700 /** list of local registers in the function */
1701 struct exec_list registers;
1702
1703 /** next available local register index */
1704 unsigned reg_alloc;
1705
1706 /** next available SSA value index */
1707 unsigned ssa_alloc;
1708
1709 /* total number of basic blocks, only valid when nir_metadata_block_index is valid */
1710 unsigned num_blocks;
1711
1712 nir_metadata valid_metadata;
1713 } nir_function_impl;
1714
1715 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1716 nir_start_block(nir_function_impl *impl)
1717 {
1718 return (nir_block *) impl->body.head_sentinel.next;
1719 }
1720
1721 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1722 nir_impl_last_block(nir_function_impl *impl)
1723 {
1724 return (nir_block *) impl->body.tail_sentinel.prev;
1725 }
1726
1727 static inline nir_cf_node *
1728 nir_cf_node_next(nir_cf_node *node)
1729 {
1730 struct exec_node *next = exec_node_get_next(&node->node);
1731 if (exec_node_is_tail_sentinel(next))
1732 return NULL;
1733 else
1734 return exec_node_data(nir_cf_node, next, node);
1735 }
1736
1737 static inline nir_cf_node *
1738 nir_cf_node_prev(nir_cf_node *node)
1739 {
1740 struct exec_node *prev = exec_node_get_prev(&node->node);
1741 if (exec_node_is_head_sentinel(prev))
1742 return NULL;
1743 else
1744 return exec_node_data(nir_cf_node, prev, node);
1745 }
1746
1747 static inline bool
1748 nir_cf_node_is_first(const nir_cf_node *node)
1749 {
1750 return exec_node_is_head_sentinel(node->node.prev);
1751 }
1752
1753 static inline bool
1754 nir_cf_node_is_last(const nir_cf_node *node)
1755 {
1756 return exec_node_is_tail_sentinel(node->node.next);
1757 }
1758
1759 NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
1760 type, nir_cf_node_block)
1761 NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
1762 type, nir_cf_node_if)
1763 NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
1764 type, nir_cf_node_loop)
1765 NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
1766 nir_function_impl, cf_node, type, nir_cf_node_function)
1767
1768 static inline nir_block *
1769 nir_if_first_then_block(nir_if *if_stmt)
1770 {
1771 struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
1772 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1773 }
1774
1775 static inline nir_block *
1776 nir_if_last_then_block(nir_if *if_stmt)
1777 {
1778 struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
1779 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1780 }
1781
1782 static inline nir_block *
1783 nir_if_first_else_block(nir_if *if_stmt)
1784 {
1785 struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
1786 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1787 }
1788
1789 static inline nir_block *
1790 nir_if_last_else_block(nir_if *if_stmt)
1791 {
1792 struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
1793 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1794 }
1795
1796 static inline nir_block *
1797 nir_loop_first_block(nir_loop *loop)
1798 {
1799 struct exec_node *head = exec_list_get_head(&loop->body);
1800 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1801 }
1802
1803 static inline nir_block *
1804 nir_loop_last_block(nir_loop *loop)
1805 {
1806 struct exec_node *tail = exec_list_get_tail(&loop->body);
1807 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1808 }
1809
1810 typedef enum {
1811 nir_parameter_in,
1812 nir_parameter_out,
1813 nir_parameter_inout,
1814 } nir_parameter_type;
1815
1816 typedef struct {
1817 nir_parameter_type param_type;
1818 const struct glsl_type *type;
1819 } nir_parameter;
1820
1821 typedef struct nir_function {
1822 struct exec_node node;
1823
1824 const char *name;
1825 struct nir_shader *shader;
1826
1827 unsigned num_params;
1828 nir_parameter *params;
1829 const struct glsl_type *return_type;
1830
1831 /** The implementation of this function.
1832 *
1833 * If the function is only declared and not implemented, this is NULL.
1834 */
1835 nir_function_impl *impl;
1836 } nir_function;
1837
1838 typedef struct nir_shader_compiler_options {
1839 bool lower_fdiv;
1840 bool lower_ffma;
1841 bool fuse_ffma;
1842 bool lower_flrp32;
1843 /** Lowers flrp for 64-bit (double) operands */
1844 bool lower_flrp64;
1845 bool lower_fpow;
1846 bool lower_fsat;
1847 bool lower_fsqrt;
1848 bool lower_fmod32;
1849 bool lower_fmod64;
1850 bool lower_bitfield_extract;
1851 bool lower_bitfield_insert;
1852 bool lower_uadd_carry;
1853 bool lower_usub_borrow;
1854 /** lowers fneg and ineg to fsub and isub. */
1855 bool lower_negate;
1856 /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
1857 bool lower_sub;
1858
1859 /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
1860 bool lower_scmp;
1861
1862 /** enables rules to lower idiv by power-of-two: */
1863 bool lower_idiv;
1864
1865 /* Does the native fdot instruction replicate its result for four
1866 * components? If so, then opt_algebraic_late will turn all fdotN
1867 * instructions into fdot_replicatedN instructions.
1868 */
1869 bool fdot_replicates;
1870
1871 /** lowers ffract to fsub+ffloor: */
1872 bool lower_ffract;
1873
1874 bool lower_ldexp;
1875
1876 bool lower_pack_half_2x16;
1877 bool lower_pack_unorm_2x16;
1878 bool lower_pack_snorm_2x16;
1879 bool lower_pack_unorm_4x8;
1880 bool lower_pack_snorm_4x8;
1881 bool lower_unpack_half_2x16;
1882 bool lower_unpack_unorm_2x16;
1883 bool lower_unpack_snorm_2x16;
1884 bool lower_unpack_unorm_4x8;
1885 bool lower_unpack_snorm_4x8;
1886
1887 bool lower_extract_byte;
1888 bool lower_extract_word;
1889
1890 bool lower_all_io_to_temps;
1891
1892 /**
1893 * Does the driver support real 32-bit integers? (Otherwise, integers
1894 * are simulated by floats.)
1895 */
1896 bool native_integers;
1897
1898 /* Indicates that the driver only has zero-based vertex id */
1899 bool vertex_id_zero_based;
1900
1901 bool lower_cs_local_index_from_id;
1902
1903 bool lower_device_index_to_zero;
1904
1905 /**
1906 * Should nir_lower_io() create load_interpolated_input intrinsics?
1907 *
1908 * If not, it generates regular load_input intrinsics and interpolation
1909 * information must be inferred from the list of input nir_variables.
1910 */
1911 bool use_interpolated_input_intrinsics;
1912
1913 /**
1914 * Do vertex shader double inputs use two locations? The Vulkan spec
1915 * requires two locations to be used, OpenGL allows a single location.
1916 */
1917 bool vs_inputs_dual_locations;
1918
1919 unsigned max_unroll_iterations;
1920 } nir_shader_compiler_options;
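/* Illustrative sketch (not part of this header): as documented on
 * nir_shader::options below, a driver is expected to keep a single static
 * copy of its options and pass the same pointer to every shader it creates.
 * The option values and the variable name here are assumptions chosen only
 * for the example.
 *
 *    static const nir_shader_compiler_options example_driver_options = {
 *       .lower_fdiv = true,
 *       .lower_scmp = true,
 *       .native_integers = true,
 *       .max_unroll_iterations = 32,
 *    };
 *
 *    nir_shader *s = nir_shader_create(NULL, MESA_SHADER_FRAGMENT,
 *                                      &example_driver_options, NULL);
 */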
1921
1922 typedef struct nir_shader {
1923 /** list of uniforms (nir_variable) */
1924 struct exec_list uniforms;
1925
1926 /** list of inputs (nir_variable) */
1927 struct exec_list inputs;
1928
1929 /** list of outputs (nir_variable) */
1930 struct exec_list outputs;
1931
1932 /** list of shared compute variables (nir_variable) */
1933 struct exec_list shared;
1934
1935 /** Set of driver-specific options for the shader.
1936 *
1937 * The memory for the options is expected to be kept in a single static
1938 * copy by the driver.
1939 */
1940 const struct nir_shader_compiler_options *options;
1941
1942 /** Various bits of compile-time information about a given shader */
1943 struct shader_info info;
1944
1945 /** list of global variables in the shader (nir_variable) */
1946 struct exec_list globals;
1947
1948 /** list of system value variables in the shader (nir_variable) */
1949 struct exec_list system_values;
1950
1951    struct exec_list functions; /**< list of nir_function */
1952
1953    /** list of global registers in the shader */
1954 struct exec_list registers;
1955
1956 /** next available global register index */
1957 unsigned reg_alloc;
1958
1959 /**
1960 * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
1961 * access plus one
1962 */
1963 unsigned num_inputs, num_uniforms, num_outputs, num_shared;
1964 } nir_shader;
1965
1966 static inline nir_function_impl *
1967 nir_shader_get_entrypoint(nir_shader *shader)
1968 {
1969 assert(exec_list_length(&shader->functions) == 1);
1970 struct exec_node *func_node = exec_list_get_head(&shader->functions);
1971 nir_function *func = exec_node_data(nir_function, func_node, node);
1972 assert(func->return_type == glsl_void_type());
1973 assert(func->num_params == 0);
1974 assert(func->impl);
1975 return func->impl;
1976 }
1977
1978 #define nir_foreach_function(func, shader) \
1979 foreach_list_typed(nir_function, func, node, &(shader)->functions)
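/* Illustrative sketch (assumption): since a nir_function may be declared but
 * not implemented, loops over shader->functions commonly skip entries whose
 * impl is NULL:
 *
 *    nir_foreach_function(func, shader) {
 *       if (func->impl == NULL)
 *          continue;
 *       ... work on func->impl ...
 *    }
 */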
1980
1981 nir_shader *nir_shader_create(void *mem_ctx,
1982 gl_shader_stage stage,
1983 const nir_shader_compiler_options *options,
1984 shader_info *si);
1985
1986 /** creates a register, including assigning it an index and adding it to the list */
1987 nir_register *nir_global_reg_create(nir_shader *shader);
1988
1989 nir_register *nir_local_reg_create(nir_function_impl *impl);
1990
1991 void nir_reg_remove(nir_register *reg);
1992
1993 /** Adds a variable to the appropriate list in nir_shader */
1994 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
1995
1996 static inline void
1997 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
1998 {
1999 assert(var->data.mode == nir_var_local);
2000 exec_list_push_tail(&impl->locals, &var->node);
2001 }
2002
2003 /** creates a variable, sets a few defaults, and adds it to the list */
2004 nir_variable *nir_variable_create(nir_shader *shader,
2005 nir_variable_mode mode,
2006 const struct glsl_type *type,
2007 const char *name);
2008 /** creates a local variable and adds it to the list */
2009 nir_variable *nir_local_variable_create(nir_function_impl *impl,
2010 const struct glsl_type *type,
2011 const char *name);
2012
2013 /** creates a function and adds it to the shader's list of functions */
2014 nir_function *nir_function_create(nir_shader *shader, const char *name);
2015
2016 nir_function_impl *nir_function_impl_create(nir_function *func);
2017 /** creates a function_impl that isn't tied to any particular function */
2018 nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
2019
2020 nir_block *nir_block_create(nir_shader *shader);
2021 nir_if *nir_if_create(nir_shader *shader);
2022 nir_loop *nir_loop_create(nir_shader *shader);
2023
2024 nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
2025
2026 /** requests that the given pieces of metadata be generated */
2027 void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
2028 /** dirties all but the preserved metadata */
2029 void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
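/* Illustrative sketch (assumption, not a required pattern): a pass that needs
 * the dominance tree would typically require what it needs up front and then
 * declare what it kept intact:
 *
 *    nir_metadata_require(impl, nir_metadata_block_index |
 *                               nir_metadata_dominance);
 *    ... transform impl without touching the CFG ...
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 */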
2030
2031 /** creates an ALU instruction with a default swizzle/writemask/etc. and NULL registers */
2032 nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
2033
2034 nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
2035
2036 nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
2037 unsigned num_components,
2038 unsigned bit_size);
2039
2040 nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
2041 nir_intrinsic_op op);
2042
2043 nir_call_instr *nir_call_instr_create(nir_shader *shader,
2044 nir_function *callee);
2045
2046 nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
2047
2048 nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
2049
2050 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
2051
2052 nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
2053 unsigned num_components,
2054 unsigned bit_size);
2055
2056 nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
2057 nir_deref_array *nir_deref_array_create(void *mem_ctx);
2058 nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
2059
2060 typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
2061 bool nir_deref_foreach_leaf(nir_deref_var *deref,
2062 nir_deref_foreach_leaf_cb cb, void *state);
2063
2064 nir_load_const_instr *
2065 nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
2066
2067 nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
2068
2069 /**
2070 * NIR Cursors and Instruction Insertion API
2071 * @{
2072 *
2073 * A tiny struct representing a point to insert/extract instructions or
2074 * control flow nodes. Helps reduce the combinatorial explosion of possible
2075 * points to insert/extract.
2076 *
2077 * \sa nir_control_flow.h
2078 */
2079 typedef enum {
2080 nir_cursor_before_block,
2081 nir_cursor_after_block,
2082 nir_cursor_before_instr,
2083 nir_cursor_after_instr,
2084 } nir_cursor_option;
2085
2086 typedef struct {
2087 nir_cursor_option option;
2088 union {
2089 nir_block *block;
2090 nir_instr *instr;
2091 };
2092 } nir_cursor;
2093
2094 static inline nir_block *
2095 nir_cursor_current_block(nir_cursor cursor)
2096 {
2097 if (cursor.option == nir_cursor_before_instr ||
2098 cursor.option == nir_cursor_after_instr) {
2099 return cursor.instr->block;
2100 } else {
2101 return cursor.block;
2102 }
2103 }
2104
2105 bool nir_cursors_equal(nir_cursor a, nir_cursor b);
2106
2107 static inline nir_cursor
2108 nir_before_block(nir_block *block)
2109 {
2110 nir_cursor cursor;
2111 cursor.option = nir_cursor_before_block;
2112 cursor.block = block;
2113 return cursor;
2114 }
2115
2116 static inline nir_cursor
2117 nir_after_block(nir_block *block)
2118 {
2119 nir_cursor cursor;
2120 cursor.option = nir_cursor_after_block;
2121 cursor.block = block;
2122 return cursor;
2123 }
2124
2125 static inline nir_cursor
2126 nir_before_instr(nir_instr *instr)
2127 {
2128 nir_cursor cursor;
2129 cursor.option = nir_cursor_before_instr;
2130 cursor.instr = instr;
2131 return cursor;
2132 }
2133
2134 static inline nir_cursor
2135 nir_after_instr(nir_instr *instr)
2136 {
2137 nir_cursor cursor;
2138 cursor.option = nir_cursor_after_instr;
2139 cursor.instr = instr;
2140 return cursor;
2141 }
2142
2143 static inline nir_cursor
2144 nir_after_block_before_jump(nir_block *block)
2145 {
2146 nir_instr *last_instr = nir_block_last_instr(block);
2147 if (last_instr && last_instr->type == nir_instr_type_jump) {
2148 return nir_before_instr(last_instr);
2149 } else {
2150 return nir_after_block(block);
2151 }
2152 }
2153
2154 static inline nir_cursor
2155 nir_before_cf_node(nir_cf_node *node)
2156 {
2157 if (node->type == nir_cf_node_block)
2158 return nir_before_block(nir_cf_node_as_block(node));
2159
2160 return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
2161 }
2162
2163 static inline nir_cursor
2164 nir_after_cf_node(nir_cf_node *node)
2165 {
2166 if (node->type == nir_cf_node_block)
2167 return nir_after_block(nir_cf_node_as_block(node));
2168
2169 return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
2170 }
2171
2172 static inline nir_cursor
2173 nir_after_phis(nir_block *block)
2174 {
2175 nir_foreach_instr(instr, block) {
2176 if (instr->type != nir_instr_type_phi)
2177 return nir_before_instr(instr);
2178 }
2179 return nir_after_block(block);
2180 }
2181
2182 static inline nir_cursor
2183 nir_after_cf_node_and_phis(nir_cf_node *node)
2184 {
2185 if (node->type == nir_cf_node_block)
2186 return nir_after_block(nir_cf_node_as_block(node));
2187
2188 nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
2189
2190 return nir_after_phis(block);
2191 }
2192
2193 static inline nir_cursor
2194 nir_before_cf_list(struct exec_list *cf_list)
2195 {
2196 nir_cf_node *first_node = exec_node_data(nir_cf_node,
2197 exec_list_get_head(cf_list), node);
2198 return nir_before_cf_node(first_node);
2199 }
2200
2201 static inline nir_cursor
2202 nir_after_cf_list(struct exec_list *cf_list)
2203 {
2204 nir_cf_node *last_node = exec_node_data(nir_cf_node,
2205 exec_list_get_tail(cf_list), node);
2206 return nir_after_cf_node(last_node);
2207 }
2208
2209 /**
2210 * Insert a NIR instruction at the given cursor.
2211 *
2212 * Note: This does not update the cursor.
2213 */
2214 void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
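/* Minimal sketch (assumption): create a one-component 32-bit constant and
 * insert it at the end of `block`, before any trailing jump.  `shader` and
 * `block` are stand-in variables for this example.
 *
 *    nir_load_const_instr *zero =
 *       nir_load_const_instr_create(shader, 1, 32);
 *    zero->value.u32[0] = 0;
 *    nir_instr_insert(nir_after_block_before_jump(block), &zero->instr);
 */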
2215
2216 static inline void
2217 nir_instr_insert_before(nir_instr *instr, nir_instr *before)
2218 {
2219 nir_instr_insert(nir_before_instr(instr), before);
2220 }
2221
2222 static inline void
2223 nir_instr_insert_after(nir_instr *instr, nir_instr *after)
2224 {
2225 nir_instr_insert(nir_after_instr(instr), after);
2226 }
2227
2228 static inline void
2229 nir_instr_insert_before_block(nir_block *block, nir_instr *before)
2230 {
2231 nir_instr_insert(nir_before_block(block), before);
2232 }
2233
2234 static inline void
2235 nir_instr_insert_after_block(nir_block *block, nir_instr *after)
2236 {
2237 nir_instr_insert(nir_after_block(block), after);
2238 }
2239
2240 static inline void
2241 nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
2242 {
2243 nir_instr_insert(nir_before_cf_node(node), before);
2244 }
2245
2246 static inline void
2247 nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
2248 {
2249 nir_instr_insert(nir_after_cf_node(node), after);
2250 }
2251
2252 static inline void
2253 nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
2254 {
2255 nir_instr_insert(nir_before_cf_list(list), before);
2256 }
2257
2258 static inline void
2259 nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
2260 {
2261 nir_instr_insert(nir_after_cf_list(list), after);
2262 }
2263
2264 void nir_instr_remove(nir_instr *instr);
2265
2266 /** @} */
2267
2268 typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
2269 typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
2270 typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
2271 bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
2272 void *state);
2273 bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
2274 bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
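/* Illustrative sketch (assumption): the callback is invoked once per source,
 * and returning false stops the walk early.  The counting helper below is
 * hypothetical.
 *
 *    static bool
 *    count_src_cb(nir_src *src, void *state)
 *    {
 *       (void) src;
 *       (*(unsigned *) state)++;
 *       return true;
 *    }
 *
 *    unsigned num_srcs = 0;
 *    nir_foreach_src(instr, count_src_cb, &num_srcs);
 */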
2275
2276 nir_const_value *nir_src_as_const_value(nir_src src);
2277 bool nir_src_is_dynamically_uniform(nir_src src);
2278 bool nir_srcs_equal(nir_src src1, nir_src src2);
2279 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
2280 void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
2281 void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
2282 void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
2283 nir_dest new_dest);
2284 void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
2285 nir_deref_var *new_deref);
2286
2287 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
2288 unsigned num_components, unsigned bit_size,
2289 const char *name);
2290 void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
2291 unsigned num_components, unsigned bit_size,
2292 const char *name);
2293 static inline void
2294 nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
2295 const struct glsl_type *type,
2296 const char *name)
2297 {
2298 assert(glsl_type_is_vector_or_scalar(type));
2299 nir_ssa_dest_init(instr, dest, glsl_get_components(type),
2300 glsl_get_bit_size(type), name);
2301 }
2302 void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
2303 void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
2304 nir_instr *after_me);
2305
2306 uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);
2307
2308 /*
2309  * finds the next basic block in source-code order; returns NULL if there
2310  * is none
2311 */
2312
2313 nir_block *nir_block_cf_tree_next(nir_block *block);
2314
2315 /* Performs the opposite of nir_block_cf_tree_next() */
2316
2317 nir_block *nir_block_cf_tree_prev(nir_block *block);
2318
2319 /* Gets the first block in a CF node in source-code order */
2320
2321 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);
2322
2323 /* Gets the last block in a CF node in source-code order */
2324
2325 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
2326
2327 /* Gets the next block after a CF node in source-code order */
2328
2329 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
2330
2331 /* Macros for loops that visit blocks in source-code order */
2332
2333 #define nir_foreach_block(block, impl) \
2334 for (nir_block *block = nir_start_block(impl); block != NULL; \
2335 block = nir_block_cf_tree_next(block))
2336
2337 #define nir_foreach_block_safe(block, impl) \
2338 for (nir_block *block = nir_start_block(impl), \
2339 *next = nir_block_cf_tree_next(block); \
2340 block != NULL; \
2341 block = next, next = nir_block_cf_tree_next(block))
2342
2343 #define nir_foreach_block_reverse(block, impl) \
2344 for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
2345 block = nir_block_cf_tree_prev(block))
2346
2347 #define nir_foreach_block_reverse_safe(block, impl) \
2348 for (nir_block *block = nir_impl_last_block(impl), \
2349 *prev = nir_block_cf_tree_prev(block); \
2350 block != NULL; \
2351 block = prev, prev = nir_block_cf_tree_prev(block))
2352
2353 #define nir_foreach_block_in_cf_node(block, node) \
2354 for (nir_block *block = nir_cf_node_cf_tree_first(node); \
2355 block != nir_cf_node_cf_tree_next(node); \
2356 block = nir_block_cf_tree_next(block))
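/* Illustrative sketch (assumption): the block iterators compose with the
 * instruction iterators to visit every instruction of an impl in source-code
 * order:
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          if (instr->type == nir_instr_type_phi)
 *             continue;
 *          ... process (or remove) instr ...
 *       }
 *    }
 */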
2357
2358 /* If the following CF node is an if, this function returns that if.
2359 * Otherwise, it returns NULL.
2360 */
2361 nir_if *nir_block_get_following_if(nir_block *block);
2362
2363 nir_loop *nir_block_get_following_loop(nir_block *block);
2364
2365 void nir_index_local_regs(nir_function_impl *impl);
2366 void nir_index_global_regs(nir_shader *shader);
2367 void nir_index_ssa_defs(nir_function_impl *impl);
2368 unsigned nir_index_instrs(nir_function_impl *impl);
2369
2370 void nir_index_blocks(nir_function_impl *impl);
2371
2372 void nir_print_shader(nir_shader *shader, FILE *fp);
2373 void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
2374 void nir_print_instr(const nir_instr *instr, FILE *fp);
2375
2376 nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
2377 nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
2378 nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
2379 nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
2380 nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
2381 nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);
2382
2383 nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);
2384
2385 #ifndef NDEBUG
2386 void nir_validate_shader(nir_shader *shader);
2387 void nir_metadata_set_validation_flag(nir_shader *shader);
2388 void nir_metadata_check_validation_flag(nir_shader *shader);
2389
2390 static inline bool
2391 should_clone_nir(void)
2392 {
2393 static int should_clone = -1;
2394 if (should_clone < 0)
2395 should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);
2396
2397 return should_clone;
2398 }
2399
2400 static inline bool
2401 should_serialize_deserialize_nir(void)
2402 {
2403 static int test_serialize = -1;
2404 if (test_serialize < 0)
2405 test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);
2406
2407 return test_serialize;
2408 }
2409
2410 static inline bool
2411 should_print_nir(void)
2412 {
2413 static int should_print = -1;
2414 if (should_print < 0)
2415 should_print = env_var_as_boolean("NIR_PRINT", false);
2416
2417 return should_print;
2418 }
2419 #else
2420 static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
2421 static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
2422 static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
2423 static inline bool should_clone_nir(void) { return false; }
2424 static inline bool should_serialize_deserialize_nir(void) { return false; }
2425 static inline bool should_print_nir(void) { return false; }
2426 #endif /* NDEBUG */
2427
2428 #define _PASS(nir, do_pass) do { \
2429 do_pass \
2430 nir_validate_shader(nir); \
2431 if (should_clone_nir()) { \
2432 nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
2433 ralloc_free(nir); \
2434 nir = clone; \
2435 } \
2436 if (should_serialize_deserialize_nir()) { \
2437 void *mem_ctx = ralloc_parent(nir); \
2438 nir = nir_shader_serialize_deserialize(mem_ctx, nir); \
2439 } \
2440 } while (0)
2441
2442 #define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
2443 nir_metadata_set_validation_flag(nir); \
2444 if (should_print_nir()) \
2445 printf("%s\n", #pass); \
2446 if (pass(nir, ##__VA_ARGS__)) { \
2447 progress = true; \
2448 if (should_print_nir()) \
2449 nir_print_shader(nir, stdout); \
2450 nir_metadata_check_validation_flag(nir); \
2451 } \
2452 )
2453
2454 #define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
2455 if (should_print_nir()) \
2456 printf("%s\n", #pass); \
2457 pass(nir, ##__VA_ARGS__); \
2458 if (should_print_nir()) \
2459 nir_print_shader(nir, stdout); \
2460 )
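/* Illustrative sketch (assumption): drivers typically run a set of
 * optimization passes in a loop until none of them reports progress.  The
 * pass selection below is only an example:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *       NIR_PASS(progress, nir, nir_opt_algebraic);
 *       NIR_PASS(progress, nir, nir_opt_constant_folding);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *    } while (progress);
 */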
2461
2462 void nir_calc_dominance_impl(nir_function_impl *impl);
2463 void nir_calc_dominance(nir_shader *shader);
2464
2465 nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
2466 bool nir_block_dominates(nir_block *parent, nir_block *child);
2467
2468 void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
2469 void nir_dump_dom_tree(nir_shader *shader, FILE *fp);
2470
2471 void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
2472 void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);
2473
2474 void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
2475 void nir_dump_cfg(nir_shader *shader, FILE *fp);
2476
2477 int nir_gs_count_vertices(const nir_shader *shader);
2478
2479 bool nir_split_var_copies(nir_shader *shader);
2480
2481 bool nir_lower_returns_impl(nir_function_impl *impl);
2482 bool nir_lower_returns(nir_shader *shader);
2483
2484 bool nir_inline_functions(nir_shader *shader);
2485
2486 bool nir_propagate_invariant(nir_shader *shader);
2487
2488 void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
2489 bool nir_lower_var_copies(nir_shader *shader);
2490
2491 bool nir_lower_global_vars_to_local(nir_shader *shader);
2492
2493 bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
2494
2495 bool nir_lower_locals_to_regs(nir_shader *shader);
2496
2497 void nir_lower_io_to_temporaries(nir_shader *shader,
2498 nir_function_impl *entrypoint,
2499 bool outputs, bool inputs);
2500
2501 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
2502
2503 void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
2504 int (*type_size)(const struct glsl_type *));
2505
2506 /* Some helpers to do very simple linking */
2507 bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
2508 void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
2509 bool default_to_smooth_interp);
2510
2511 typedef enum {
2512 /* If set, this forces all non-flat fragment shader inputs to be
2513 * interpolated as if with the "sample" qualifier. This requires
2514 * nir_shader_compiler_options::use_interpolated_input_intrinsics.
2515 */
2516 nir_lower_io_force_sample_interpolation = (1 << 1),
2517 } nir_lower_io_options;
2518 bool nir_lower_io(nir_shader *shader,
2519 nir_variable_mode modes,
2520 int (*type_size)(const struct glsl_type *),
2521 nir_lower_io_options);
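/* Illustrative sketch (assumption): the type_size callback tells nir_lower_io
 * how many slots a variable of the given type occupies.  The trivial counter
 * below is hypothetical and far simpler than what real drivers use.
 *
 *    static int
 *    example_type_size(const struct glsl_type *type)
 *    {
 *       (void) type;
 *       return 1;
 *    }
 *
 *    nir_lower_io(shader, nir_var_shader_in | nir_var_shader_out,
 *                 example_type_size, (nir_lower_io_options) 0);
 */
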
2522 nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
2523 nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
2524
2525 bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
2526
2527 void nir_lower_io_types(nir_shader *shader);
2528 bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
2529 bool nir_lower_regs_to_ssa(nir_shader *shader);
2530 bool nir_lower_vars_to_ssa(nir_shader *shader);
2531
2532 bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
2533 bool nir_lower_constant_initializers(nir_shader *shader,
2534 nir_variable_mode modes);
2535
2536 bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
2537 bool nir_lower_vec_to_movs(nir_shader *shader);
2538 void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
2539 bool alpha_to_one);
2540 bool nir_lower_alu_to_scalar(nir_shader *shader);
2541 bool nir_lower_load_const_to_scalar(nir_shader *shader);
2542 bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
2543 bool nir_lower_phis_to_scalar(nir_shader *shader);
2544 void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
2545 void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
2546 bool outputs_only);
2547 void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
2548 void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
2549
2550 bool nir_lower_samplers(nir_shader *shader,
2551 const struct gl_shader_program *shader_program);
2552 bool nir_lower_samplers_as_deref(nir_shader *shader,
2553 const struct gl_shader_program *shader_program);
2554
2555 typedef struct nir_lower_subgroups_options {
2556 uint8_t subgroup_size;
2557 uint8_t ballot_bit_size;
2558 bool lower_to_scalar:1;
2559 bool lower_vote_trivial:1;
2560 bool lower_subgroup_masks:1;
2561 bool lower_shuffle:1;
2562 bool lower_quad:1;
2563 } nir_lower_subgroups_options;
2564
2565 bool nir_lower_subgroups(nir_shader *shader,
2566 const nir_lower_subgroups_options *options);
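/* Illustrative sketch (assumption): option values for a hypothetical 32-wide
 * implementation that wants scalar operations and 32-bit ballots.
 *
 *    const nir_lower_subgroups_options subgroup_options = {
 *       .subgroup_size = 32,
 *       .ballot_bit_size = 32,
 *       .lower_to_scalar = true,
 *       .lower_subgroup_masks = true,
 *    };
 *    nir_lower_subgroups(shader, &subgroup_options);
 */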
2567
2568 bool nir_lower_system_values(nir_shader *shader);
2569
2570 typedef struct nir_lower_tex_options {
2571 /**
2572 * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
2573 * sampler types a texture projector is lowered.
2574 */
2575 unsigned lower_txp;
2576
2577 /**
2578 * If true, lower away nir_tex_src_offset for all texelfetch instructions.
2579 */
2580 bool lower_txf_offset;
2581
2582 /**
2583 * If true, lower away nir_tex_src_offset for all rect textures.
2584 */
2585 bool lower_rect_offset;
2586
2587 /**
2588 * If true, lower rect textures to 2D, using txs to fetch the
2589 * texture dimensions and dividing the texture coords by the
2590 * texture dims to normalize.
2591 */
2592 bool lower_rect;
2593
2594 /**
2595  * Per-sampler bitmasks: if a sampler's bit is set, its yuv data is converted to rgb.
2596 */
2597 unsigned lower_y_uv_external;
2598 unsigned lower_y_u_v_external;
2599 unsigned lower_yx_xuxv_external;
2600 unsigned lower_xy_uxvx_external;
2601
2602 /**
2603 * To emulate certain texture wrap modes, this can be used
2604 * to saturate the specified tex coord to [0.0, 1.0]. The
2605  * bits are according to sampler #, e.g. if:
2606 *
2607 * (conf->saturate_s & (1 << n))
2608 *
2609 * is true, then the s coord for sampler n is saturated.
2610 *
2611 * Note that clamping must happen *after* projector lowering
2612 * so any projected texture sample instruction with a clamped
2613 * coordinate gets automatically lowered, regardless of the
2614 * 'lower_txp' setting.
2615 */
2616 unsigned saturate_s;
2617 unsigned saturate_t;
2618 unsigned saturate_r;
2619
2620 /* Bitmask of textures that need swizzling.
2621 *
2622 * If (swizzle_result & (1 << texture_index)), then the swizzle in
2623 * swizzles[texture_index] is applied to the result of the texturing
2624 * operation.
2625 */
2626 unsigned swizzle_result;
2627
2628 /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles
2629 * while 4 and 5 represent 0 and 1 respectively.
2630 */
2631 uint8_t swizzles[32][4];
2632
2633 /**
2634 * Bitmap of textures that need srgb to linear conversion. If
2635 * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
2636 * of the texture are lowered to linear.
2637 */
2638 unsigned lower_srgb;
2639
2640 /**
2641  * If true, lower nir_texop_txd on cube maps to nir_texop_txl.
2642 */
2643 bool lower_txd_cube_map;
2644
2645 /**
2646  * If true, lower nir_texop_txd on shadow samplers (except cube maps)
2647  * to nir_texop_txl. Note that cube map shadow samplers are handled by
2648  * lower_txd_cube_map.
2649 */
2650 bool lower_txd_shadow;
2651
2652 /**
2653  * If true, lower nir_texop_txd on all samplers to nir_texop_txl.
2654 * Implies lower_txd_cube_map and lower_txd_shadow.
2655 */
2656 bool lower_txd;
2657 } nir_lower_tex_options;
2658
2659 bool nir_lower_tex(nir_shader *shader,
2660 const nir_lower_tex_options *options);
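/* Illustrative sketch (assumption): lower texture projectors for 2D samplers
 * and lower rect textures to 2D; the field values are examples only.
 *
 *    nir_lower_tex_options tex_options = {
 *       .lower_txp = 1 << GLSL_SAMPLER_DIM_2D,
 *       .lower_rect = true,
 *    };
 *    nir_lower_tex(shader, &tex_options);
 */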
2661
2662 bool nir_lower_idiv(nir_shader *shader);
2663
2664 bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
2665 bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
2666 bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
2667
2668 void nir_lower_two_sided_color(nir_shader *shader);
2669
2670 bool nir_lower_clamp_color_outputs(nir_shader *shader);
2671
2672 void nir_lower_passthrough_edgeflags(nir_shader *shader);
2673 void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices);
2674
2675 typedef struct nir_lower_wpos_ytransform_options {
2676 gl_state_index16 state_tokens[STATE_LENGTH];
2677 bool fs_coord_origin_upper_left :1;
2678 bool fs_coord_origin_lower_left :1;
2679 bool fs_coord_pixel_center_integer :1;
2680 bool fs_coord_pixel_center_half_integer :1;
2681 } nir_lower_wpos_ytransform_options;
2682
2683 bool nir_lower_wpos_ytransform(nir_shader *shader,
2684 const nir_lower_wpos_ytransform_options *options);
2685 bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
2686
2687 typedef struct nir_lower_drawpixels_options {
2688 gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
2689 gl_state_index16 scale_state_tokens[STATE_LENGTH];
2690 gl_state_index16 bias_state_tokens[STATE_LENGTH];
2691 unsigned drawpix_sampler;
2692 unsigned pixelmap_sampler;
2693 bool pixel_maps :1;
2694 bool scale_and_bias :1;
2695 } nir_lower_drawpixels_options;
2696
2697 void nir_lower_drawpixels(nir_shader *shader,
2698 const nir_lower_drawpixels_options *options);
2699
2700 typedef struct nir_lower_bitmap_options {
2701 unsigned sampler;
2702 bool swizzle_xxxx;
2703 } nir_lower_bitmap_options;
2704
2705 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
2706
2707 bool nir_lower_atomics(nir_shader *shader,
2708 const struct gl_shader_program *shader_program);
2709 bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
2710 bool nir_lower_uniforms_to_ubo(nir_shader *shader);
2711 bool nir_lower_to_source_mods(nir_shader *shader);
2712
2713 bool nir_lower_gs_intrinsics(nir_shader *shader);
2714
2715 typedef enum {
2716 nir_lower_imul64 = (1 << 0),
2717 nir_lower_isign64 = (1 << 1),
2718 /** Lower all int64 modulus and division opcodes */
2719 nir_lower_divmod64 = (1 << 2),
2720 } nir_lower_int64_options;
2721
2722 bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);
2723
2724 typedef enum {
2725 nir_lower_drcp = (1 << 0),
2726 nir_lower_dsqrt = (1 << 1),
2727 nir_lower_drsq = (1 << 2),
2728 nir_lower_dtrunc = (1 << 3),
2729 nir_lower_dfloor = (1 << 4),
2730 nir_lower_dceil = (1 << 5),
2731 nir_lower_dfract = (1 << 6),
2732 nir_lower_dround_even = (1 << 7),
2733 nir_lower_dmod = (1 << 8)
2734 } nir_lower_doubles_options;
2735
2736 bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
2737 bool nir_lower_64bit_pack(nir_shader *shader);
2738
2739 bool nir_normalize_cubemap_coords(nir_shader *shader);
2740
2741 void nir_live_ssa_defs_impl(nir_function_impl *impl);
2742
2743 void nir_loop_analyze_impl(nir_function_impl *impl,
2744 nir_variable_mode indirect_mask);
2745
2746 bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
2747
2748 bool nir_repair_ssa_impl(nir_function_impl *impl);
2749 bool nir_repair_ssa(nir_shader *shader);
2750
2751 void nir_convert_loop_to_lcssa(nir_loop *loop);
2752
2753 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
2754 * registers. If false, convert all values (even those not involved in a phi
2755 * node) to registers.
2756 */
2757 bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
2758
2759 bool nir_lower_phis_to_regs_block(nir_block *block);
2760 bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
2761
2762 bool nir_opt_algebraic(nir_shader *shader);
2763 bool nir_opt_algebraic_before_ffma(nir_shader *shader);
2764 bool nir_opt_algebraic_late(nir_shader *shader);
2765 bool nir_opt_constant_folding(nir_shader *shader);
2766
2767 bool nir_opt_global_to_local(nir_shader *shader);
2768
2769 bool nir_copy_prop(nir_shader *shader);
2770
2771 bool nir_opt_copy_prop_vars(nir_shader *shader);
2772
2773 bool nir_opt_cse(nir_shader *shader);
2774
2775 bool nir_opt_dce(nir_shader *shader);
2776
2777 bool nir_opt_dead_cf(nir_shader *shader);
2778
2779 bool nir_opt_gcm(nir_shader *shader, bool value_number);
2780
2781 bool nir_opt_if(nir_shader *shader);
2782
2783 bool nir_opt_intrinsics(nir_shader *shader);
2784
2785 bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);
2786
2787 bool nir_opt_move_comparisons(nir_shader *shader);
2788
2789 bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
2790
2791 bool nir_opt_remove_phis(nir_shader *shader);
2792
2793 bool nir_opt_shrink_load(nir_shader *shader);
2794
2795 bool nir_opt_trivial_continues(nir_shader *shader);
2796
2797 bool nir_opt_undef(nir_shader *shader);
2798
2799 bool nir_opt_conditional_discard(nir_shader *shader);
2800
2801 void nir_sweep(nir_shader *shader);
2802
2803 nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
2804 gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
2805
2806 #ifdef __cplusplus
2807 } /* extern "C" */
2808 #endif
2809
2810 #endif /* NIR_H */