1 /*
2 * Copyright © 2014 Connor Abbott
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #pragma once
29
30 #include "util/hash_table.h"
31 #include "compiler/glsl/list.h"
32 #include "GL/gl.h" /* GLenum */
33 #include "util/list.h"
34 #include "util/ralloc.h"
35 #include "util/set.h"
36 #include "util/bitset.h"
37 #include "util/macros.h"
38 #include "compiler/nir_types.h"
39 #include "compiler/shader_enums.h"
40 #include <stdio.h>
41
42 #include "nir_opcodes.h"
43
44 #ifdef __cplusplus
45 extern "C" {
46 #endif
47
48 struct gl_program;
49 struct gl_shader_program;
50
51 #define NIR_FALSE 0u
52 #define NIR_TRUE (~0u)
53
54 /** Defines a cast function
55 *
56 * This macro defines a cast function from in_type to out_type where
57 * out_type is some structure type that contains a field of type in_type.
58 *
59 * Note that you have to be a bit careful as the generated cast function
60 * destroys constness.
61 */
62 #define NIR_DEFINE_CAST(name, in_type, out_type, field) \
63 static inline out_type * \
64 name(const in_type *parent) \
65 { \
66 return exec_node_data(out_type, parent, field); \
67 }
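/* Illustrative sketch (not part of the API): the casts declared later in this
 * header are generated with this macro.  For example,
 *
 *    NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr)
 *
 * expands to
 *
 *    static inline nir_alu_instr *
 *    nir_instr_as_alu(const nir_instr *parent)
 *    {
 *       return exec_node_data(nir_alu_instr, parent, instr);
 *    }
 *
 * i.e. a container_of-style cast from the embedded nir_instr back to the
 * enclosing nir_alu_instr.  Note the const qualifier is dropped on the way.
 */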
68
69 struct nir_function;
70 struct nir_shader;
71 struct nir_instr;
72
73
74 /**
75 * Description of built-in state associated with a uniform
76 *
77 * \sa nir_variable::state_slots
78 */
79 typedef struct {
80 int tokens[5];
81 int swizzle;
82 } nir_state_slot;
83
84 typedef enum {
85 nir_var_shader_in = (1 << 0),
86 nir_var_shader_out = (1 << 1),
87 nir_var_global = (1 << 2),
88 nir_var_local = (1 << 3),
89 nir_var_uniform = (1 << 4),
90 nir_var_shader_storage = (1 << 5),
91 nir_var_system_value = (1 << 6),
92 nir_var_param = (1 << 7),
93 nir_var_shared = (1 << 8),
94 nir_var_all = ~0,
95 } nir_variable_mode;
96
97 /**
98 * Data stored in an nir_constant
99 */
100 union nir_constant_data {
101 unsigned u[16];
102 int i[16];
103 float f[16];
104 bool b[16];
105 double d[16];
106 };
107
108 typedef struct nir_constant {
109 /**
110 * Value of the constant.
111 *
112 * The field used to back the values supplied by the constant is determined
113 * by the type associated with the \c nir_variable. Constants may be
114 * scalars, vectors, or matrices.
115 */
116 union nir_constant_data value;
117
118 /* We could get this from var->type, but not having to care about the type
119 * makes clone *much* easier.
120 */
121 unsigned num_elements;
122
123 /* Array elements / Structure Fields */
124 struct nir_constant **elements;
125 } nir_constant;
126
127 /**
128 * \brief Layout qualifiers for gl_FragDepth.
129 *
130 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
131 * with a layout qualifier.
132 */
133 typedef enum {
134 nir_depth_layout_none, /**< No depth layout is specified. */
135 nir_depth_layout_any,
136 nir_depth_layout_greater,
137 nir_depth_layout_less,
138 nir_depth_layout_unchanged
139 } nir_depth_layout;
140
141 /**
142 * Either a uniform, global variable, shader input, or shader output. Based on
143 * ir_variable - it should be easy to translate between the two.
144 */
145
146 typedef struct nir_variable {
147 struct exec_node node;
148
149 /**
150 * Declared type of the variable
151 */
152 const struct glsl_type *type;
153
154 /**
155 * Declared name of the variable
156 */
157 char *name;
158
159 struct nir_variable_data {
160 /**
161 * Storage class of the variable.
162 *
163 * \sa nir_variable_mode
164 */
165 nir_variable_mode mode;
166
167 /**
168 * Is the variable read-only?
169 *
170 * This is set for variables declared as \c const, shader inputs,
171 * and uniforms.
172 */
173 unsigned read_only:1;
174 unsigned centroid:1;
175 unsigned sample:1;
176 unsigned patch:1;
177 unsigned invariant:1;
178
179 /**
180 * Interpolation mode for shader inputs / outputs
181 *
182 * \sa glsl_interp_mode
183 */
184 unsigned interpolation:2;
185
186 /**
187 * \name ARB_fragment_coord_conventions
188 * @{
189 */
190 unsigned origin_upper_left:1;
191 unsigned pixel_center_integer:1;
192 /*@}*/
193
194 /**
195 * Was the location explicitly set in the shader?
196 *
197 * If the location is explicitly set in the shader, it \b cannot be changed
198 * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
199 * no effect).
200 */
201 unsigned explicit_location:1;
202 unsigned explicit_index:1;
203
204 /**
205 * Was an initial binding explicitly set in the shader?
206 *
207 * If so, constant_initializer contains an integer nir_constant
208 * representing the initial binding point.
209 */
210 unsigned explicit_binding:1;
211
212 /**
213 * Does this variable have an initializer?
214 *
215 * This is used by the linker to cross-validiate initializers of global
216 * variables.
217 */
218 unsigned has_initializer:1;
219
220 /**
221 * If non-zero, then this variable may be packed along with other variables
222 * into a single varying slot, so this offset should be applied when
223 * accessing components. For example, an offset of 1 means that the x
224 * component of this variable is actually stored in component y of the
225 * location specified by \c location.
226 */
227 unsigned location_frac:2;
228
229 /**
230 * Whether this is a fragment shader output implicitly initialized with
231 * the previous contents of the specified render target at the
232 * framebuffer location corresponding to this shader invocation.
233 */
234 unsigned fb_fetch_output:1;
235
236 /**
237 * \brief Layout qualifier for gl_FragDepth.
238 *
239 * This is not equal to \c nir_depth_layout_none if and only if this
240 * variable is \c gl_FragDepth and a layout qualifier is specified.
241 */
242 nir_depth_layout depth_layout;
243
244 /**
245 * Storage location of the base of this variable
246 *
247 * The precise meaning of this field depends on the nature of the variable.
248 *
249 * - Vertex shader input: one of the values from \c gl_vert_attrib.
250 * - Vertex shader output: one of the values from \c gl_varying_slot.
251 * - Geometry shader input: one of the values from \c gl_varying_slot.
252 * - Geometry shader output: one of the values from \c gl_varying_slot.
253 * - Fragment shader input: one of the values from \c gl_varying_slot.
254 * - Fragment shader output: one of the values from \c gl_frag_result.
255 * - Uniforms: Per-stage uniform slot number for default uniform block.
256 * - Uniforms: Index within the uniform block definition for UBO members.
257 * - Non-UBO Uniforms: uniform slot number.
258 * - Other: This field is not currently used.
259 *
260 * If the variable is a uniform, shader input, or shader output, and the
261 * slot has not been assigned, the value will be -1.
262 */
263 int location;
264
265 /**
266 * The actual location of the variable in the IR. Only valid for inputs
267 * and outputs.
268 */
269 unsigned int driver_location;
270
271 /**
272 * output index for dual source blending.
273 */
274 int index;
275
276 /**
277 * Descriptor set binding for sampler or UBO.
278 */
279 int descriptor_set;
280
281 /**
282 * Initial binding point for a sampler or UBO.
283 *
284 * For array types, this represents the binding point for the first element.
285 */
286 int binding;
287
288 /**
289 * Location an atomic counter is stored at.
290 */
291 unsigned offset;
292
293 /**
294 * ARB_shader_image_load_store qualifiers.
295 */
296 struct {
297 bool read_only; /**< "readonly" qualifier. */
298 bool write_only; /**< "writeonly" qualifier. */
299 bool coherent;
300 bool _volatile;
301 bool restrict_flag;
302
303 /** Image internal format if specified explicitly, otherwise GL_NONE. */
304 GLenum format;
305 } image;
306
307 /**
308 * Highest element accessed with a constant expression array index
309 *
310 * Not used for non-array variables.
311 */
312 unsigned max_array_access;
313
314 } data;
315
316 /**
317 * Built-in state that backs this uniform
318 *
319 * Once set at variable creation, \c state_slots must remain invariant.
320 * This is because, ideally, this array would be shared by all clones of
321 * this variable in the IR tree. In other words, we'd really like for it
322 * to be a fly-weight.
323 *
324 * If the variable is not a uniform, \c num_state_slots will be zero and
325 * \c state_slots will be \c NULL.
326 */
327 /*@{*/
328 unsigned num_state_slots; /**< Number of state slots used */
329 nir_state_slot *state_slots; /**< State descriptors. */
330 /*@}*/
331
332 /**
333 * Constant expression assigned in the initializer of the variable
334 */
335 nir_constant *constant_initializer;
336
337 /**
338 * For variables that are in an interface block or are an instance of an
339 * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
340 *
341 * \sa ir_variable::location
342 */
343 const struct glsl_type *interface_type;
344 } nir_variable;
345
346 #define nir_foreach_variable(var, var_list) \
347 foreach_list_typed(nir_variable, var, node, var_list)
348
349 #define nir_foreach_variable_safe(var, var_list) \
350 foreach_list_typed_safe(nir_variable, var, node, var_list)
351
352 static inline bool
353 nir_variable_is_global(const nir_variable *var)
354 {
355 return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
356 }
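/* Usage sketch (illustrative only): walking a variable list with the macros
 * above, e.g. counting read-only variables.  "var_list" is a hypothetical
 * pointer to a struct exec_list of nir_variable nodes.
 *
 *    unsigned read_only_count = 0;
 *    nir_foreach_variable(var, var_list) {
 *       if (var->data.read_only)
 *          read_only_count++;
 *    }
 */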
357
358 typedef struct nir_register {
359 struct exec_node node;
360
361 unsigned num_components; /** < number of vector components */
362 unsigned num_array_elems; /** < size of array (0 for no array) */
363
364 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
365 uint8_t bit_size;
366
367 /** generic register index. */
368 unsigned index;
369
370 /** only for debug purposes, can be NULL */
371 const char *name;
372
373 /** whether this register is local (per-function) or global (per-shader) */
374 bool is_global;
375
376 /**
377 * If this flag is set to true, then accessing channels >= num_components
378 * is well-defined, and simply spills over to the next array element. This
379 * is useful for backends that can do per-component accessing, in
380 * particular scalar backends. By setting this flag and making
381 * num_components equal to 1, structures can be packed tightly into
382 * registers and then registers can be accessed per-component to get to
383 * each structure member, even if it crosses vec4 boundaries.
384 */
385 bool is_packed;
386
387 /** set of nir_src's where this register is used (read from) */
388 struct list_head uses;
389
390 /** set of nir_dest's where this register is defined (written to) */
391 struct list_head defs;
392
393 /** set of nir_if's where this register is used as a condition */
394 struct list_head if_uses;
395 } nir_register;
396
397 typedef enum {
398 nir_instr_type_alu,
399 nir_instr_type_call,
400 nir_instr_type_tex,
401 nir_instr_type_intrinsic,
402 nir_instr_type_load_const,
403 nir_instr_type_jump,
404 nir_instr_type_ssa_undef,
405 nir_instr_type_phi,
406 nir_instr_type_parallel_copy,
407 } nir_instr_type;
408
409 typedef struct nir_instr {
410 struct exec_node node;
411 nir_instr_type type;
412 struct nir_block *block;
413
414 /** generic instruction index. */
415 unsigned index;
416
417 /* A temporary for optimization and analysis passes to use for storing
418 * flags. For instance, DCE uses this to store the "dead/live" info.
419 */
420 uint8_t pass_flags;
421 } nir_instr;
422
423 static inline nir_instr *
424 nir_instr_next(nir_instr *instr)
425 {
426 struct exec_node *next = exec_node_get_next(&instr->node);
427 if (exec_node_is_tail_sentinel(next))
428 return NULL;
429 else
430 return exec_node_data(nir_instr, next, node);
431 }
432
433 static inline nir_instr *
434 nir_instr_prev(nir_instr *instr)
435 {
436 struct exec_node *prev = exec_node_get_prev(&instr->node);
437 if (exec_node_is_head_sentinel(prev))
438 return NULL;
439 else
440 return exec_node_data(nir_instr, prev, node);
441 }
442
443 static inline bool
444 nir_instr_is_first(nir_instr *instr)
445 {
446 return exec_node_is_head_sentinel(exec_node_get_prev(&instr->node));
447 }
448
449 static inline bool
450 nir_instr_is_last(nir_instr *instr)
451 {
452 return exec_node_is_tail_sentinel(exec_node_get_next(&instr->node));
453 }
454
455 typedef struct nir_ssa_def {
456 /** for debugging only, can be NULL */
457 const char* name;
458
459 /** generic SSA definition index. */
460 unsigned index;
461
462 /** Index into the live_in and live_out bitfields */
463 unsigned live_index;
464
465 nir_instr *parent_instr;
466
467 /** set of nir_src's where this SSA definition is used (read from) */
468 struct list_head uses;
469
470 /** set of nir_if's where this SSA definition is used as a condition */
471 struct list_head if_uses;
472
473 uint8_t num_components;
474
475 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
476 uint8_t bit_size;
477 } nir_ssa_def;
478
479 struct nir_src;
480
481 typedef struct {
482 nir_register *reg;
483 struct nir_src *indirect; /** < NULL for no indirect offset */
484 unsigned base_offset;
485
486 /* TODO use-def chain goes here */
487 } nir_reg_src;
488
489 typedef struct {
490 nir_instr *parent_instr;
491 struct list_head def_link;
492
493 nir_register *reg;
494 struct nir_src *indirect; /** < NULL for no indirect offset */
495 unsigned base_offset;
496
497 /* TODO def-use chain goes here */
498 } nir_reg_dest;
499
500 struct nir_if;
501
502 typedef struct nir_src {
503 union {
504 nir_instr *parent_instr;
505 struct nir_if *parent_if;
506 };
507
508 struct list_head use_link;
509
510 union {
511 nir_reg_src reg;
512 nir_ssa_def *ssa;
513 };
514
515 bool is_ssa;
516 } nir_src;
517
518 static inline nir_src
519 nir_src_init(void)
520 {
521 nir_src src = { { NULL } };
522 return src;
523 }
524
525 #define NIR_SRC_INIT nir_src_init()
526
527 #define nir_foreach_use(src, reg_or_ssa_def) \
528 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
529
530 #define nir_foreach_use_safe(src, reg_or_ssa_def) \
531 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
532
533 #define nir_foreach_if_use(src, reg_or_ssa_def) \
534 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
535
536 #define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
537 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
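/* Usage sketch (illustrative only): visiting every source that reads a given
 * value.  "def" is a hypothetical nir_ssa_def pointer; the same macros work on
 * a nir_register via its uses/if_uses lists.  Use the _safe variants when
 * removing or rewriting uses inside the loop.
 *
 *    nir_foreach_use(src, def) {
 *       nir_instr *reader = src->parent_instr;
 *       // ... inspect or rewrite this use ...
 *    }
 *    nir_foreach_if_use(src, def) {
 *       struct nir_if *if_stmt = src->parent_if;
 *       // ... the value is used as an if-condition ...
 *    }
 */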
538
539 typedef struct {
540 union {
541 nir_reg_dest reg;
542 nir_ssa_def ssa;
543 };
544
545 bool is_ssa;
546 } nir_dest;
547
548 static inline nir_dest
549 nir_dest_init(void)
550 {
551 nir_dest dest = { { { NULL } } };
552 return dest;
553 }
554
555 #define NIR_DEST_INIT nir_dest_init()
556
557 #define nir_foreach_def(dest, reg) \
558 list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
559
560 #define nir_foreach_def_safe(dest, reg) \
561 list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
562
563 static inline nir_src
564 nir_src_for_ssa(nir_ssa_def *def)
565 {
566 nir_src src = NIR_SRC_INIT;
567
568 src.is_ssa = true;
569 src.ssa = def;
570
571 return src;
572 }
573
574 static inline nir_src
575 nir_src_for_reg(nir_register *reg)
576 {
577 nir_src src = NIR_SRC_INIT;
578
579 src.is_ssa = false;
580 src.reg.reg = reg;
581 src.reg.indirect = NULL;
582 src.reg.base_offset = 0;
583
584 return src;
585 }
586
587 static inline nir_dest
588 nir_dest_for_reg(nir_register *reg)
589 {
590 nir_dest dest = NIR_DEST_INIT;
591
592 dest.reg.reg = reg;
593
594 return dest;
595 }
596
597 static inline unsigned
598 nir_src_bit_size(nir_src src)
599 {
600 return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
601 }
602
603 static inline unsigned
604 nir_dest_bit_size(nir_dest dest)
605 {
606 return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
607 }
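/* Usage sketch (illustrative only): wrapping values as sources/destinations.
 * "def" and "reg" are hypothetical pointers to an existing nir_ssa_def and
 * nir_register.
 *
 *    nir_src s0 = nir_src_for_ssa(def);      // SSA source
 *    nir_src s1 = nir_src_for_reg(reg);      // register source, no indirect
 *    nir_dest d0 = nir_dest_for_reg(reg);    // register destination
 *
 *    unsigned bits = nir_src_bit_size(s0);   // 8, 16, 32 or 64
 */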
608
609 void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
610 void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
611
612 typedef struct {
613 nir_src src;
614
615 /**
616 * \name input modifiers
617 */
618 /*@{*/
619 /**
620 * For inputs interpreted as floating point, flips the sign bit. For
621 * inputs interpreted as integers, performs the two's complement negation.
622 */
623 bool negate;
624
625 /**
626 * Clears the sign bit for floating point values, and computes the integer
627 * absolute value for integers. Note that the negate modifier acts after
628 * the absolute value modifier, therefore if both are set then all inputs
629 * will become negative.
630 */
631 bool abs;
632 /*@}*/
633
634 /**
635 * For each input component, says which component of the register it is
636 * chosen from. Note that which elements of the swizzle are used and which
637 * are ignored are based on the write mask for most opcodes - for example,
638 * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
639 * a swizzle of {2, x, 1, 0} where x means "don't care."
640 */
641 uint8_t swizzle[4];
642 } nir_alu_src;
643
644 typedef struct {
645 nir_dest dest;
646
647 /**
648 * \name saturate output modifier
649 *
650 * Only valid for opcodes that output floating-point numbers. Clamps the
651 * output to between 0.0 and 1.0 inclusive.
652 */
653
654 bool saturate;
655
656 unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
657 } nir_alu_dest;
658
659 typedef enum {
660 nir_type_invalid = 0, /* Not a valid type */
661 nir_type_float,
662 nir_type_int,
663 nir_type_uint,
664 nir_type_bool,
665 nir_type_bool32 = 32 | nir_type_bool,
666 nir_type_int8 = 8 | nir_type_int,
667 nir_type_int16 = 16 | nir_type_int,
668 nir_type_int32 = 32 | nir_type_int,
669 nir_type_int64 = 64 | nir_type_int,
670 nir_type_uint8 = 8 | nir_type_uint,
671 nir_type_uint16 = 16 | nir_type_uint,
672 nir_type_uint32 = 32 | nir_type_uint,
673 nir_type_uint64 = 64 | nir_type_uint,
674 nir_type_float16 = 16 | nir_type_float,
675 nir_type_float32 = 32 | nir_type_float,
676 nir_type_float64 = 64 | nir_type_float,
677 } nir_alu_type;
678
679 #define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
680 #define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
681
682 static inline unsigned
683 nir_alu_type_get_type_size(nir_alu_type type)
684 {
685 return type & NIR_ALU_TYPE_SIZE_MASK;
686 }
687
688 static inline unsigned
689 nir_alu_type_get_base_type(nir_alu_type type)
690 {
691 return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
692 }
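/* Example (illustrative): a nir_alu_type packs the base type in the low bits
 * and the bit width in the remaining bits, so
 *
 *    nir_alu_type_get_base_type(nir_type_float32) == nir_type_float
 *    nir_alu_type_get_type_size(nir_type_float32) == 32
 *    nir_alu_type_get_type_size(nir_type_int)     == 0   // unsized
 */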
693
694 typedef enum {
695 NIR_OP_IS_COMMUTATIVE = (1 << 0),
696 NIR_OP_IS_ASSOCIATIVE = (1 << 1),
697 } nir_op_algebraic_property;
698
699 typedef struct {
700 const char *name;
701
702 unsigned num_inputs;
703
704 /**
705 * The number of components in the output
706 *
707 * If non-zero, this is the size of the output and input sizes are
708 * explicitly given; swizzle and writemask are still in effect, but if
709 * the output component is masked out, then the input component may
710 * still be in use.
711 *
712 * If zero, the opcode acts in the standard, per-component manner; the
713 * operation is performed on each component (except the ones that are
714 * masked out) with the input being taken from the input swizzle for
715 * that component.
716 *
717 * The size of some of the inputs may be given (i.e. non-zero) even
718 * though output_size is zero; in that case, the inputs with a zero
719 * size act per-component, while the inputs with non-zero size don't.
720 */
721 unsigned output_size;
722
723 /**
724 * The type of vector that the instruction outputs. Note that the
725 * saturate modifier is only allowed on outputs with the float type.
726 */
727
728 nir_alu_type output_type;
729
730 /**
731 * The number of components in each input
732 */
733 unsigned input_sizes[4];
734
735 /**
736 * The type of vector that each input takes. Note that negate and
737 * absolute value are only allowed on inputs with int or float type and
738 * behave differently on the two.
739 */
740 nir_alu_type input_types[4];
741
742 nir_op_algebraic_property algebraic_properties;
743 } nir_op_info;
744
745 extern const nir_op_info nir_op_infos[nir_num_opcodes];
746
747 typedef struct nir_alu_instr {
748 nir_instr instr;
749 nir_op op;
750
751 /** Indicates that this ALU instruction generates an exact value
752 *
753 * This is kind of a mixture of GLSL "precise" and "invariant" and not
754 * really equivalent to either. This indicates that the value generated by
755 * this operation is high-precision and any code transformations that touch
756 * it must ensure that the resulting value is bit-for-bit identical to the
757 * original.
758 */
759 bool exact;
760
761 nir_alu_dest dest;
762 nir_alu_src src[];
763 } nir_alu_instr;
764
765 void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
766 nir_alu_instr *instr);
767 void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
768 nir_alu_instr *instr);
769
770 /* is this source channel used? */
771 static inline bool
772 nir_alu_instr_channel_used(nir_alu_instr *instr, unsigned src, unsigned channel)
773 {
774 if (nir_op_infos[instr->op].input_sizes[src] > 0)
775 return channel < nir_op_infos[instr->op].input_sizes[src];
776
777 return (instr->dest.write_mask >> channel) & 1;
778 }
779
780 /*
781 * For instructions whose destinations are SSA, get the number of channels
782 * used for a source
783 */
784 static inline unsigned
785 nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
786 {
787 assert(instr->dest.dest.is_ssa);
788
789 if (nir_op_infos[instr->op].input_sizes[src] > 0)
790 return nir_op_infos[instr->op].input_sizes[src];
791
792 return instr->dest.dest.ssa.num_components;
793 }
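/* Usage sketch (illustrative only): reading the swizzled channels of an ALU
 * source.  "alu" is a hypothetical nir_alu_instr with an SSA destination.
 *
 *    unsigned n = nir_ssa_alu_instr_src_components(alu, 0);
 *    for (unsigned i = 0; i < n; i++) {
 *       unsigned chan = alu->src[0].swizzle[i];
 *       // component i of the operation reads channel "chan" of src[0]
 *    }
 */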
794
795 bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
796 unsigned src1, unsigned src2);
797
798 typedef enum {
799 nir_deref_type_var,
800 nir_deref_type_array,
801 nir_deref_type_struct
802 } nir_deref_type;
803
804 typedef struct nir_deref {
805 nir_deref_type deref_type;
806 struct nir_deref *child;
807 const struct glsl_type *type;
808 } nir_deref;
809
810 typedef struct {
811 nir_deref deref;
812
813 nir_variable *var;
814 } nir_deref_var;
815
816 /* This enum describes how the array is referenced. If the deref is
817 * direct then the base_offset is used. If the deref is indirect then
818 * offset is given by base_offset + indirect. If the deref is a wildcard
819 * then the deref refers to all of the elements of the array at the same
820 * time. Wildcard dereferences are only ever allowed in copy_var
821 * intrinsics and the source and destination derefs must have matching
822 * wildcards.
823 */
824 typedef enum {
825 nir_deref_array_type_direct,
826 nir_deref_array_type_indirect,
827 nir_deref_array_type_wildcard,
828 } nir_deref_array_type;
829
830 typedef struct {
831 nir_deref deref;
832
833 nir_deref_array_type deref_array_type;
834 unsigned base_offset;
835 nir_src indirect;
836 } nir_deref_array;
837
838 typedef struct {
839 nir_deref deref;
840
841 unsigned index;
842 } nir_deref_struct;
843
844 NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref)
845 NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref)
846 NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref)
847
848 /* Returns the last deref in the chain. */
849 static inline nir_deref *
850 nir_deref_tail(nir_deref *deref)
851 {
852 while (deref->child)
853 deref = deref->child;
854 return deref;
855 }
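/* Usage sketch (illustrative only): walking a deref chain from the root
 * variable down to the final element/member.  "deref_var" is a hypothetical
 * nir_deref_var (e.g. from an intrinsic's variables[] array).
 *
 *    for (nir_deref *d = &deref_var->deref; d; d = d->child) {
 *       switch (d->deref_type) {
 *       case nir_deref_type_var:
 *          // nir_deref_as_var(d)->var is the root variable
 *          break;
 *       case nir_deref_type_array:
 *          // nir_deref_as_array(d)->base_offset (+ indirect, if any)
 *          break;
 *       case nir_deref_type_struct:
 *          // nir_deref_as_struct(d)->index selects the struct member
 *          break;
 *       }
 *    }
 */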
856
857 typedef struct {
858 nir_instr instr;
859
860 unsigned num_params;
861 nir_deref_var **params;
862 nir_deref_var *return_deref;
863
864 struct nir_function *callee;
865 } nir_call_instr;
866
867 #define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
868 num_variables, num_indices, idx0, idx1, idx2, flags) \
869 nir_intrinsic_##name,
870
871 #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
872
873 typedef enum {
874 #include "nir_intrinsics.h"
875 nir_num_intrinsics = nir_last_intrinsic + 1
876 } nir_intrinsic_op;
877
878 #define NIR_INTRINSIC_MAX_CONST_INDEX 3
879
880 /** Represents an intrinsic
881 *
882 * An intrinsic is an instruction type for handling things that are
883 * more-or-less regular operations but don't just consume and produce SSA
884 * values like ALU operations do. Intrinsics are not for things that have
885 * special semantic meaning such as phi nodes and parallel copies.
886 * Examples of intrinsics include variable load/store operations, system
887 * value loads, and the like. Even though texturing more-or-less falls
888 * under this category, texturing is its own instruction type because
889 * trying to represent texturing with intrinsics would lead to a
890 * combinatorial explosion of intrinsic opcodes.
891 *
892 * By having a single instruction type for handling a lot of different
893 * cases, optimization passes can look for intrinsics and, for the most
894 * part, completely ignore them. Each intrinsic type also has a few
895 * possible flags that govern whether or not they can be reordered or
896 * eliminated. That way passes like dead code elimination can still work
897 * on intrisics without understanding the meaning of each.
898 *
899 * Each intrinsic has some number of constant indices, some number of
900 * variables, and some number of sources. What these sources, variables,
901 * and indices mean depends on the intrinsic and is documented with the
902 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
903 * instructions are the only types of instruction that can operate on
904 * variables.
905 */
906 typedef struct {
907 nir_instr instr;
908
909 nir_intrinsic_op intrinsic;
910
911 nir_dest dest;
912
913 /** number of components if this is a vectorized intrinsic
914 *
915 * Similarly to ALU operations, some intrinsics are vectorized.
916 * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
917 * For vectorized intrinsics, the num_components field specifies the
918 * number of destination components and the number of source components
919 * for all sources with nir_intrinsic_infos.src_components[i] == 0.
920 */
921 uint8_t num_components;
922
923 int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
924
925 nir_deref_var *variables[2];
926
927 nir_src src[];
928 } nir_intrinsic_instr;
929
930 /**
931 * \name NIR intrinsics semantic flags
932 *
933 * information about what the compiler can do with the intrinsics.
934 *
935 * \sa nir_intrinsic_info::flags
936 */
937 typedef enum {
938 /**
939 * Whether the intrinsic can be safely eliminated when none of its output
940 * values are being used.
941 */
942 NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),
943
944 /**
945 * Whether the intrinsic can be reordered with respect to any other
946 * intrinsic, i.e. whether the only reordering dependencies of the
947 * intrinsic are due to the register reads/writes.
948 */
949 NIR_INTRINSIC_CAN_REORDER = (1 << 1),
950 } nir_intrinsic_semantic_flag;
951
952 /**
953 * \name NIR intrinsics const-index flag
954 *
955 * Indicates the usage of a const_index slot.
956 *
957 * \sa nir_intrinsic_info::index_map
958 */
959 typedef enum {
960 /**
961 * Generally, instructions that take an offset src argument can encode
962 * a constant 'base' value which is added to the offset.
963 */
964 NIR_INTRINSIC_BASE = 1,
965
966 /**
967 * For store instructions, a writemask for the store.
968 */
969 NIR_INTRINSIC_WRMASK = 2,
970
971 /**
972 * The stream-id for GS emit_vertex/end_primitive intrinsics.
973 */
974 NIR_INTRINSIC_STREAM_ID = 3,
975
976 /**
977 * The clip-plane id for load_user_clip_plane intrinsic.
978 */
979 NIR_INTRINSIC_UCP_ID = 4,
980
981 /**
982 * The amount of data, starting from BASE, that this instruction may
983 * access. This is used to provide bounds if the offset is not constant.
984 */
985 NIR_INTRINSIC_RANGE = 5,
986
987 /**
988 * The Vulkan descriptor set for vulkan_resource_index intrinsic.
989 */
990 NIR_INTRINSIC_DESC_SET = 6,
991
992 /**
993 * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
994 */
995 NIR_INTRINSIC_BINDING = 7,
996
997 /**
998 * Component offset.
999 */
1000 NIR_INTRINSIC_COMPONENT = 8,
1001
1002 /**
1003 * Interpolation mode (only meaningful for FS inputs).
1004 */
1005 NIR_INTRINSIC_INTERP_MODE = 9,
1006
1007 NIR_INTRINSIC_NUM_INDEX_FLAGS,
1008
1009 } nir_intrinsic_index_flag;
1010
1011 #define NIR_INTRINSIC_MAX_INPUTS 4
1012
1013 typedef struct {
1014 const char *name;
1015
1016 unsigned num_srcs; /** < number of register/SSA inputs */
1017
1018 /** number of components of each input register
1019 *
1020 * If this value is 0, the number of components is given by the
1021 * num_components field of nir_intrinsic_instr.
1022 */
1023 unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];
1024
1025 bool has_dest;
1026
1027 /** number of components of the output register
1028 *
1029 * If this value is 0, the number of components is given by the
1030 * num_components field of nir_intrinsic_instr.
1031 */
1032 unsigned dest_components;
1033
1034 /** the number of inputs/outputs that are variables */
1035 unsigned num_variables;
1036
1037 /** the number of constant indices used by the intrinsic */
1038 unsigned num_indices;
1039
1040 /** indicates the usage of intr->const_index[n] */
1041 unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];
1042
1043 /** semantic flags for calls to this intrinsic */
1044 nir_intrinsic_semantic_flag flags;
1045 } nir_intrinsic_info;
1046
1047 extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
1048
1049
1050 #define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
1051 static inline type \
1052 nir_intrinsic_##name(nir_intrinsic_instr *instr) \
1053 { \
1054 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1055 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1056 return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
1057 } \
1058 static inline void \
1059 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
1060 { \
1061 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1062 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1063 instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
1064 }
1065
1066 INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
1067 INTRINSIC_IDX_ACCESSORS(base, BASE, int)
1068 INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
1069 INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
1070 INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
1071 INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
1072 INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
1073 INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
1074 INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
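/* Usage sketch (illustrative only): the accessors above read/write const_index
 * slots by meaning rather than by position.  "intrin" is a hypothetical
 * nir_intrinsic_instr whose opcode defines BASE and WRMASK indices (e.g. a
 * store intrinsic); which indices exist for a given opcode is described by
 * nir_intrinsic_infos[].index_map.
 *
 *    int base = nir_intrinsic_base(intrin);
 *    nir_intrinsic_set_write_mask(intrin, 0x3);   // write .xy only
 */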
1075
1076 /**
1077 * \group texture information
1078 *
1079 * This gives semantic information about textures which is useful to the
1080 * frontend, the backend, and lowering passes, but not the optimizer.
1081 */
1082
1083 typedef enum {
1084 nir_tex_src_coord,
1085 nir_tex_src_projector,
1086 nir_tex_src_comparitor, /* shadow comparitor */
1087 nir_tex_src_offset,
1088 nir_tex_src_bias,
1089 nir_tex_src_lod,
1090 nir_tex_src_ms_index, /* MSAA sample index */
1091 nir_tex_src_ms_mcs, /* MSAA compression value */
1092 nir_tex_src_ddx,
1093 nir_tex_src_ddy,
1094 nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
1095 nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
1096 nir_tex_src_plane, /* < selects plane for planar textures */
1097 nir_num_tex_src_types
1098 } nir_tex_src_type;
1099
1100 typedef struct {
1101 nir_src src;
1102 nir_tex_src_type src_type;
1103 } nir_tex_src;
1104
1105 typedef enum {
1106 nir_texop_tex, /**< Regular texture look-up */
1107 nir_texop_txb, /**< Texture look-up with LOD bias */
1108 nir_texop_txl, /**< Texture look-up with explicit LOD */
1109 nir_texop_txd, /**< Texture look-up with partial derivatives */
1110 nir_texop_txf, /**< Texel fetch with explicit LOD */
1111 nir_texop_txf_ms, /**< Multisample texture fetch */
1112 nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
1113 nir_texop_txs, /**< Texture size */
1114 nir_texop_lod, /**< Texture lod query */
1115 nir_texop_tg4, /**< Texture gather */
1116 nir_texop_query_levels, /**< Texture levels query */
1117 nir_texop_texture_samples, /**< Texture samples query */
1118 nir_texop_samples_identical, /**< Query whether all samples are definitely
1119 * identical.
1120 */
1121 } nir_texop;
1122
1123 typedef struct {
1124 nir_instr instr;
1125
1126 enum glsl_sampler_dim sampler_dim;
1127 nir_alu_type dest_type;
1128
1129 nir_texop op;
1130 nir_dest dest;
1131 nir_tex_src *src;
1132 unsigned num_srcs, coord_components;
1133 bool is_array, is_shadow;
1134
1135 /**
1136 * If is_shadow is true, whether this is the old-style shadow that outputs 4
1137 * components or the new-style shadow that outputs 1 component.
1138 */
1139 bool is_new_style_shadow;
1140
1141 /* gather component selector */
1142 unsigned component : 2;
1143
1144 /** The texture index
1145 *
1146 * If this texture instruction has a nir_tex_src_texture_offset source,
1147 * then the texture index is given by texture_index + texture_offset.
1148 */
1149 unsigned texture_index;
1150
1151 /** The size of the texture array or 0 if it's not an array */
1152 unsigned texture_array_size;
1153
1154 /** The texture deref
1155 *
1156 * If this is null, use texture_index instead.
1157 */
1158 nir_deref_var *texture;
1159
1160 /** The sampler index
1161 *
1162 * The following operations do not require a sampler and, as such, this
1163 * field should be ignored:
1164 * - nir_texop_txf
1165 * - nir_texop_txf_ms
1166 * - nir_texop_txs
1167 * - nir_texop_lod
1168 * - nir_texop_tg4
1169 * - nir_texop_query_levels
1170 * - nir_texop_texture_samples
1171 * - nir_texop_samples_identical
1172 *
1173 * If this texture instruction has a nir_tex_src_sampler_offset source,
1174 * then the sampler index is given by sampler_index + sampler_offset.
1175 */
1176 unsigned sampler_index;
1177
1178 /** The sampler deref
1179 *
1180 * If this is null, use sampler_index instead.
1181 */
1182 nir_deref_var *sampler;
1183 } nir_tex_instr;
1184
1185 static inline unsigned
1186 nir_tex_instr_dest_size(nir_tex_instr *instr)
1187 {
1188 switch (instr->op) {
1189 case nir_texop_txs: {
1190 unsigned ret;
1191 switch (instr->sampler_dim) {
1192 case GLSL_SAMPLER_DIM_1D:
1193 case GLSL_SAMPLER_DIM_BUF:
1194 ret = 1;
1195 break;
1196 case GLSL_SAMPLER_DIM_2D:
1197 case GLSL_SAMPLER_DIM_CUBE:
1198 case GLSL_SAMPLER_DIM_MS:
1199 case GLSL_SAMPLER_DIM_RECT:
1200 case GLSL_SAMPLER_DIM_EXTERNAL:
1201 ret = 2;
1202 break;
1203 case GLSL_SAMPLER_DIM_3D:
1204 ret = 3;
1205 break;
1206 default:
1207 unreachable("not reached");
1208 }
1209 if (instr->is_array)
1210 ret++;
1211 return ret;
1212 }
1213
1214 case nir_texop_lod:
1215 return 2;
1216
1217 case nir_texop_texture_samples:
1218 case nir_texop_query_levels:
1219 case nir_texop_samples_identical:
1220 return 1;
1221
1222 default:
1223 if (instr->is_shadow && instr->is_new_style_shadow)
1224 return 1;
1225
1226 return 4;
1227 }
1228 }
1229
1230 /* Returns true if this texture operation queries something about the texture
1231 * rather than actually sampling it.
1232 */
1233 static inline bool
1234 nir_tex_instr_is_query(nir_tex_instr *instr)
1235 {
1236 switch (instr->op) {
1237 case nir_texop_txs:
1238 case nir_texop_lod:
1239 case nir_texop_texture_samples:
1240 case nir_texop_query_levels:
1241 case nir_texop_txf_ms_mcs:
1242 return true;
1243 case nir_texop_tex:
1244 case nir_texop_txb:
1245 case nir_texop_txl:
1246 case nir_texop_txd:
1247 case nir_texop_txf:
1248 case nir_texop_txf_ms:
1249 case nir_texop_tg4:
1250 return false;
1251 default:
1252 unreachable("Invalid texture opcode");
1253 }
1254 }
1255
1256 static inline nir_alu_type
1257 nir_tex_instr_src_type(nir_tex_instr *instr, unsigned src)
1258 {
1259 switch (instr->src[src].src_type) {
1260 case nir_tex_src_coord:
1261 switch (instr->op) {
1262 case nir_texop_txf:
1263 case nir_texop_txf_ms:
1264 case nir_texop_txf_ms_mcs:
1265 case nir_texop_samples_identical:
1266 return nir_type_int;
1267
1268 default:
1269 return nir_type_float;
1270 }
1271
1272 case nir_tex_src_lod:
1273 switch (instr->op) {
1274 case nir_texop_txs:
1275 case nir_texop_txf:
1276 return nir_type_int;
1277
1278 default:
1279 return nir_type_float;
1280 }
1281
1282 case nir_tex_src_projector:
1283 case nir_tex_src_comparitor:
1284 case nir_tex_src_bias:
1285 case nir_tex_src_ddx:
1286 case nir_tex_src_ddy:
1287 return nir_type_float;
1288
1289 case nir_tex_src_offset:
1290 case nir_tex_src_ms_index:
1291 case nir_tex_src_texture_offset:
1292 case nir_tex_src_sampler_offset:
1293 return nir_type_int;
1294
1295 default:
1296 unreachable("Invalid texture source type");
1297 }
1298 }
1299
1300 static inline unsigned
1301 nir_tex_instr_src_size(nir_tex_instr *instr, unsigned src)
1302 {
1303 if (instr->src[src].src_type == nir_tex_src_coord)
1304 return instr->coord_components;
1305
1306 /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
1307 if (instr->src[src].src_type == nir_tex_src_ms_mcs)
1308 return 4;
1309
1310 if (instr->src[src].src_type == nir_tex_src_offset ||
1311 instr->src[src].src_type == nir_tex_src_ddx ||
1312 instr->src[src].src_type == nir_tex_src_ddy) {
1313 if (instr->is_array)
1314 return instr->coord_components - 1;
1315 else
1316 return instr->coord_components;
1317 }
1318
1319 return 1;
1320 }
1321
1322 static inline int
1323 nir_tex_instr_src_index(nir_tex_instr *instr, nir_tex_src_type type)
1324 {
1325 for (unsigned i = 0; i < instr->num_srcs; i++)
1326 if (instr->src[i].src_type == type)
1327 return (int) i;
1328
1329 return -1;
1330 }
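/* Usage sketch (illustrative only): locating a particular source of a texture
 * instruction.  "tex" is a hypothetical nir_tex_instr.
 *
 *    int coord_idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 *    if (coord_idx >= 0) {
 *       nir_src *coord = &tex->src[coord_idx].src;
 *       unsigned ncomp = nir_tex_instr_src_size(tex, coord_idx);
 *       // ... the coordinate has "ncomp" components ...
 *    }
 */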
1331
1332 void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
1333
1334 typedef union {
1335 float f32[4];
1336 double f64[4];
1337 int32_t i32[4];
1338 uint32_t u32[4];
1339 int64_t i64[4];
1340 uint64_t u64[4];
1341 } nir_const_value;
1342
1343 typedef struct {
1344 nir_instr instr;
1345
1346 nir_const_value value;
1347
1348 nir_ssa_def def;
1349 } nir_load_const_instr;
1350
1351 typedef enum {
1352 nir_jump_return,
1353 nir_jump_break,
1354 nir_jump_continue,
1355 } nir_jump_type;
1356
1357 typedef struct {
1358 nir_instr instr;
1359 nir_jump_type type;
1360 } nir_jump_instr;
1361
1362 /* creates a new SSA variable in an undefined state */
1363
1364 typedef struct {
1365 nir_instr instr;
1366 nir_ssa_def def;
1367 } nir_ssa_undef_instr;
1368
1369 typedef struct {
1370 struct exec_node node;
1371
1372 /* The predecessor block corresponding to this source */
1373 struct nir_block *pred;
1374
1375 nir_src src;
1376 } nir_phi_src;
1377
1378 #define nir_foreach_phi_src(phi_src, phi) \
1379 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
1380 #define nir_foreach_phi_src_safe(phi_src, phi) \
1381 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
1382
1383 typedef struct {
1384 nir_instr instr;
1385
1386 struct exec_list srcs; /** < list of nir_phi_src */
1387
1388 nir_dest dest;
1389 } nir_phi_instr;
1390
1391 typedef struct {
1392 struct exec_node node;
1393 nir_src src;
1394 nir_dest dest;
1395 } nir_parallel_copy_entry;
1396
1397 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
1398 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
1399
1400 typedef struct {
1401 nir_instr instr;
1402
1403 /* A list of nir_parallel_copy_entry's. The sources of all of the
1404 * entries are copied to the corresponding destinations "in parallel".
1405 * In other words, if we have two entries: a -> b and b -> a, the values
1406 * get swapped.
1407 */
1408 struct exec_list entries;
1409 } nir_parallel_copy_instr;
1410
1411 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr)
1412 NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr)
1413 NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr)
1414 NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr)
1415 NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr)
1416 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr)
1417 NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr)
1418 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr)
1419 NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
1420 nir_parallel_copy_instr, instr)
1421
1422 /*
1423 * Control flow
1424 *
1425 * Control flow consists of a tree of control flow nodes, which include
1426 * if-statements and loops. The leaves of the tree are basic blocks, lists of
1427 * instructions that always run start-to-finish. Each basic block also keeps
1428 * track of its successors (blocks which may run immediately after the current
1429 * block) and predecessors (blocks which could have run immediately before the
1430 * current block). Each function also has a start block and an end block (which
1431 * is always empty) that all return statements point to. Together, all the
1432 * blocks with their predecessors and successors make up the control flow
1433 * graph (CFG) of the function. There are helpers that modify the tree of
1434 * control flow nodes while modifying the CFG appropriately; these should be
1435 * used instead of modifying the tree directly.
1436 */
1437
1438 typedef enum {
1439 nir_cf_node_block,
1440 nir_cf_node_if,
1441 nir_cf_node_loop,
1442 nir_cf_node_function
1443 } nir_cf_node_type;
1444
1445 typedef struct nir_cf_node {
1446 struct exec_node node;
1447 nir_cf_node_type type;
1448 struct nir_cf_node *parent;
1449 } nir_cf_node;
1450
1451 typedef struct nir_block {
1452 nir_cf_node cf_node;
1453
1454 struct exec_list instr_list; /** < list of nir_instr */
1455
1456 /** generic block index; generated by nir_index_blocks */
1457 unsigned index;
1458
1459 /*
1460 * Each block can only have up to 2 successors, so we put them in a simple
1461 * array - no need for anything more complicated.
1462 */
1463 struct nir_block *successors[2];
1464
1465 /* Set of nir_block predecessors in the CFG */
1466 struct set *predecessors;
1467
1468 /*
1469 * this node's immediate dominator in the dominance tree - set to NULL for
1470 * the start block.
1471 */
1472 struct nir_block *imm_dom;
1473
1474 /* This node's children in the dominance tree */
1475 unsigned num_dom_children;
1476 struct nir_block **dom_children;
1477
1478 /* Set of nir_block's on the dominance frontier of this block */
1479 struct set *dom_frontier;
1480
1481 /*
1482 * These two indices have the property that dom_{pre,post}_index for each
1483 * child of this block in the dominance tree will always be between
1484 * dom_pre_index and dom_post_index for this block, which makes testing if
1485 * a given block is dominated by another block an O(1) operation.
1486 */
1487 unsigned dom_pre_index, dom_post_index;
1488
1489 /* live in and out for this block; used for liveness analysis */
1490 BITSET_WORD *live_in;
1491 BITSET_WORD *live_out;
1492 } nir_block;
1493
1494 static inline nir_instr *
1495 nir_block_first_instr(nir_block *block)
1496 {
1497 struct exec_node *head = exec_list_get_head(&block->instr_list);
1498 return exec_node_data(nir_instr, head, node);
1499 }
1500
1501 static inline nir_instr *
1502 nir_block_last_instr(nir_block *block)
1503 {
1504 struct exec_node *tail = exec_list_get_tail(&block->instr_list);
1505 return exec_node_data(nir_instr, tail, node);
1506 }
1507
1508 #define nir_foreach_instr(instr, block) \
1509 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
1510 #define nir_foreach_instr_reverse(instr, block) \
1511 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
1512 #define nir_foreach_instr_safe(instr, block) \
1513 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
1514 #define nir_foreach_instr_reverse_safe(instr, block) \
1515 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
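/* Usage sketch (illustrative only): scanning the instructions of a block and
 * downcasting by type.  "block" is a hypothetical nir_block; use the _safe
 * variant when removing instructions while iterating.
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type != nir_instr_type_alu)
 *          continue;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       // ... inspect alu->op, alu->src[], alu->dest ...
 *    }
 */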
1516
1517 typedef struct nir_if {
1518 nir_cf_node cf_node;
1519 nir_src condition;
1520
1521 struct exec_list then_list; /** < list of nir_cf_node */
1522 struct exec_list else_list; /** < list of nir_cf_node */
1523 } nir_if;
1524
1525 static inline nir_cf_node *
1526 nir_if_first_then_node(nir_if *if_stmt)
1527 {
1528 struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
1529 return exec_node_data(nir_cf_node, head, node);
1530 }
1531
1532 static inline nir_cf_node *
1533 nir_if_last_then_node(nir_if *if_stmt)
1534 {
1535 struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
1536 return exec_node_data(nir_cf_node, tail, node);
1537 }
1538
1539 static inline nir_cf_node *
1540 nir_if_first_else_node(nir_if *if_stmt)
1541 {
1542 struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
1543 return exec_node_data(nir_cf_node, head, node);
1544 }
1545
1546 static inline nir_cf_node *
1547 nir_if_last_else_node(nir_if *if_stmt)
1548 {
1549 struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
1550 return exec_node_data(nir_cf_node, tail, node);
1551 }
1552
1553 typedef struct {
1554 nir_cf_node cf_node;
1555
1556 struct exec_list body; /** < list of nir_cf_node */
1557 } nir_loop;
1558
1559 static inline nir_cf_node *
1560 nir_loop_first_cf_node(nir_loop *loop)
1561 {
1562 return exec_node_data(nir_cf_node, exec_list_get_head(&loop->body), node);
1563 }
1564
1565 static inline nir_cf_node *
1566 nir_loop_last_cf_node(nir_loop *loop)
1567 {
1568 return exec_node_data(nir_cf_node, exec_list_get_tail(&loop->body), node);
1569 }
1570
1571 /**
1572 * Various bits of metadata that may be created or required by
1573 * optimization and analysis passes
1574 */
1575 typedef enum {
1576 nir_metadata_none = 0x0,
1577 nir_metadata_block_index = 0x1,
1578 nir_metadata_dominance = 0x2,
1579 nir_metadata_live_ssa_defs = 0x4,
1580 nir_metadata_not_properly_reset = 0x8,
1581 } nir_metadata;
1582
1583 typedef struct {
1584 nir_cf_node cf_node;
1585
1586 /** pointer to the function of which this is an implementation */
1587 struct nir_function *function;
1588
1589 struct exec_list body; /** < list of nir_cf_node */
1590
1591 nir_block *end_block;
1592
1593 /** list for all local variables in the function */
1594 struct exec_list locals;
1595
1596 /** array of variables used as parameters */
1597 unsigned num_params;
1598 nir_variable **params;
1599
1600 /** variable used to hold the result of the function */
1601 nir_variable *return_var;
1602
1603 /** list of local registers in the function */
1604 struct exec_list registers;
1605
1606 /** next available local register index */
1607 unsigned reg_alloc;
1608
1609 /** next available SSA value index */
1610 unsigned ssa_alloc;
1611
1612 /* total number of basic blocks, only valid while nir_metadata_block_index is valid */
1613 unsigned num_blocks;
1614
1615 nir_metadata valid_metadata;
1616 } nir_function_impl;
1617
1618 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1619 nir_start_block(nir_function_impl *impl)
1620 {
1621 return (nir_block *) impl->body.head_sentinel.next;
1622 }
1623
1624 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1625 nir_impl_last_block(nir_function_impl *impl)
1626 {
1627 return (nir_block *) impl->body.tail_sentinel.prev;
1628 }
1629
1630 static inline nir_cf_node *
1631 nir_cf_node_next(nir_cf_node *node)
1632 {
1633 struct exec_node *next = exec_node_get_next(&node->node);
1634 if (exec_node_is_tail_sentinel(next))
1635 return NULL;
1636 else
1637 return exec_node_data(nir_cf_node, next, node);
1638 }
1639
1640 static inline nir_cf_node *
1641 nir_cf_node_prev(nir_cf_node *node)
1642 {
1643 struct exec_node *prev = exec_node_get_prev(&node->node);
1644 if (exec_node_is_head_sentinel(prev))
1645 return NULL;
1646 else
1647 return exec_node_data(nir_cf_node, prev, node);
1648 }
1649
1650 static inline bool
1651 nir_cf_node_is_first(const nir_cf_node *node)
1652 {
1653 return exec_node_is_head_sentinel(node->node.prev);
1654 }
1655
1656 static inline bool
1657 nir_cf_node_is_last(const nir_cf_node *node)
1658 {
1659 return exec_node_is_tail_sentinel(node->node.next);
1660 }
1661
1662 NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node)
1663 NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node)
1664 NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node)
1665 NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node, nir_function_impl, cf_node)
1666
1667 typedef enum {
1668 nir_parameter_in,
1669 nir_parameter_out,
1670 nir_parameter_inout,
1671 } nir_parameter_type;
1672
1673 typedef struct {
1674 nir_parameter_type param_type;
1675 const struct glsl_type *type;
1676 } nir_parameter;
1677
1678 typedef struct nir_function {
1679 struct exec_node node;
1680
1681 const char *name;
1682 struct nir_shader *shader;
1683
1684 unsigned num_params;
1685 nir_parameter *params;
1686 const struct glsl_type *return_type;
1687
1688 /** The implementation of this function.
1689 *
1690 * If the function is only declared and not implemented, this is NULL.
1691 */
1692 nir_function_impl *impl;
1693 } nir_function;
1694
1695 typedef struct nir_shader_compiler_options {
1696 bool lower_fdiv;
1697 bool lower_ffma;
1698 bool fuse_ffma;
1699 bool lower_flrp32;
1700 /** Lowers flrp for 64-bit (double) operands. */
1701 bool lower_flrp64;
1702 bool lower_fpow;
1703 bool lower_fsat;
1704 bool lower_fsqrt;
1705 bool lower_fmod32;
1706 bool lower_fmod64;
1707 bool lower_bitfield_extract;
1708 bool lower_bitfield_insert;
1709 bool lower_uadd_carry;
1710 bool lower_usub_borrow;
1711 /** lowers fneg and ineg to fsub and isub. */
1712 bool lower_negate;
1713 /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
1714 bool lower_sub;
1715
1716 /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
1717 bool lower_scmp;
1718
1719 /** enables rules to lower idiv by power-of-two: */
1720 bool lower_idiv;
1721
1722 /* Does the native fdot instruction replicate its result for four
1723 * components? If so, then opt_algebraic_late will turn all fdotN
1724 * instructions into fdot_replicatedN instructions.
1725 */
1726 bool fdot_replicates;
1727
1728 /** lowers ffract to fsub+ffloor: */
1729 bool lower_ffract;
1730
1731 bool lower_pack_half_2x16;
1732 bool lower_pack_unorm_2x16;
1733 bool lower_pack_snorm_2x16;
1734 bool lower_pack_unorm_4x8;
1735 bool lower_pack_snorm_4x8;
1736 bool lower_unpack_half_2x16;
1737 bool lower_unpack_unorm_2x16;
1738 bool lower_unpack_snorm_2x16;
1739 bool lower_unpack_unorm_4x8;
1740 bool lower_unpack_snorm_4x8;
1741
1742 bool lower_extract_byte;
1743 bool lower_extract_word;
1744
1745 /**
1746 * Does the driver support real 32-bit integers? (Otherwise, integers
1747 * are simulated by floats.)
1748 */
1749 bool native_integers;
1750
1751 /* Indicates that the driver only has zero-based vertex id */
1752 bool vertex_id_zero_based;
1753
1754 bool lower_cs_local_index_from_id;
1755
1756 /**
1757 * Should nir_lower_io() create load_interpolated_input intrinsics?
1758 *
1759 * If not, it generates regular load_input intrinsics and interpolation
1760 * information must be inferred from the list of input nir_variables.
1761 */
1762 bool use_interpolated_input_intrinsics;
1763 } nir_shader_compiler_options;
1764
1765 typedef struct nir_shader_info {
1766 const char *name;
1767
1768 /* Descriptive name provided by the client; may be NULL */
1769 const char *label;
1770
1771 /* Number of textures used by this shader */
1772 unsigned num_textures;
1773 /* Number of uniform buffers used by this shader */
1774 unsigned num_ubos;
1775 /* Number of atomic buffers used by this shader */
1776 unsigned num_abos;
1777 /* Number of shader storage buffers used by this shader */
1778 unsigned num_ssbos;
1779 /* Number of images used by this shader */
1780 unsigned num_images;
1781
1782 /* Which inputs are actually read */
1783 uint64_t inputs_read;
1784 /* Which inputs are actually read and are double */
1785 uint64_t double_inputs_read;
1786 /* Which outputs are actually written */
1787 uint64_t outputs_written;
1788 /* Which outputs are actually read */
1789 uint64_t outputs_read;
1790 /* Which system values are actually read */
1791 uint64_t system_values_read;
1792
1793 /* Which patch inputs are actually read */
1794 uint32_t patch_inputs_read;
1795 /* Which patch outputs are actually written */
1796 uint32_t patch_outputs_written;
1797
1798 /* Whether or not this shader ever uses textureGather() */
1799 bool uses_texture_gather;
1800
1801 /* Whether or not this shader uses the gl_ClipDistance output */
1802 bool uses_clip_distance_out;
1803
1804 /* Whether or not separate shader objects were used */
1805 bool separate_shader;
1806
1807 /** Was this shader linked with any transform feedback varyings? */
1808 bool has_transform_feedback_varyings;
1809
1810 union {
1811 struct {
1812 /** The number of vertices received per input primitive */
1813 unsigned vertices_in;
1814
1815 /** The output primitive type (GL enum value) */
1816 unsigned output_primitive;
1817
1818 /** The maximum number of vertices the geometry shader might write. */
1819 unsigned vertices_out;
1820
1821 /** 1 .. MAX_GEOMETRY_SHADER_INVOCATIONS */
1822 unsigned invocations;
1823
1824 /** Whether or not this shader uses EndPrimitive */
1825 bool uses_end_primitive;
1826
1827 /** Whether or not this shader uses non-zero streams */
1828 bool uses_streams;
1829 } gs;
1830
1831 struct {
1832 bool uses_discard;
1833
1834 /**
1835 * Whether any inputs are declared with the "sample" qualifier.
1836 */
1837 bool uses_sample_qualifier;
1838
1839 /**
1840 * Whether early fragment tests are enabled as defined by
1841 * ARB_shader_image_load_store.
1842 */
1843 bool early_fragment_tests;
1844
1845 /** gl_FragDepth layout for ARB_conservative_depth. */
1846 enum gl_frag_depth_layout depth_layout;
1847 } fs;
1848
1849 struct {
1850 unsigned local_size[3];
1851 } cs;
1852
1853 struct {
1854 /** The number of vertices in the TCS output patch. */
1855 unsigned vertices_out;
1856 } tcs;
1857 };
1858 } nir_shader_info;
1859
1860 typedef struct nir_shader {
1861 /** list of uniforms (nir_variable) */
1862 struct exec_list uniforms;
1863
1864 /** list of inputs (nir_variable) */
1865 struct exec_list inputs;
1866
1867 /** list of outputs (nir_variable) */
1868 struct exec_list outputs;
1869
1870 /** list of shared compute variables (nir_variable) */
1871 struct exec_list shared;
1872
1873 /** Set of driver-specific options for the shader.
1874 *
1875 * The memory for the options is expected to be kept in a single static
1876 * copy by the driver.
1877 */
1878 const struct nir_shader_compiler_options *options;
1879
1880 /** Various bits of compile-time information about a given shader */
1881 struct nir_shader_info info;
1882
1883 /** list of global variables in the shader (nir_variable) */
1884 struct exec_list globals;
1885
1886 /** list of system value variables in the shader (nir_variable) */
1887 struct exec_list system_values;
1888
1889 struct exec_list functions; /** < list of nir_function */
1890
1891 /** list of global register in the shader */
1892 struct exec_list registers;
1893
1894 /** next available global register index */
1895 unsigned reg_alloc;
1896
1897 /**
1898 * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
1899 * access plus one
1900 */
1901 unsigned num_inputs, num_uniforms, num_outputs, num_shared;
1902
1903 /** The shader stage, such as MESA_SHADER_VERTEX. */
1904 gl_shader_stage stage;
1905 } nir_shader;
1906
1907 static inline nir_function_impl *
1908 nir_shader_get_entrypoint(nir_shader *shader)
1909 {
1910 assert(exec_list_length(&shader->functions) == 1);
1911 struct exec_node *func_node = exec_list_get_head(&shader->functions);
1912 nir_function *func = exec_node_data(nir_function, func_node, node);
1913 assert(func->return_type == glsl_void_type());
1914 assert(func->num_params == 0);
1915 assert(func->impl);
1916 return func->impl;
1917 }
1918
1919 #define nir_foreach_function(func, shader) \
1920 foreach_list_typed(nir_function, func, node, &(shader)->functions)
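/*
 * Usage sketch (illustrative only; "lower_something_impl" is a hypothetical
 * per-impl helper): visit every function and process those that have an
 * implementation, since func->impl may be NULL for prototypes.
 *
 *    nir_foreach_function(func, shader) {
 *       if (func->impl)
 *          lower_something_impl(func->impl);
 *    }
 */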
1921
1922 nir_shader *nir_shader_create(void *mem_ctx,
1923 gl_shader_stage stage,
1924 const nir_shader_compiler_options *options);
1925
1926 /** creates a register, including assigning it an index and adding it to the list */
1927 nir_register *nir_global_reg_create(nir_shader *shader);
1928
1929 nir_register *nir_local_reg_create(nir_function_impl *impl);
1930
1931 void nir_reg_remove(nir_register *reg);
1932
1933 /** Adds a variable to the appropriate list in nir_shader */
1934 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
1935
1936 static inline void
1937 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
1938 {
1939 assert(var->data.mode == nir_var_local);
1940 exec_list_push_tail(&impl->locals, &var->node);
1941 }
1942
1943 /** creates a variable, sets a few defaults, and adds it to the list */
1944 nir_variable *nir_variable_create(nir_shader *shader,
1945 nir_variable_mode mode,
1946 const struct glsl_type *type,
1947 const char *name);
1948 /** creates a local variable and adds it to the list */
1949 nir_variable *nir_local_variable_create(nir_function_impl *impl,
1950 const struct glsl_type *type,
1951 const char *name);
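/*
 * Illustrative sketch: create a vec4 shader input on a shader and a vec4
 * temporary on an impl. The variable names and the VARYING_SLOT_COL0
 * location are only examples.
 *
 *    nir_variable *color_in =
 *       nir_variable_create(shader, nir_var_shader_in,
 *                           glsl_vec4_type(), "in_color");
 *    color_in->data.location = VARYING_SLOT_COL0;
 *
 *    nir_variable *tmp =
 *       nir_local_variable_create(impl, glsl_vec4_type(), "tmp");
 */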
1952
1953 /** creates a function and adds it to the shader's list of functions */
1954 nir_function *nir_function_create(nir_shader *shader, const char *name);
1955
1956 nir_function_impl *nir_function_impl_create(nir_function *func);
1957 /** creates a function_impl that isn't tied to any particular function */
1958 nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
1959
1960 nir_block *nir_block_create(nir_shader *shader);
1961 nir_if *nir_if_create(nir_shader *shader);
1962 nir_loop *nir_loop_create(nir_shader *shader);
1963
1964 nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
1965
1966 /** requests that the given pieces of metadata be generated */
1967 void nir_metadata_require(nir_function_impl *impl, nir_metadata required);
1968 /** dirties all but the preserved metadata */
1969 void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
1970
1971 /** creates an ALU instruction with default swizzle/writemask/etc. and NULL registers */
1972 nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
1973
1974 nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
1975
1976 nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
1977 unsigned num_components,
1978 unsigned bit_size);
1979
1980 nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
1981 nir_intrinsic_op op);
1982
1983 nir_call_instr *nir_call_instr_create(nir_shader *shader,
1984 nir_function *callee);
1985
1986 nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
1987
1988 nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
1989
1990 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
1991
1992 nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
1993 unsigned num_components,
1994 unsigned bit_size);
1995
1996 nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
1997 nir_deref_array *nir_deref_array_create(void *mem_ctx);
1998 nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
1999
2000 nir_deref *nir_copy_deref(void *mem_ctx, nir_deref *deref);
2001
2002 typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
2003 bool nir_deref_foreach_leaf(nir_deref_var *deref,
2004 nir_deref_foreach_leaf_cb cb, void *state);
2005
2006 nir_load_const_instr *
2007 nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
2008
2009 /**
2010 * NIR Cursors and Instruction Insertion API
2011 * @{
2012 *
2013 * A tiny struct representing a point to insert/extract instructions or
2014 * control flow nodes. Helps reduce the combinatorial explosion of possible
2015 * points to insert/extract.
2016 *
2017 * \sa nir_control_flow.h
2018 */
2019 typedef enum {
2020 nir_cursor_before_block,
2021 nir_cursor_after_block,
2022 nir_cursor_before_instr,
2023 nir_cursor_after_instr,
2024 } nir_cursor_option;
2025
2026 typedef struct {
2027 nir_cursor_option option;
2028 union {
2029 nir_block *block;
2030 nir_instr *instr;
2031 };
2032 } nir_cursor;
2033
2034 static inline nir_block *
2035 nir_cursor_current_block(nir_cursor cursor)
2036 {
2037 if (cursor.option == nir_cursor_before_instr ||
2038 cursor.option == nir_cursor_after_instr) {
2039 return cursor.instr->block;
2040 } else {
2041 return cursor.block;
2042 }
2043 }
2044
2045 bool nir_cursors_equal(nir_cursor a, nir_cursor b);
2046
2047 static inline nir_cursor
2048 nir_before_block(nir_block *block)
2049 {
2050 nir_cursor cursor;
2051 cursor.option = nir_cursor_before_block;
2052 cursor.block = block;
2053 return cursor;
2054 }
2055
2056 static inline nir_cursor
2057 nir_after_block(nir_block *block)
2058 {
2059 nir_cursor cursor;
2060 cursor.option = nir_cursor_after_block;
2061 cursor.block = block;
2062 return cursor;
2063 }
2064
2065 static inline nir_cursor
2066 nir_before_instr(nir_instr *instr)
2067 {
2068 nir_cursor cursor;
2069 cursor.option = nir_cursor_before_instr;
2070 cursor.instr = instr;
2071 return cursor;
2072 }
2073
2074 static inline nir_cursor
2075 nir_after_instr(nir_instr *instr)
2076 {
2077 nir_cursor cursor;
2078 cursor.option = nir_cursor_after_instr;
2079 cursor.instr = instr;
2080 return cursor;
2081 }
2082
2083 static inline nir_cursor
2084 nir_after_block_before_jump(nir_block *block)
2085 {
2086 nir_instr *last_instr = nir_block_last_instr(block);
2087 if (last_instr && last_instr->type == nir_instr_type_jump) {
2088 return nir_before_instr(last_instr);
2089 } else {
2090 return nir_after_block(block);
2091 }
2092 }
2093
2094 static inline nir_cursor
2095 nir_before_cf_node(nir_cf_node *node)
2096 {
2097 if (node->type == nir_cf_node_block)
2098 return nir_before_block(nir_cf_node_as_block(node));
2099
2100 return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
2101 }
2102
2103 static inline nir_cursor
2104 nir_after_cf_node(nir_cf_node *node)
2105 {
2106 if (node->type == nir_cf_node_block)
2107 return nir_after_block(nir_cf_node_as_block(node));
2108
2109 return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
2110 }
2111
2112 static inline nir_cursor
2113 nir_after_phis(nir_block *block)
2114 {
2115 nir_foreach_instr(instr, block) {
2116 if (instr->type != nir_instr_type_phi)
2117 return nir_before_instr(instr);
2118 }
2119 return nir_after_block(block);
2120 }
2121
2122 static inline nir_cursor
2123 nir_after_cf_node_and_phis(nir_cf_node *node)
2124 {
2125 if (node->type == nir_cf_node_block)
2126 return nir_after_block(nir_cf_node_as_block(node));
2127
2128 nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
2129 assert(block->cf_node.type == nir_cf_node_block);
2130
2131 return nir_after_phis(block);
2132 }
2133
2134 static inline nir_cursor
2135 nir_before_cf_list(struct exec_list *cf_list)
2136 {
2137 nir_cf_node *first_node = exec_node_data(nir_cf_node,
2138 exec_list_get_head(cf_list), node);
2139 return nir_before_cf_node(first_node);
2140 }
2141
2142 static inline nir_cursor
2143 nir_after_cf_list(struct exec_list *cf_list)
2144 {
2145 nir_cf_node *last_node = exec_node_data(nir_cf_node,
2146 exec_list_get_tail(cf_list), node);
2147 return nir_after_cf_node(last_node);
2148 }
2149
2150 /**
2151 * Insert a NIR instruction at the given cursor.
2152 *
2153 * Note: This does not update the cursor.
2154 */
2155 void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
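/*
 * Usage sketch (illustrative only): materialize an ssa_undef and insert it
 * at the very top of an impl's body, a common pattern in lowering passes.
 *
 *    nir_ssa_undef_instr *undef =
 *       nir_ssa_undef_instr_create(shader, 1, 32);
 *    nir_instr_insert(nir_before_cf_list(&impl->body), &undef->instr);
 */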
2156
2157 static inline void
2158 nir_instr_insert_before(nir_instr *instr, nir_instr *before)
2159 {
2160 nir_instr_insert(nir_before_instr(instr), before);
2161 }
2162
2163 static inline void
2164 nir_instr_insert_after(nir_instr *instr, nir_instr *after)
2165 {
2166 nir_instr_insert(nir_after_instr(instr), after);
2167 }
2168
2169 static inline void
2170 nir_instr_insert_before_block(nir_block *block, nir_instr *before)
2171 {
2172 nir_instr_insert(nir_before_block(block), before);
2173 }
2174
2175 static inline void
2176 nir_instr_insert_after_block(nir_block *block, nir_instr *after)
2177 {
2178 nir_instr_insert(nir_after_block(block), after);
2179 }
2180
2181 static inline void
2182 nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
2183 {
2184 nir_instr_insert(nir_before_cf_node(node), before);
2185 }
2186
2187 static inline void
2188 nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
2189 {
2190 nir_instr_insert(nir_after_cf_node(node), after);
2191 }
2192
2193 static inline void
2194 nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
2195 {
2196 nir_instr_insert(nir_before_cf_list(list), before);
2197 }
2198
2199 static inline void
2200 nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
2201 {
2202 nir_instr_insert(nir_after_cf_list(list), after);
2203 }
2204
2205 void nir_instr_remove(nir_instr *instr);
2206
2207 /** @} */
2208
2209 typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
2210 typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
2211 typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
2212 bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
2213 void *state);
2214 bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
2215 bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
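/*
 * Sketch of a nir_foreach_src_cb (hypothetical helper): check whether every
 * source of an instruction is in SSA form. nir_foreach_src() stops early and
 * returns false as soon as the callback returns false.
 *
 *    static bool
 *    src_is_ssa(nir_src *src, void *state)
 *    {
 *       return src->is_ssa;
 *    }
 *
 *    bool all_ssa = nir_foreach_src(instr, src_is_ssa, NULL);
 */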
2216
2217 nir_const_value *nir_src_as_const_value(nir_src src);
2218 bool nir_src_is_dynamically_uniform(nir_src src);
2219 bool nir_srcs_equal(nir_src src1, nir_src src2);
2220 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
2221 void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
2222 void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
2223 void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
2224 nir_dest new_dest);
2225
2226 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
2227 unsigned num_components, unsigned bit_size,
2228 const char *name);
2229 void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
2230 unsigned num_components, unsigned bit_size,
2231 const char *name);
2232 void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
2233 void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
2234 nir_instr *after_me);
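/*
 * Typical replacement pattern (sketch): point all users of old_def at
 * new_def, then delete the now-dead defining instruction.
 *
 *    nir_ssa_def_rewrite_uses(old_def, nir_src_for_ssa(new_def));
 *    nir_instr_remove(old_def->parent_instr);
 */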
2235
2236 uint8_t nir_ssa_def_components_read(nir_ssa_def *def);
2237
2238 /*
2239 * Finds the next basic block in source-code order; returns NULL if there
2240 * is none.
2241 */
2242
2243 nir_block *nir_block_cf_tree_next(nir_block *block);
2244
2245 /* Performs the opposite of nir_block_cf_tree_next() */
2246
2247 nir_block *nir_block_cf_tree_prev(nir_block *block);
2248
2249 /* Gets the first block in a CF node in source-code order */
2250
2251 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);
2252
2253 /* Gets the last block in a CF node in source-code order */
2254
2255 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
2256
2257 /* Gets the next block after a CF node in source-code order */
2258
2259 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
2260
2261 /* Macros for loops that visit blocks in source-code order */
2262
2263 #define nir_foreach_block(block, impl) \
2264 for (nir_block *block = nir_start_block(impl); block != NULL; \
2265 block = nir_block_cf_tree_next(block))
2266
2267 #define nir_foreach_block_safe(block, impl) \
2268 for (nir_block *block = nir_start_block(impl), \
2269 *next = nir_block_cf_tree_next(block); \
2270 block != NULL; \
2271 block = next, next = nir_block_cf_tree_next(block))
2272
2273 #define nir_foreach_block_reverse(block, impl) \
2274 for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
2275 block = nir_block_cf_tree_prev(block))
2276
2277 #define nir_foreach_block_reverse_safe(block, impl) \
2278 for (nir_block *block = nir_impl_last_block(impl), \
2279 *prev = nir_block_cf_tree_prev(block); \
2280 block != NULL; \
2281 block = prev, prev = nir_block_cf_tree_prev(block))
2282
2283 #define nir_foreach_block_in_cf_node(block, node) \
2284 for (nir_block *block = nir_cf_node_cf_tree_first(node); \
2285 block != nir_cf_node_cf_tree_next(node); \
2286 block = nir_block_cf_tree_next(block))
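/*
 * Illustrative sketch: walk an impl in source-code order and count its ALU
 * instructions.
 *
 *    unsigned num_alu = 0;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block) {
 *          if (instr->type == nir_instr_type_alu)
 *             num_alu++;
 *       }
 *    }
 */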
2287
2288 /* If the following CF node is an if, this function returns that if.
2289 * Otherwise, it returns NULL.
2290 */
2291 nir_if *nir_block_get_following_if(nir_block *block);
2292
2293 nir_loop *nir_block_get_following_loop(nir_block *block);
2294
2295 void nir_index_local_regs(nir_function_impl *impl);
2296 void nir_index_global_regs(nir_shader *shader);
2297 void nir_index_ssa_defs(nir_function_impl *impl);
2298 unsigned nir_index_instrs(nir_function_impl *impl);
2299
2300 void nir_index_blocks(nir_function_impl *impl);
2301
2302 void nir_print_shader(nir_shader *shader, FILE *fp);
2303 void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
2304 void nir_print_instr(const nir_instr *instr, FILE *fp);
2305
2306 nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
2307 nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
2308 nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
2309 nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
2310
2311 #ifdef DEBUG
2312 void nir_validate_shader(nir_shader *shader);
2313 void nir_metadata_set_validation_flag(nir_shader *shader);
2314 void nir_metadata_check_validation_flag(nir_shader *shader);
2315
2316 #include "util/debug.h"
2317 static inline bool
2318 should_clone_nir(void)
2319 {
2320 static int should_clone = -1;
2321 if (should_clone < 0)
2322 should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);
2323
2324 return should_clone;
2325 }
2326 #else
2327 static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
2328 static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
2329 static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
2330 static inline bool should_clone_nir(void) { return false; }
2331 #endif /* DEBUG */
2332
2333 #define _PASS(nir, do_pass) do { \
2334 do_pass \
2335 nir_validate_shader(nir); \
2336 if (should_clone_nir()) { \
2337 nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
2338 ralloc_free(nir); \
2339 nir = clone; \
2340 } \
2341 } while (0)
2342
2343 #define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
2344 nir_metadata_set_validation_flag(nir); \
2345 if (pass(nir, ##__VA_ARGS__)) { \
2346 progress = true; \
2347 nir_metadata_check_validation_flag(nir); \
2348 } \
2349 )
2350
2351 #define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
2352 pass(nir, ##__VA_ARGS__); \
2353 )
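/*
 * Sketch of the optimization loop most drivers build on these macros; the
 * pass list here is illustrative, not prescriptive. Note that NIR_PASS may
 * replace the shader with a clone when NIR_TEST_CLONE is set, so keep using
 * the same local pointer throughout.
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *       NIR_PASS(progress, nir, nir_opt_algebraic);
 *    } while (progress);
 *
 *    NIR_PASS_V(nir, nir_lower_vec_to_movs);
 */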
2354
2355 void nir_calc_dominance_impl(nir_function_impl *impl);
2356 void nir_calc_dominance(nir_shader *shader);
2357
2358 nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
2359 bool nir_block_dominates(nir_block *parent, nir_block *child);
2360
2361 void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
2362 void nir_dump_dom_tree(nir_shader *shader, FILE *fp);
2363
2364 void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
2365 void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);
2366
2367 void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
2368 void nir_dump_cfg(nir_shader *shader, FILE *fp);
2369
2370 int nir_gs_count_vertices(const nir_shader *shader);
2371
2372 bool nir_split_var_copies(nir_shader *shader);
2373
2374 bool nir_lower_returns_impl(nir_function_impl *impl);
2375 bool nir_lower_returns(nir_shader *shader);
2376
2377 bool nir_inline_functions(nir_shader *shader);
2378
2379 bool nir_propagate_invariant(nir_shader *shader);
2380
2381 void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
2382 void nir_lower_var_copies(nir_shader *shader);
2383
2384 bool nir_lower_global_vars_to_local(nir_shader *shader);
2385
2386 bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
2387
2388 bool nir_lower_locals_to_regs(nir_shader *shader);
2389
2390 void nir_lower_io_to_temporaries(nir_shader *shader,
2391 nir_function_impl *entrypoint,
2392 bool outputs, bool inputs);
2393
2394 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
2395
2396 void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
2397 unsigned base_offset,
2398 int (*type_size)(const struct glsl_type *));
2399
2400 void nir_lower_io(nir_shader *shader,
2401 nir_variable_mode modes,
2402 int (*type_size)(const struct glsl_type *));
2403 nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
2404 nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
2405
2406 void nir_lower_io_types(nir_shader *shader);
2407 void nir_lower_vars_to_ssa(nir_shader *shader);
2408
2409 bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
2410
2411 void nir_move_vec_src_uses_to_dest(nir_shader *shader);
2412 bool nir_lower_vec_to_movs(nir_shader *shader);
2413 bool nir_lower_alu_to_scalar(nir_shader *shader);
2414 void nir_lower_load_const_to_scalar(nir_shader *shader);
2415
2416 bool nir_lower_phis_to_scalar(nir_shader *shader);
2417 void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
2418
2419 void nir_lower_samplers(nir_shader *shader,
2420 const struct gl_shader_program *shader_program);
2421
2422 bool nir_lower_system_values(nir_shader *shader);
2423
2424 typedef struct nir_lower_tex_options {
2425 /**
2426 * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
2427 * sampler types a texture projector is lowered.
2428 */
2429 unsigned lower_txp;
2430
2431 /**
2432 * If true, lower away nir_tex_src_offset for all texelfetch instructions.
2433 */
2434 bool lower_txf_offset;
2435
2436 /**
2437 * If true, lower away nir_tex_src_offset for all rect textures.
2438 */
2439 bool lower_rect_offset;
2440
2441 /**
2442 * If true, lower rect textures to 2D, using txs to fetch the
2443 * texture dimensions and dividing the texture coords by the
2444 * texture dims to normalize.
2445 */
2446 bool lower_rect;
2447
2448 /**
2449 * Bitmasks of samplers whose YUV data is converted to RGB, one field per external texture layout.
2450 */
2451 unsigned lower_y_uv_external;
2452 unsigned lower_y_u_v_external;
2453 unsigned lower_yx_xuxv_external;
2454
2455 /**
2456 * To emulate certain texture wrap modes, this can be used
2457 * to saturate the specified tex coord to [0.0, 1.0]. The
2458 * bits are indexed by sampler number, i.e. if, for example:
2459 *
2460 * (conf->saturate_s & (1 << n))
2461 *
2462 * is true, then the s coord for sampler n is saturated.
2463 *
2464 * Note that clamping must happen *after* projector lowering
2465 * so any projected texture sample instruction with a clamped
2466 * coordinate gets automatically lowered, regardless of the
2467 * 'lower_txp' setting.
2468 */
2469 unsigned saturate_s;
2470 unsigned saturate_t;
2471 unsigned saturate_r;
2472
2473 /* Bitmask of textures that need swizzling.
2474 *
2475 * If (swizzle_result & (1 << texture_index)), then the swizzle in
2476 * swizzles[texture_index] is applied to the result of the texturing
2477 * operation.
2478 */
2479 unsigned swizzle_result;
2480
2481 /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles
2482 * while 4 and 5 represent 0 and 1 respectively.
2483 */
2484 uint8_t swizzles[32][4];
2485
2486 /**
2487 * Bitmask of textures that need sRGB-to-linear conversion. If
2488 * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
2489 * of the texture are lowered to linear.
2490 */
2491 unsigned lower_srgb;
2492 } nir_lower_tex_options;
2493
2494 bool nir_lower_tex(nir_shader *shader,
2495 const nir_lower_tex_options *options);
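/*
 * Illustrative sketch: lower projectors on RECT samplers, normalize RECT
 * coordinates, and clamp the s/t coordinates of sampler 0. The particular
 * bits chosen here are only examples.
 *
 *    nir_lower_tex_options tex_options = {
 *       .lower_txp = 1 << GLSL_SAMPLER_DIM_RECT,
 *       .lower_rect = true,
 *       .saturate_s = 1 << 0,
 *       .saturate_t = 1 << 0,
 *    };
 *    nir_lower_tex(shader, &tex_options);
 */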
2496
2497 bool nir_lower_idiv(nir_shader *shader);
2498
2499 void nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
2500 void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
2501
2502 void nir_lower_two_sided_color(nir_shader *shader);
2503
2504 void nir_lower_clamp_color_outputs(nir_shader *shader);
2505
2506 void nir_lower_passthrough_edgeflags(nir_shader *shader);
2507
2508 typedef struct nir_lower_wpos_ytransform_options {
2509 int state_tokens[5];
2510 bool fs_coord_origin_upper_left :1;
2511 bool fs_coord_origin_lower_left :1;
2512 bool fs_coord_pixel_center_integer :1;
2513 bool fs_coord_pixel_center_half_integer :1;
2514 } nir_lower_wpos_ytransform_options;
2515
2516 bool nir_lower_wpos_ytransform(nir_shader *shader,
2517 const nir_lower_wpos_ytransform_options *options);
2518 bool nir_lower_wpos_center(nir_shader *shader);
2519
2520 typedef struct nir_lower_drawpixels_options {
2521 int texcoord_state_tokens[5];
2522 int scale_state_tokens[5];
2523 int bias_state_tokens[5];
2524 unsigned drawpix_sampler;
2525 unsigned pixelmap_sampler;
2526 bool pixel_maps :1;
2527 bool scale_and_bias :1;
2528 } nir_lower_drawpixels_options;
2529
2530 void nir_lower_drawpixels(nir_shader *shader,
2531 const nir_lower_drawpixels_options *options);
2532
2533 typedef struct nir_lower_bitmap_options {
2534 unsigned sampler;
2535 bool swizzle_xxxx;
2536 } nir_lower_bitmap_options;
2537
2538 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
2539
2540 void nir_lower_atomics(nir_shader *shader,
2541 const struct gl_shader_program *shader_program);
2542 void nir_lower_to_source_mods(nir_shader *shader);
2543
2544 bool nir_lower_gs_intrinsics(nir_shader *shader);
2545
2546 typedef enum {
2547 nir_lower_drcp = (1 << 0),
2548 nir_lower_dsqrt = (1 << 1),
2549 nir_lower_drsq = (1 << 2),
2550 nir_lower_dtrunc = (1 << 3),
2551 nir_lower_dfloor = (1 << 4),
2552 nir_lower_dceil = (1 << 5),
2553 nir_lower_dfract = (1 << 6),
2554 nir_lower_dround_even = (1 << 7),
2555 nir_lower_dmod = (1 << 8)
2556 } nir_lower_doubles_options;
2557
2558 void nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
2559 void nir_lower_double_pack(nir_shader *shader);
2560
2561 bool nir_normalize_cubemap_coords(nir_shader *shader);
2562
2563 void nir_live_ssa_defs_impl(nir_function_impl *impl);
2564 bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
2565
2566 void nir_convert_to_ssa_impl(nir_function_impl *impl);
2567 void nir_convert_to_ssa(nir_shader *shader);
2568
2569 bool nir_repair_ssa_impl(nir_function_impl *impl);
2570 bool nir_repair_ssa(nir_shader *shader);
2571
2572 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
2573 * registers. If false, convert all values (even those not involved in a phi
2574 * node) to registers.
2575 */
2576 void nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
2577
2578 bool nir_opt_algebraic(nir_shader *shader);
2579 bool nir_opt_algebraic_late(nir_shader *shader);
2580 bool nir_opt_constant_folding(nir_shader *shader);
2581
2582 bool nir_opt_global_to_local(nir_shader *shader);
2583
2584 bool nir_copy_prop(nir_shader *shader);
2585
2586 bool nir_opt_cse(nir_shader *shader);
2587
2588 bool nir_opt_dce(nir_shader *shader);
2589
2590 bool nir_opt_dead_cf(nir_shader *shader);
2591
2592 bool nir_opt_gcm(nir_shader *shader, bool value_number);
2593
2594 bool nir_opt_peephole_select(nir_shader *shader);
2595
2596 bool nir_opt_remove_phis(nir_shader *shader);
2597
2598 bool nir_opt_undef(nir_shader *shader);
2599
2600 void nir_sweep(nir_shader *shader);
2601
2602 nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
2603 gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
2604
2605 #ifdef __cplusplus
2606 } /* extern "C" */
2607 #endif