/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#pragma once

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include <stdio.h>

#include "nir_opcodes.h"

#ifdef __cplusplus
extern "C" {
#endif

struct gl_program;
struct gl_shader_program;

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)

/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
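
/* For illustration (a sketch, using the instruction casts declared later in
 * this file): given a generic nir_instr, a pass can recover the full ALU
 * instruction like so:
 *
 *    if (instr->type == nir_instr_type_alu) {
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       ...
 *    }
 *
 * The assert in the generated cast function catches mismatched casts in
 * debug builds.
 */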

struct nir_function;
struct nir_shader;
struct nir_instr;


/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   int tokens[5];
   int swizzle;
} nir_state_slot;

typedef enum {
   nir_var_shader_in = (1 << 0),
   nir_var_shader_out = (1 << 1),
   nir_var_global = (1 << 2),
   nir_var_local = (1 << 3),
   nir_var_uniform = (1 << 4),
   nir_var_shader_storage = (1 << 5),
   nir_var_system_value = (1 << 6),
   nir_var_param = (1 << 7),
   nir_var_shared = (1 << 8),
   nir_var_all = ~0,
} nir_variable_mode;

/**
 * Data stored in an nir_constant
 */
union nir_constant_data {
   unsigned u[16];
   int i[16];
   float f[16];
   bool b[16];
   double d[16];
};

typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   union nir_constant_data value;

   /* We could get this from var->type, but having it here makes clone much
    * easier: we don't have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;

/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;

/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */

typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned centroid:1;
      unsigned sample:1;
      unsigned patch:1;
      unsigned invariant:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       * @{
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;
      /*@}*/

      /**
       * Was the location explicitly set in the shader?
       *
       * If the location is explicitly set in the shader, it \b cannot be changed
       * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
       * no effect).
       */
      unsigned explicit_location:1;
      unsigned explicit_index:1;

      /**
       * Was an initial binding explicitly set in the shader?
       *
       * If so, constant_initializer contains an integer nir_constant
       * representing the initial binding point.
       */
      unsigned explicit_binding:1;

      /**
       * Does this variable have an initializer?
       *
       * This is used by the linker to cross-validate initializers of global
       * variables.
       */
      unsigned has_initializer:1;

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components. For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      bool compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c nir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      int binding;

      /**
       * Location an atomic counter is stored at.
       */
      unsigned offset;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         bool read_only; /**< "readonly" qualifier. */
         bool write_only; /**< "writeonly" qualifier. */
         bool coherent;
         bool _volatile;
         bool restrict_flag;

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;

      /**
       * Highest element accessed with a constant expression array index
       *
       * Not used for non-array variables.
       */
      unsigned max_array_access;

   } data;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree.  In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   unsigned num_state_slots; /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/

   /**
    * Constant expression assigned in the initializer of the variable
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;
} nir_variable;

#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)

static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
}

typedef struct nir_register {
   struct exec_node node;

   unsigned num_components; /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_global;

   /**
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element. This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends. By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_src's where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dest's where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_if's where this register is used as a condition */
   struct list_head if_uses;
} nir_register;

typedef enum {
   nir_instr_type_alu,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;

typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;

static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev(&instr->node));
}

static inline bool
nir_instr_is_last(nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next(&instr->node));
}

typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char* name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   nir_instr *parent_instr;

   /** set of nir_src's where this def is used (read from) */
   struct list_head uses;

   /** set of nir_if's where this def is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;

struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;

struct nir_if;

typedef struct nir_src {
   union {
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()

#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
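
/* For example (a small sketch): counting the uses of a def or register is
 * just an iteration over these lists:
 *
 *    unsigned num_uses = 0;
 *    nir_foreach_use(src, def)
 *       num_uses++;
 *
 * Use the _safe variants when the loop body may remove or rewrite sources.
 */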

typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)

static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}

static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);

typedef struct {
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become negative.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[4];
} nir_alu_src;

typedef struct {
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */

   bool saturate;

   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
} nir_alu_dest;

typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 = 32 | nir_type_bool,
   nir_type_int8 = 8 | nir_type_int,
   nir_type_int16 = 16 | nir_type_int,
   nir_type_int32 = 32 | nir_type_int,
   nir_type_int64 = 64 | nir_type_int,
   nir_type_uint8 = 8 | nir_type_uint,
   nir_type_uint16 = 16 | nir_type_uint,
   nir_type_uint32 = 32 | nir_type_uint,
   nir_type_uint64 = 64 | nir_type_uint,
   nir_type_float16 = 16 | nir_type_float,
   nir_type_float32 = 32 | nir_type_float,
   nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;

#define NIR_ALU_TYPE_SIZE_MASK      0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
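
/* For example, since sized types are encoded as (size | base type):
 *
 *    nir_alu_type_get_base_type(nir_type_float32) == nir_type_float
 *    nir_alu_type_get_type_size(nir_type_float32) == 32
 *
 * An unsized type such as nir_type_float has a size of 0.
 */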

typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;

typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */

   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[4];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[4];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];

typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either.  This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);

/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(nir_alu_instr *instr, unsigned src, unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}

/*
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}
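
/* A typical per-component walk over an ALU source in a pass (sketch):
 *
 *    for (unsigned i = 0; i < nir_ssa_alu_instr_src_components(alu, s); i++) {
 *       unsigned chan = alu->src[s].swizzle[i];
 *       ...
 *    }
 *
 * Component i of the operation reads channel swizzle[i] of source s.
 */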

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);

typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_struct
} nir_deref_type;

typedef struct nir_deref {
   nir_deref_type deref_type;
   struct nir_deref *child;
   const struct glsl_type *type;
} nir_deref;

typedef struct {
   nir_deref deref;

   nir_variable *var;
} nir_deref_var;

/* This enum describes how the array is referenced.  If the deref is
 * direct then the base_offset is used.  If the deref is indirect then
 * offset is given by base_offset + indirect.  If the deref is a wildcard
 * then the deref refers to all of the elements of the array at the same
 * time.  Wildcard dereferences are only ever allowed in copy_var
 * intrinsics and the source and destination derefs must have matching
 * wildcards.
 */
typedef enum {
   nir_deref_array_type_direct,
   nir_deref_array_type_indirect,
   nir_deref_array_type_wildcard,
} nir_deref_array_type;

typedef struct {
   nir_deref deref;

   nir_deref_array_type deref_array_type;
   unsigned base_offset;
   nir_src indirect;
} nir_deref_array;

typedef struct {
   nir_deref deref;

   unsigned index;
} nir_deref_struct;

NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
                deref_type, nir_deref_type_var)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
                deref_type, nir_deref_type_array)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
                deref_type, nir_deref_type_struct)

/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
   while (deref->child)
      deref = deref->child;
   return deref;
}
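
/* A full chain such as a[i].field is represented as a nir_deref_var followed
 * by a nir_deref_array and a nir_deref_struct, so a generic walk over a chain
 * looks roughly like (sketch):
 *
 *    for (nir_deref *d = &deref_var->deref; d; d = d->child) {
 *       switch (d->deref_type) {
 *       case nir_deref_type_var:    ... break;
 *       case nir_deref_type_array:  ... break;
 *       case nir_deref_type_struct: ... break;
 *       }
 *    }
 */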

typedef struct {
   nir_instr instr;

   unsigned num_params;
   nir_deref_var **params;
   nir_deref_var *return_deref;

   struct nir_function *callee;
} nir_call_instr;

#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
                  num_variables, num_indices, idx0, idx1, idx2, flags) \
   nir_intrinsic_##name,

#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,

typedef enum {
#include "nir_intrinsics.h"
   nir_num_intrinsics = nir_last_intrinsic + 1
} nir_intrinsic_op;

#define NIR_INTRINSIC_MAX_CONST_INDEX 3

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_deref_var *variables[2];

   nir_src src[];
} nir_intrinsic_instr;
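
/* Since the sources are a trailing array, a pass generally consults
 * nir_intrinsic_infos (declared below) to know how many there are, e.g.
 * (sketch):
 *
 *    const nir_intrinsic_info *info = &nir_intrinsic_infos[intrin->intrinsic];
 *    for (unsigned i = 0; i < info->num_srcs; i++) {
 *       ... intrin->src[i] ...
 *    }
 */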

/**
 * \name NIR intrinsics semantic flags
 *
 * information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if its output value
    * is not being used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;

/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access.  This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT = 8,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE = 9,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,

} nir_intrinsic_index_flag;

#define NIR_INTRINSIC_MAX_INPUTS 4

typedef struct {
   const char *name;

   unsigned num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of inputs/outputs that are variables */
   unsigned num_variables;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];


#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                            \
static inline type                                                           \
nir_intrinsic_##name(nir_intrinsic_instr *instr)                             \
{                                                                            \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];  \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                        \
   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1];     \
}                                                                            \
static inline void                                                           \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)               \
{                                                                            \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];  \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                        \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;      \
}

INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
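
/* For example, a pass working on a store intrinsic that carries both a BASE
 * and a WRMASK index might do (sketch; "offset" is a hypothetical local):
 *
 *    unsigned base = nir_intrinsic_base(intrin);
 *    unsigned wrmask = nir_intrinsic_write_mask(intrin);
 *    nir_intrinsic_set_base(intrin, base + offset);
 *
 * The asserts in the accessors catch attempts to read an index the intrinsic
 * does not actually have.
 */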

/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparitor, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ms_mcs, /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
   nir_tex_src_plane,          /* < selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_src;

typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txf_ms_mcs,         /**< Multisample compression value fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  * identical.
                                  */
} nir_texop;

typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs 4
    * components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The texture deref
    *
    * If this is null, use texture_index instead.
    */
   nir_deref_var *texture;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_lod
    *    - nir_texop_tg4
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;

   /** The sampler deref
    *
    * If this is null, use sampler_index instead.
    */
   nir_deref_var *sampler;
} nir_tex_instr;

static inline unsigned
nir_tex_instr_dest_size(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}

/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}

static inline nir_alu_type
nir_tex_instr_src_type(nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparitor:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}

static inline unsigned
nir_tex_instr_src_size(nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_offset ||
       instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}

static inline int
nir_tex_instr_src_index(nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}
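
/* For example (sketch), fetching the coordinate source of a texture
 * instruction, which may or may not be present:
 *
 *    int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
 *    if (idx >= 0) {
 *       ... tex->src[idx].src ...
 *    }
 */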

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);

typedef union {
   float f32[4];
   double f64[4];
   int32_t i32[4];
   uint32_t u32[4];
   int64_t i64[4];
   uint64_t u64[4];
} nir_const_value;

typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;

typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;
   nir_jump_type type;
} nir_jump_instr;

/* creates a new SSA variable in an undefined state */

typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;

typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /** < list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;

typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)

typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entry's.  The sources of all of the
    * entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * get swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;

NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)

/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops.  The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish.  Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
1454 * all return statements point to (which is always empty). Together, all the
1455 * blocks with their predecessors and successors make up the control flow
1456 * graph (CFG) of the function. There are helpers that modify the tree of
1457 * control flow nodes while modifying the CFG appropriately; these should be
1458 * used instead of modifying the tree directly.
1459 */
1460
1461 typedef enum {
1462 nir_cf_node_block,
1463 nir_cf_node_if,
1464 nir_cf_node_loop,
1465 nir_cf_node_function
1466 } nir_cf_node_type;
1467
1468 typedef struct nir_cf_node {
1469 struct exec_node node;
1470 nir_cf_node_type type;
1471 struct nir_cf_node *parent;
1472 } nir_cf_node;
1473
1474 typedef struct nir_block {
1475 nir_cf_node cf_node;
1476
1477 struct exec_list instr_list; /** < list of nir_instr */
1478
1479 /** generic block index; generated by nir_index_blocks */
1480 unsigned index;
1481
1482 /*
1483 * Each block can only have up to 2 successors, so we put them in a simple
1484 * array - no need for anything more complicated.
1485 */
1486 struct nir_block *successors[2];
1487
1488 /* Set of nir_block predecessors in the CFG */
1489 struct set *predecessors;
1490
1491 /*
1492 * this node's immediate dominator in the dominance tree - set to NULL for
1493 * the start block.
1494 */
1495 struct nir_block *imm_dom;
1496
1497 /* This node's children in the dominance tree */
1498 unsigned num_dom_children;
1499 struct nir_block **dom_children;
1500
1501 /* Set of nir_block's on the dominance frontier of this block */
1502 struct set *dom_frontier;
1503
1504 /*
1505 * These two indices have the property that dom_{pre,post}_index for each
1506 * child of this block in the dominance tree will always be between
1507 * dom_pre_index and dom_post_index for this block, which makes testing if
1508 * a given block is dominated by another block an O(1) operation.
1509 */
1510 unsigned dom_pre_index, dom_post_index;
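
   /* For instance, once dominance metadata is valid, "a dominates b" can be
    * tested (a sketch of the O(1) check) as:
    *
    *    a->dom_pre_index <= b->dom_pre_index &&
    *    a->dom_post_index >= b->dom_post_index
    */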

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;

static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}

#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)

typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /** < list of nir_cf_node */
   struct exec_list else_list; /** < list of nir_cf_node */
} nir_if;

typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /** < list of nir_cf_node */
} nir_loop;

/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
} nir_metadata;

typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /** < list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** array of variables used as parameters */
   unsigned num_params;
   nir_variable **params;

   /** variable used to hold the result of the function */
   nir_variable *return_var;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks; only valid when the
    * nir_metadata_block_index metadata is valid
    */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}

static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}

NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)

static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

typedef enum {
   nir_parameter_in,
   nir_parameter_out,
   nir_parameter_inout,
} nir_parameter_type;

typedef struct {
   nir_parameter_type param_type;
   const struct glsl_type *type;
} nir_parameter;

typedef struct nir_function {
   struct exec_node node;

   const char *name;
   struct nir_shader *shader;

   unsigned num_params;
   nir_parameter *params;
   const struct glsl_type *return_type;

   /** The implementation of this function.
    *
    * If the function is only declared and not implemented, this is NULL.
    */
   nir_function_impl *impl;
} nir_function;

typedef struct nir_shader_compiler_options {
   bool lower_fdiv;
   bool lower_ffma;
   bool fuse_ffma;
   bool lower_flrp32;
   /** lowers flrp when the operands are 64-bit */
   bool lower_flrp64;
   bool lower_fpow;
   bool lower_fsat;
   bool lower_fsqrt;
   bool lower_fmod32;
   bool lower_fmod64;
   bool lower_bitfield_extract;
   bool lower_bitfield_insert;
   bool lower_uadd_carry;
   bool lower_usub_borrow;
   /** lowers fneg and ineg to fsub and isub. */
   bool lower_negate;
   /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
   bool lower_sub;

   /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
   bool lower_scmp;

   /** enables rules to lower idiv by power-of-two: */
   bool lower_idiv;

   /* Does the native fdot instruction replicate its result for four
    * components?  If so, then opt_algebraic_late will turn all fdotN
    * instructions into fdot_replicatedN instructions.
    */
   bool fdot_replicates;

   /** lowers ffract to fsub+ffloor: */
   bool lower_ffract;

   bool lower_pack_half_2x16;
   bool lower_pack_unorm_2x16;
   bool lower_pack_snorm_2x16;
   bool lower_pack_unorm_4x8;
   bool lower_pack_snorm_4x8;
   bool lower_unpack_half_2x16;
   bool lower_unpack_unorm_2x16;
   bool lower_unpack_snorm_2x16;
   bool lower_unpack_unorm_4x8;
   bool lower_unpack_snorm_4x8;

   bool lower_extract_byte;
   bool lower_extract_word;

   /**
    * Does the driver support real 32-bit integers?  (Otherwise, integers
    * are simulated by floats.)
    */
   bool native_integers;

   /* Indicates that the driver only has zero-based vertex id */
   bool vertex_id_zero_based;

   bool lower_cs_local_index_from_id;

   /**
    * Should nir_lower_io() create load_interpolated_input intrinsics?
    *
    * If not, it generates regular load_input intrinsics and interpolation
    * information must be inferred from the list of input nir_variables.
    */
   bool use_interpolated_input_intrinsics;
} nir_shader_compiler_options;

typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info *info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /** < list of nir_function */

   /** list of global registers in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access plus one
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;

   /** The shader stage, such as MESA_SHADER_VERTEX. */
   gl_shader_stage stage;
} nir_shader;

static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   assert(func->return_type == glsl_void_type());
   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}

#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)
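
/* Since not every nir_function has an implementation, a pass that touches
 * every piece of code in a shader typically looks like (sketch):
 *
 *    nir_foreach_function(function, shader) {
 *       if (function->impl)
 *          ...
 *    }
 */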

nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
1869 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
1870
1871 static inline void
1872 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
1873 {
1874 assert(var->data.mode == nir_var_local);
1875 exec_list_push_tail(&impl->locals, &var->node);
1876 }
1877
1878 /** creates a variable, sets a few defaults, and adds it to the list */
1879 nir_variable *nir_variable_create(nir_shader *shader,
1880 nir_variable_mode mode,
1881 const struct glsl_type *type,
1882 const char *name);
1883 /** creates a local variable and adds it to the list */
1884 nir_variable *nir_local_variable_create(nir_function_impl *impl,
1885 const struct glsl_type *type,
1886 const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
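
/* Illustrative sketch: a pass that only rewrites instructions inside
 * existing blocks can require dominance information up front and then
 * declare which metadata it kept intact (enum values assumed from the
 * nir_metadata declaration earlier in this header):
 *
 *    nir_metadata_require(impl, nir_metadata_dominance);
 *    ...transform the impl...
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 */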

/** creates an ALU instruction with default swizzle/writemask/etc. and NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);

nir_deref *nir_copy_deref(void *mem_ctx, nir_deref *deref);

typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
bool nir_deref_foreach_leaf(nir_deref_var *deref,
                            nir_deref_foreach_leaf_cb cb, void *state);

nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);

/**
 * NIR Cursors and Instruction Insertion API
 * @{
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes. Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;
   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);

static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}

static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}

/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
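
/* Illustrative sketch: cursors compose with the constructors above, so
 * inserting a freshly created instruction at the top of an impl's body
 * is a single call:
 *
 *    nir_ssa_undef_instr *undef =
 *       nir_ssa_undef_instr_create(shader, 1, 32);
 *    nir_instr_insert(nir_before_cf_list(&impl->body), &undef->instr);
 */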

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}

void nir_instr_remove(nir_instr *instr);

/** @} */

typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);
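
/* Illustrative sketch: the usual replace-and-remove idiom once a pass
 * has computed a replacement value; old_alu and replacement are
 * placeholders for the pass's own state:
 *
 *    nir_ssa_def_rewrite_uses(&old_alu->dest.dest.ssa,
 *                             nir_src_for_ssa(replacement));
 *    nir_instr_remove(&old_alu->instr);
 */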

uint8_t nir_ssa_def_components_read(nir_ssa_def *def);

/*
 * finds the next basic block in source-code order, returns NULL if there is
 * none
 */

nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */

nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);

/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
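
/* Illustrative sketch: a read-only walk over a whole impl with the
 * iterators above; passes that add or remove blocks while walking
 * should use the _safe variants instead.
 *
 *    unsigned count = 0;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block)
 *          count++;
 *    }
 */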

/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);

#ifdef DEBUG
void nir_validate_shader(nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

#include "util/debug.h"
static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}
#else
static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
#endif /* DEBUG */

#define _PASS(nir, do_pass) do {                                     \
   do_pass                                                           \
   nir_validate_shader(nir);                                         \
   if (should_clone_nir()) {                                         \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
      ralloc_free(nir);                                              \
      nir = clone;                                                   \
   }                                                                 \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(nir,                \
   nir_metadata_set_validation_flag(nir);                            \
   if (pass(nir, ##__VA_ARGS__)) {                                   \
      progress = true;                                               \
      nir_metadata_check_validation_flag(nir);                       \
   }                                                                 \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(nir,                        \
   pass(nir, ##__VA_ARGS__);                                         \
)
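
/* Illustrative sketch: the typical driver-side optimization loop built
 * on these macros. Because _PASS may clone and free the shader when
 * NIR_TEST_CLONE is set, nir must be an lvalue the macro can reassign:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *       NIR_PASS(progress, nir, nir_opt_algebraic);
 *    } while (progress);
 */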

void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);

bool nir_split_var_copies(nir_shader *shader);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
void nir_lower_var_copies(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *));

typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier. This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
void nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *),
                  nir_lower_io_options options);
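
/* Illustrative sketch: assigning input locations and then lowering the
 * input variables to load_input intrinsics; type_size_vec4() stands in
 * for a driver-provided size callback and is hypothetical here:
 *
 *    nir_assign_var_locations(&shader->inputs, &shader->num_inputs,
 *                             type_size_vec4);
 *    nir_lower_io(shader, nir_var_shader_in, type_size_vec4,
 *                 (nir_lower_io_options)0);
 */
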
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(nir_variable *var, gl_shader_stage stage);

void nir_lower_io_types(nir_shader *shader);
void nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);

void nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
bool nir_lower_alu_to_scalar(nir_shader *shader);
void nir_lower_load_const_to_scalar(nir_shader *shader);

bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);

void nir_lower_samplers(nir_shader *shader,
                        const struct gl_shader_program *shader_program);

bool nir_lower_system_values(nir_shader *shader);

typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * Bitmasks of textures (by texture index) whose YUV data should be
    * converted to RGB, one mask per external-format layout.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0]. The
    * bits are according to sampler #, i.e. if, for example:
    *
    *   (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /**
    * Bitmask of textures that need srgb to linear conversion. If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;
} nir_lower_tex_options;

bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
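
/* Illustrative sketch: lowering texture projectors for every sampler
 * dim and saturating the s/t coordinates of sampler 2, e.g. to emulate
 * a CLAMP wrap mode the hardware lacks:
 *
 *    nir_lower_tex_options opts = { 0 };
 *    opts.lower_txp = ~0;
 *    opts.saturate_s = (1 << 2);
 *    opts.saturate_t = (1 << 2);
 *    nir_lower_tex(shader, &opts);
 */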

bool nir_lower_idiv(nir_shader *shader);

void nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
void nir_lower_clip_cull_distance_arrays(nir_shader *nir);

void nir_lower_two_sided_color(nir_shader *shader);

void nir_lower_clamp_color_outputs(nir_shader *shader);

void nir_lower_passthrough_edgeflags(nir_shader *shader);

typedef struct nir_lower_wpos_ytransform_options {
   int state_tokens[5];
   bool fs_coord_origin_upper_left :1;
   bool fs_coord_origin_lower_left :1;
   bool fs_coord_pixel_center_integer :1;
   bool fs_coord_pixel_center_half_integer :1;
} nir_lower_wpos_ytransform_options;

bool nir_lower_wpos_ytransform(nir_shader *shader,
                               const nir_lower_wpos_ytransform_options *options);
bool nir_lower_wpos_center(nir_shader *shader);

typedef struct nir_lower_drawpixels_options {
   int texcoord_state_tokens[5];
   int scale_state_tokens[5];
   int bias_state_tokens[5];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps :1;
   bool scale_and_bias :1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);

typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);

void nir_lower_atomics(nir_shader *shader,
                       const struct gl_shader_program *shader_program);
void nir_lower_to_source_mods(nir_shader *shader);

bool nir_lower_gs_intrinsics(nir_shader *shader);

typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8)
} nir_lower_doubles_options;

void nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
void nir_lower_double_pack(nir_shader *shader);

bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);
bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

void nir_convert_to_ssa_impl(nir_function_impl *impl);
void nir_convert_to_ssa(nir_shader *shader);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers. If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
void nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_sweep(nir_shader *shader);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);

#ifdef __cplusplus
} /* extern "C" */
#endif