nir: Add a pass for lowering away constant initializers
[mesa.git] src/compiler/nir/nir.h
1 /*
2 * Copyright © 2014 Connor Abbott
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #pragma once
29
30 #include "util/hash_table.h"
31 #include "compiler/glsl/list.h"
32 #include "GL/gl.h" /* GLenum */
33 #include "util/list.h"
34 #include "util/ralloc.h"
35 #include "util/set.h"
36 #include "util/bitset.h"
37 #include "util/macros.h"
38 #include "compiler/nir_types.h"
39 #include "compiler/shader_enums.h"
40 #include "compiler/shader_info.h"
41 #include <stdio.h>
42
43 #include "nir_opcodes.h"
44
45 #ifdef __cplusplus
46 extern "C" {
47 #endif
48
49 struct gl_program;
50 struct gl_shader_program;
51
52 #define NIR_FALSE 0u
53 #define NIR_TRUE (~0u)
54
55 /** Defines a cast function
56 *
57 * This macro defines a cast function from in_type to out_type where
58 * out_type is some structure type that contains a field of type in_type.
59 *
60 * Note that you have to be a bit careful as the generated cast function
61 * destroys constness.
62 */
63 #define NIR_DEFINE_CAST(name, in_type, out_type, field, \
64 type_field, type_value) \
65 static inline out_type * \
66 name(const in_type *parent) \
67 { \
68 assert(parent && parent->type_field == type_value); \
69 return exec_node_data(out_type, parent, field); \
70 }
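/* Illustrative sketch (not part of the original header): what a
 * NIR_DEFINE_CAST invocation provides, using made-up example types.  The
 * real invocations appear further down (e.g. nir_instr_as_alu()).
 */
#if 0
typedef struct { int kind; } example_base;
typedef struct { example_base base; float value; } example_derived;

/* Generates example_base_as_derived(): asserts parent->kind == 7, then
 * recovers the containing example_derived via exec_node_data()'s
 * offsetof() arithmetic on the "base" field. */
NIR_DEFINE_CAST(example_base_as_derived, example_base, example_derived,
                base, kind, 7)
#endif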
71
72 struct nir_function;
73 struct nir_shader;
74 struct nir_instr;
75
76
77 /**
78 * Description of built-in state associated with a uniform
79 *
80 * \sa nir_variable::state_slots
81 */
82 typedef struct {
83 int tokens[5];
84 int swizzle;
85 } nir_state_slot;
86
87 typedef enum {
88 nir_var_shader_in = (1 << 0),
89 nir_var_shader_out = (1 << 1),
90 nir_var_global = (1 << 2),
91 nir_var_local = (1 << 3),
92 nir_var_uniform = (1 << 4),
93 nir_var_shader_storage = (1 << 5),
94 nir_var_system_value = (1 << 6),
95 nir_var_param = (1 << 7),
96 nir_var_shared = (1 << 8),
97 nir_var_all = ~0,
98 } nir_variable_mode;
99
100
101 typedef union {
102 float f32[4];
103 double f64[4];
104 int32_t i32[4];
105 uint32_t u32[4];
106 int64_t i64[4];
107 uint64_t u64[4];
108 } nir_const_value;
109
110 typedef struct nir_constant {
111 /**
112 * Value of the constant.
113 *
114 * The field used to back the values supplied by the constant is determined
115 * by the type associated with the \c nir_variable. Constants may be
116 * scalars, vectors, or matrices.
117 */
118 nir_const_value values[4];
119
120 /* We could get this from var->type, but it makes clone *much* easier
121 * not to have to care about the type.
122 */
123 unsigned num_elements;
124
125 /* Array elements / Structure Fields */
126 struct nir_constant **elements;
127 } nir_constant;
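/* Illustrative sketch (not part of the original header): a float vec3
 * constant (1.0, 2.0, 3.0).  Which union field is read back is decided by
 * the associated nir_variable's glsl type, as described above. */
#if 0
nir_constant c = {
   .values[0] = { .f32 = { 1.0f, 2.0f, 3.0f } },
   .num_elements = 0, /* not an array or struct, so no sub-elements */
   .elements = NULL,
};
#endif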
128
129 /**
130 * \brief Layout qualifiers for gl_FragDepth.
131 *
132 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
133 * with a layout qualifier.
134 */
135 typedef enum {
136 nir_depth_layout_none, /**< No depth layout is specified. */
137 nir_depth_layout_any,
138 nir_depth_layout_greater,
139 nir_depth_layout_less,
140 nir_depth_layout_unchanged
141 } nir_depth_layout;
142
143 /**
144 * Either a uniform, global variable, shader input, or shader output. Based on
145 * ir_variable - it should be easy to translate between the two.
146 */
147
148 typedef struct nir_variable {
149 struct exec_node node;
150
151 /**
152 * Declared type of the variable
153 */
154 const struct glsl_type *type;
155
156 /**
157 * Declared name of the variable
158 */
159 char *name;
160
161 struct nir_variable_data {
162 /**
163 * Storage class of the variable.
164 *
165 * \sa nir_variable_mode
166 */
167 nir_variable_mode mode;
168
169 /**
170 * Is the variable read-only?
171 *
172 * This is set for variables declared as \c const, shader inputs,
173 * and uniforms.
174 */
175 unsigned read_only:1;
176 unsigned centroid:1;
177 unsigned sample:1;
178 unsigned patch:1;
179 unsigned invariant:1;
180
181 /**
182 * Interpolation mode for shader inputs / outputs
183 *
184 * \sa glsl_interp_mode
185 */
186 unsigned interpolation:2;
187
188 /**
189 * \name ARB_fragment_coord_conventions
190 * @{
191 */
192 unsigned origin_upper_left:1;
193 unsigned pixel_center_integer:1;
194 /*@}*/
195
196 /**
197 * Was the location explicitly set in the shader?
198 *
199 * If the location is explicitly set in the shader, it \b cannot be changed
200 * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
201 * no effect).
202 */
203 unsigned explicit_location:1;
204 unsigned explicit_index:1;
205
206 /**
207 * Was an initial binding explicitly set in the shader?
208 *
209 * If so, constant_initializer contains an integer nir_constant
210 * representing the initial binding point.
211 */
212 unsigned explicit_binding:1;
213
214 /**
215 * Does this variable have an initializer?
216 *
217 * This is used by the linker to cross-validate initializers of global
218 * variables.
219 */
220 unsigned has_initializer:1;
221
222 /**
223 * If non-zero, then this variable may be packed along with other variables
224 * into a single varying slot, so this offset should be applied when
225 * accessing components. For example, an offset of 1 means that the x
226 * component of this variable is actually stored in component y of the
227 * location specified by \c location.
228 */
229 unsigned location_frac:2;
230
231 /**
232 * If true, this variable represents an array of scalars that should
233 * be tightly packed. In other words, consecutive array elements
234 * should be stored one component apart, rather than one slot apart.
235 */
236 bool compact:1;
237
238 /**
239 * Whether this is a fragment shader output implicitly initialized with
240 * the previous contents of the specified render target at the
241 * framebuffer location corresponding to this shader invocation.
242 */
243 unsigned fb_fetch_output:1;
244
245 /**
246 * \brief Layout qualifier for gl_FragDepth.
247 *
248 * This is not equal to \c ir_depth_layout_none if and only if this
249 * variable is \c gl_FragDepth and a layout qualifier is specified.
250 */
251 nir_depth_layout depth_layout;
252
253 /**
254 * Storage location of the base of this variable
255 *
256 * The precise meaning of this field depends on the nature of the variable.
257 *
258 * - Vertex shader input: one of the values from \c gl_vert_attrib.
259 * - Vertex shader output: one of the values from \c gl_varying_slot.
260 * - Geometry shader input: one of the values from \c gl_varying_slot.
261 * - Geometry shader output: one of the values from \c gl_varying_slot.
262 * - Fragment shader input: one of the values from \c gl_varying_slot.
263 * - Fragment shader output: one of the values from \c gl_frag_result.
264 * - Uniforms: Per-stage uniform slot number for default uniform block.
265 * - Uniforms: Index within the uniform block definition for UBO members.
266 * - Non-UBO Uniforms: uniform slot number.
267 * - Other: This field is not currently used.
268 *
269 * If the variable is a uniform, shader input, or shader output, and the
270 * slot has not been assigned, the value will be -1.
271 */
272 int location;
273
274 /**
275 * The actual location of the variable in the IR. Only valid for inputs
276 * and outputs.
277 */
278 unsigned int driver_location;
279
280 /**
281 * output index for dual source blending.
282 */
283 int index;
284
285 /**
286 * Descriptor set binding for sampler or UBO.
287 */
288 int descriptor_set;
289
290 /**
291 * Initial binding point for a sampler or UBO.
292 *
293 * For array types, this represents the binding point for the first element.
294 */
295 int binding;
296
297 /**
298 * Location an atomic counter is stored at.
299 */
300 unsigned offset;
301
302 /**
303 * ARB_shader_image_load_store qualifiers.
304 */
305 struct {
306 bool read_only; /**< "readonly" qualifier. */
307 bool write_only; /**< "writeonly" qualifier. */
308 bool coherent;
309 bool _volatile;
310 bool restrict_flag;
311
312 /** Image internal format if specified explicitly, otherwise GL_NONE. */
313 GLenum format;
314 } image;
315
316 /**
317 * Highest element accessed with a constant expression array index
318 *
319 * Not used for non-array variables.
320 */
321 unsigned max_array_access;
322
323 } data;
324
325 /**
326 * Built-in state that backs this uniform
327 *
328 * Once set at variable creation, \c state_slots must remain invariant.
329 * This is because, ideally, this array would be shared by all clones of
330 * this variable in the IR tree. In other words, we'd really like for it
331 * to be a fly-weight.
332 *
333 * If the variable is not a uniform, \c num_state_slots will be zero and
334 * \c state_slots will be \c NULL.
335 */
336 /*@{*/
337 unsigned num_state_slots; /**< Number of state slots used */
338 nir_state_slot *state_slots; /**< State descriptors. */
339 /*@}*/
340
341 /**
342 * Constant expression assigned in the initializer of the variable
343 */
344 nir_constant *constant_initializer;
345
346 /**
347 * For variables that are in an interface block or are an instance of an
348 * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
349 *
350 * \sa ir_variable::location
351 */
352 const struct glsl_type *interface_type;
353 } nir_variable;
354
355 #define nir_foreach_variable(var, var_list) \
356 foreach_list_typed(nir_variable, var, node, var_list)
357
358 #define nir_foreach_variable_safe(var, var_list) \
359 foreach_list_typed_safe(nir_variable, var, node, var_list)
360
361 static inline bool
362 nir_variable_is_global(const nir_variable *var)
363 {
364 return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
365 }
366
367 typedef struct nir_register {
368 struct exec_node node;
369
370 unsigned num_components; /** < number of vector components */
371 unsigned num_array_elems; /** < size of array (0 for no array) */
372
373 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
374 uint8_t bit_size;
375
376 /** generic register index. */
377 unsigned index;
378
379 /** only for debug purposes, can be NULL */
380 const char *name;
381
382 /** whether this register is local (per-function) or global (per-shader) */
383 bool is_global;
384
385 /**
386 * If this flag is set to true, then accessing channels >= num_components
387 * is well-defined, and simply spills over to the next array element. This
388 * is useful for backends that can do per-component accessing, in
389 * particular scalar backends. By setting this flag and making
390 * num_components equal to 1, structures can be packed tightly into
391 * registers and then registers can be accessed per-component to get to
392 * each structure member, even if it crosses vec4 boundaries.
393 */
394 bool is_packed;
395
396 /** set of nir_src's where this register is used (read from) */
397 struct list_head uses;
398
399 /** set of nir_dest's where this register is defined (written to) */
400 struct list_head defs;
401
402 /** set of nir_if's where this register is used as a condition */
403 struct list_head if_uses;
404 } nir_register;
405
406 typedef enum {
407 nir_instr_type_alu,
408 nir_instr_type_call,
409 nir_instr_type_tex,
410 nir_instr_type_intrinsic,
411 nir_instr_type_load_const,
412 nir_instr_type_jump,
413 nir_instr_type_ssa_undef,
414 nir_instr_type_phi,
415 nir_instr_type_parallel_copy,
416 } nir_instr_type;
417
418 typedef struct nir_instr {
419 struct exec_node node;
420 nir_instr_type type;
421 struct nir_block *block;
422
423 /** generic instruction index. */
424 unsigned index;
425
426 /* A temporary for optimization and analysis passes to use for storing
427 * flags. For instance, DCE uses this to store the "dead/live" info.
428 */
429 uint8_t pass_flags;
430 } nir_instr;
431
432 static inline nir_instr *
433 nir_instr_next(nir_instr *instr)
434 {
435 struct exec_node *next = exec_node_get_next(&instr->node);
436 if (exec_node_is_tail_sentinel(next))
437 return NULL;
438 else
439 return exec_node_data(nir_instr, next, node);
440 }
441
442 static inline nir_instr *
443 nir_instr_prev(nir_instr *instr)
444 {
445 struct exec_node *prev = exec_node_get_prev(&instr->node);
446 if (exec_node_is_head_sentinel(prev))
447 return NULL;
448 else
449 return exec_node_data(nir_instr, prev, node);
450 }
451
452 static inline bool
453 nir_instr_is_first(nir_instr *instr)
454 {
455 return exec_node_is_head_sentinel(exec_node_get_prev(&instr->node));
456 }
457
458 static inline bool
459 nir_instr_is_last(nir_instr *instr)
460 {
461 return exec_node_is_tail_sentinel(exec_node_get_next(&instr->node));
462 }
463
464 typedef struct nir_ssa_def {
465 /** for debugging only, can be NULL */
466 const char* name;
467
468 /** generic SSA definition index. */
469 unsigned index;
470
471 /** Index into the live_in and live_out bitfields */
472 unsigned live_index;
473
474 nir_instr *parent_instr;
475
476 /** set of nir_instr's where this register is used (read from) */
477 struct list_head uses;
478
479 /** set of nir_if's where this register is used as a condition */
480 struct list_head if_uses;
481
482 uint8_t num_components;
483
484 /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
485 uint8_t bit_size;
486 } nir_ssa_def;
487
488 struct nir_src;
489
490 typedef struct {
491 nir_register *reg;
492 struct nir_src *indirect; /** < NULL for no indirect offset */
493 unsigned base_offset;
494
495 /* TODO use-def chain goes here */
496 } nir_reg_src;
497
498 typedef struct {
499 nir_instr *parent_instr;
500 struct list_head def_link;
501
502 nir_register *reg;
503 struct nir_src *indirect; /** < NULL for no indirect offset */
504 unsigned base_offset;
505
506 /* TODO def-use chain goes here */
507 } nir_reg_dest;
508
509 struct nir_if;
510
511 typedef struct nir_src {
512 union {
513 nir_instr *parent_instr;
514 struct nir_if *parent_if;
515 };
516
517 struct list_head use_link;
518
519 union {
520 nir_reg_src reg;
521 nir_ssa_def *ssa;
522 };
523
524 bool is_ssa;
525 } nir_src;
526
527 static inline nir_src
528 nir_src_init(void)
529 {
530 nir_src src = { { NULL } };
531 return src;
532 }
533
534 #define NIR_SRC_INIT nir_src_init()
535
536 #define nir_foreach_use(src, reg_or_ssa_def) \
537 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
538
539 #define nir_foreach_use_safe(src, reg_or_ssa_def) \
540 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)
541
542 #define nir_foreach_if_use(src, reg_or_ssa_def) \
543 list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
544
545 #define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
546 list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
547
548 typedef struct {
549 union {
550 nir_reg_dest reg;
551 nir_ssa_def ssa;
552 };
553
554 bool is_ssa;
555 } nir_dest;
556
557 static inline nir_dest
558 nir_dest_init(void)
559 {
560 nir_dest dest = { { { NULL } } };
561 return dest;
562 }
563
564 #define NIR_DEST_INIT nir_dest_init()
565
566 #define nir_foreach_def(dest, reg) \
567 list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)
568
569 #define nir_foreach_def_safe(dest, reg) \
570 list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)
571
572 static inline nir_src
573 nir_src_for_ssa(nir_ssa_def *def)
574 {
575 nir_src src = NIR_SRC_INIT;
576
577 src.is_ssa = true;
578 src.ssa = def;
579
580 return src;
581 }
582
583 static inline nir_src
584 nir_src_for_reg(nir_register *reg)
585 {
586 nir_src src = NIR_SRC_INIT;
587
588 src.is_ssa = false;
589 src.reg.reg = reg;
590 src.reg.indirect = NULL;
591 src.reg.base_offset = 0;
592
593 return src;
594 }
595
596 static inline nir_dest
597 nir_dest_for_reg(nir_register *reg)
598 {
599 nir_dest dest = NIR_DEST_INIT;
600
601 dest.reg.reg = reg;
602
603 return dest;
604 }
605
606 static inline unsigned
607 nir_src_bit_size(nir_src src)
608 {
609 return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
610 }
611
612 static inline unsigned
613 nir_dest_bit_size(nir_dest dest)
614 {
615 return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
616 }
617
618 void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
619 void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);
620
621 typedef struct {
622 nir_src src;
623
624 /**
625 * \name input modifiers
626 */
627 /*@{*/
628 /**
629 * For inputs interpreted as floating point, flips the sign bit. For
630 * inputs interpreted as integers, performs the two's complement negation.
631 */
632 bool negate;
633
634 /**
635 * Clears the sign bit for floating point values, and computes the integer
636 * absolute value for integers. Note that the negate modifier acts after
637 * the absolute value modifier, therefore if both are set then all inputs
638 * will become negative.
639 */
640 bool abs;
641 /*@}*/
642
643 /**
644 * For each input component, says which component of the register it is
645 * chosen from. Note that which elements of the swizzle are used and which
646 * are ignored are based on the write mask for most opcodes - for example,
647 * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
648 * a swizzle of {2, x, 1, 0} where x means "don't care."
649 */
650 uint8_t swizzle[4];
651 } nir_alu_src;
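/* Illustrative sketch (not part of the original header): the swizzle for
 * the "foo.xzw = bar.zyx" example above.  bar_src is a hypothetical
 * source; entry 1 is a don't-care because y is masked out. */
#if 0
nir_alu_src bar_src;
bar_src.swizzle[0] = 2; /* foo.x <- bar.z */
bar_src.swizzle[1] = 0; /* unused: writemask is 0b1101, y not written */
bar_src.swizzle[2] = 1; /* foo.z <- bar.y */
bar_src.swizzle[3] = 0; /* foo.w <- bar.x */
#endif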
652
653 typedef struct {
654 nir_dest dest;
655
656 /**
657 * \name saturate output modifier
658 *
659 * Only valid for opcodes that output floating-point numbers. Clamps the
660 * output to between 0.0 and 1.0 inclusive.
661 */
662
663 bool saturate;
664
665 unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
666 } nir_alu_dest;
667
668 typedef enum {
669 nir_type_invalid = 0, /* Not a valid type */
670 nir_type_float,
671 nir_type_int,
672 nir_type_uint,
673 nir_type_bool,
674 nir_type_bool32 = 32 | nir_type_bool,
675 nir_type_int8 = 8 | nir_type_int,
676 nir_type_int16 = 16 | nir_type_int,
677 nir_type_int32 = 32 | nir_type_int,
678 nir_type_int64 = 64 | nir_type_int,
679 nir_type_uint8 = 8 | nir_type_uint,
680 nir_type_uint16 = 16 | nir_type_uint,
681 nir_type_uint32 = 32 | nir_type_uint,
682 nir_type_uint64 = 64 | nir_type_uint,
683 nir_type_float16 = 16 | nir_type_float,
684 nir_type_float32 = 32 | nir_type_float,
685 nir_type_float64 = 64 | nir_type_float,
686 } nir_alu_type;
687
688 #define NIR_ALU_TYPE_SIZE_MASK 0xfffffff8
689 #define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007
690
691 static inline unsigned
692 nir_alu_type_get_type_size(nir_alu_type type)
693 {
694 return type & NIR_ALU_TYPE_SIZE_MASK;
695 }
696
697 static inline unsigned
698 nir_alu_type_get_base_type(nir_alu_type type)
699 {
700 return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
701 }
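/* Illustrative sketch (not part of the original header): the encoding puts
 * the bit size in the upper bits, so the two helpers above decompose a
 * sized type into its parts. */
#if 0
assert(nir_alu_type_get_base_type(nir_type_float32) == nir_type_float);
assert(nir_alu_type_get_type_size(nir_type_float32) == 32);
assert(nir_alu_type_get_type_size(nir_type_float) == 0); /* unsized */
#endif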
702
703 typedef enum {
704 NIR_OP_IS_COMMUTATIVE = (1 << 0),
705 NIR_OP_IS_ASSOCIATIVE = (1 << 1),
706 } nir_op_algebraic_property;
707
708 typedef struct {
709 const char *name;
710
711 unsigned num_inputs;
712
713 /**
714 * The number of components in the output
715 *
716 * If non-zero, this is the size of the output and input sizes are
717 * explicitly given; swizzle and writemask are still in effect, but if
718 * the output component is masked out, then the input component may
719 * still be in use.
720 *
721 * If zero, the opcode acts in the standard, per-component manner; the
722 * operation is performed on each component (except the ones that are
723 * masked out) with the input being taken from the input swizzle for
724 * that component.
725 *
726 * The size of some of the inputs may be given (i.e. non-zero) even
727 * though output_size is zero; in that case, the inputs with a zero
728 * size act per-component, while the inputs with non-zero size don't.
729 */
730 unsigned output_size;
731
732 /**
733 * The type of vector that the instruction outputs. Note that the
734 * saturate modifier is only allowed on outputs with the float type.
735 */
736
737 nir_alu_type output_type;
738
739 /**
740 * The number of components in each input
741 */
742 unsigned input_sizes[4];
743
744 /**
745 * The type of vector that each input takes. Note that negate and
746 * absolute value are only allowed on inputs with int or float type and
747 * behave differently on the two.
748 */
749 nir_alu_type input_types[4];
750
751 nir_op_algebraic_property algebraic_properties;
752 } nir_op_info;
753
754 extern const nir_op_info nir_op_infos[nir_num_opcodes];
755
756 typedef struct nir_alu_instr {
757 nir_instr instr;
758 nir_op op;
759
760 /** Indicates that this ALU instruction generates an exact value
761 *
762 * This is kind of a mixture of GLSL "precise" and "invariant" and not
763 * really equivalent to either. This indicates that the value generated by
764 * this operation is high-precision and any code transformations that touch
765 * it must ensure that the resulting value is bit-for-bit identical to the
766 * original.
767 */
768 bool exact;
769
770 nir_alu_dest dest;
771 nir_alu_src src[];
772 } nir_alu_instr;
773
774 void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
775 nir_alu_instr *instr);
776 void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
777 nir_alu_instr *instr);
778
779 /* is this source channel used? */
780 static inline bool
781 nir_alu_instr_channel_used(nir_alu_instr *instr, unsigned src, unsigned channel)
782 {
783 if (nir_op_infos[instr->op].input_sizes[src] > 0)
784 return channel < nir_op_infos[instr->op].input_sizes[src];
785
786 return (instr->dest.write_mask >> channel) & 1;
787 }
788
789 /*
790 * For instructions whose destinations are SSA, get the number of channels
791 * used for a source
792 */
793 static inline unsigned
794 nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
795 {
796 assert(instr->dest.dest.is_ssa);
797
798 if (nir_op_infos[instr->op].input_sizes[src] > 0)
799 return nir_op_infos[instr->op].input_sizes[src];
800
801 return instr->dest.dest.ssa.num_components;
802 }
803
804 bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
805 unsigned src1, unsigned src2);
806
807 typedef enum {
808 nir_deref_type_var,
809 nir_deref_type_array,
810 nir_deref_type_struct
811 } nir_deref_type;
812
813 typedef struct nir_deref {
814 nir_deref_type deref_type;
815 struct nir_deref *child;
816 const struct glsl_type *type;
817 } nir_deref;
818
819 typedef struct {
820 nir_deref deref;
821
822 nir_variable *var;
823 } nir_deref_var;
824
825 /* This enum describes how the array is referenced. If the deref is
826 * direct then the base_offset is used. If the deref is indirect then
827 * offset is given by base_offset + indirect. If the deref is a wildcard
828 * then the deref refers to all of the elements of the array at the same
829 * time. Wildcard dereferences are only ever allowed in copy_var
830 * intrinsics and the source and destination derefs must have matching
831 * wildcards.
832 */
833 typedef enum {
834 nir_deref_array_type_direct,
835 nir_deref_array_type_indirect,
836 nir_deref_array_type_wildcard,
837 } nir_deref_array_type;
838
839 typedef struct {
840 nir_deref deref;
841
842 nir_deref_array_type deref_array_type;
843 unsigned base_offset;
844 nir_src indirect;
845 } nir_deref_array;
846
847 typedef struct {
848 nir_deref deref;
849
850 unsigned index;
851 } nir_deref_struct;
852
853 NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
854 deref_type, nir_deref_type_var)
855 NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
856 deref_type, nir_deref_type_array)
857 NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
858 deref_type, nir_deref_type_struct)
859
860 /* Returns the last deref in the chain. */
861 static inline nir_deref *
862 nir_deref_tail(nir_deref *deref)
863 {
864 while (deref->child)
865 deref = deref->child;
866 return deref;
867 }
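/* Illustrative sketch (not part of the original header): walking a deref
 * chain through the child pointers, in the same style as nir_deref_tail()
 * above.  The function name is made up for this example. */
static inline unsigned
example_deref_chain_length(nir_deref *deref)
{
   unsigned length = 1;
   while (deref->child) {
      deref = deref->child;
      length++;
   }
   return length;
}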
868
869 typedef struct {
870 nir_instr instr;
871
872 unsigned num_params;
873 nir_deref_var **params;
874 nir_deref_var *return_deref;
875
876 struct nir_function *callee;
877 } nir_call_instr;
878
879 #define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
880 num_variables, num_indices, idx0, idx1, idx2, flags) \
881 nir_intrinsic_##name,
882
883 #define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,
884
885 typedef enum {
886 #include "nir_intrinsics.h"
887 nir_num_intrinsics = nir_last_intrinsic + 1
888 } nir_intrinsic_op;
889
890 #define NIR_INTRINSIC_MAX_CONST_INDEX 3
891
892 /** Represents an intrinsic
893 *
894 * An intrinsic is an instruction type for handling things that are
895 * more-or-less regular operations but don't just consume and produce SSA
896 * values like ALU operations do. Intrinsics are not for things that have
897 * special semantic meaning such as phi nodes and parallel copies.
898 * Examples of intrinsics include variable load/store operations, system
899 * value loads, and the like. Even though texturing more-or-less falls
900 * under this category, texturing is its own instruction type because
901 * trying to represent texturing with intrinsics would lead to a
902 * combinatorial explosion of intrinsic opcodes.
903 *
904 * By having a single instruction type for handling a lot of different
905 * cases, optimization passes can look for intrinsics and, for the most
906 * part, completely ignore them. Each intrinsic type also has a few
907 * possible flags that govern whether or not they can be reordered or
908 * eliminated. That way passes like dead code elimination can still work
909 * on intrinsics without understanding the meaning of each.
910 *
911 * Each intrinsic has some number of constant indices, some number of
912 * variables, and some number of sources. What these sources, variables,
913 * and indices mean depends on the intrinsic and is documented with the
914 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
915 * instructions are the only types of instruction that can operate on
916 * variables.
917 */
918 typedef struct {
919 nir_instr instr;
920
921 nir_intrinsic_op intrinsic;
922
923 nir_dest dest;
924
925 /** number of components if this is a vectorized intrinsic
926 *
927 * Similarly to ALU operations, some intrinsics are vectorized.
928 * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
929 * For vectorized intrinsics, the num_components field specifies the
930 * number of destination components and the number of source components
931 * for all sources with nir_intrinsic_infos.src_components[i] == 0.
932 */
933 uint8_t num_components;
934
935 int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];
936
937 nir_deref_var *variables[2];
938
939 nir_src src[];
940 } nir_intrinsic_instr;
941
942 /**
943 * \name NIR intrinsics semantic flags
944 *
945 * information about what the compiler can do with the intrinsics.
946 *
947 * \sa nir_intrinsic_info::flags
948 */
949 typedef enum {
950 /**
951 * Whether the intrinsic can be safely eliminated when none of its
952 * output values are being used.
953 */
954 NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),
955
956 /**
957 * Whether the intrinsic can be reordered with respect to any other
958 * intrinsic, i.e. whether the only reordering dependencies of the
959 * intrinsic are due to the register reads/writes.
960 */
961 NIR_INTRINSIC_CAN_REORDER = (1 << 1),
962 } nir_intrinsic_semantic_flag;
963
964 /**
965 * \name NIR intrinsics const-index flag
966 *
967 * Indicates the usage of a const_index slot.
968 *
969 * \sa nir_intrinsic_info::index_map
970 */
971 typedef enum {
972 /**
973 * Generally, instructions that take an offset src argument can encode
974 * a constant 'base' value which is added to the offset.
975 */
976 NIR_INTRINSIC_BASE = 1,
977
978 /**
979 * For store instructions, a writemask for the store.
980 */
981 NIR_INTRINSIC_WRMASK = 2,
982
983 /**
984 * The stream-id for GS emit_vertex/end_primitive intrinsics.
985 */
986 NIR_INTRINSIC_STREAM_ID = 3,
987
988 /**
989 * The clip-plane id for load_user_clip_plane intrinsic.
990 */
991 NIR_INTRINSIC_UCP_ID = 4,
992
993 /**
994 * The amount of data, starting from BASE, that this instruction may
995 * access. This is used to provide bounds if the offset is not constant.
996 */
997 NIR_INTRINSIC_RANGE = 5,
998
999 /**
1000 * The Vulkan descriptor set for vulkan_resource_index intrinsic.
1001 */
1002 NIR_INTRINSIC_DESC_SET = 6,
1003
1004 /**
1005 * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
1006 */
1007 NIR_INTRINSIC_BINDING = 7,
1008
1009 /**
1010 * Component offset.
1011 */
1012 NIR_INTRINSIC_COMPONENT = 8,
1013
1014 /**
1015 * Interpolation mode (only meaningful for FS inputs).
1016 */
1017 NIR_INTRINSIC_INTERP_MODE = 9,
1018
1019 NIR_INTRINSIC_NUM_INDEX_FLAGS,
1020
1021 } nir_intrinsic_index_flag;
1022
1023 #define NIR_INTRINSIC_MAX_INPUTS 4
1024
1025 typedef struct {
1026 const char *name;
1027
1028 unsigned num_srcs; /** < number of register/SSA inputs */
1029
1030 /** number of components of each input register
1031 *
1032 * If this value is 0, the number of components is given by the
1033 * num_components field of nir_intrinsic_instr.
1034 */
1035 unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];
1036
1037 bool has_dest;
1038
1039 /** number of components of the output register
1040 *
1041 * If this value is 0, the number of components is given by the
1042 * num_components field of nir_intrinsic_instr.
1043 */
1044 unsigned dest_components;
1045
1046 /** the number of inputs/outputs that are variables */
1047 unsigned num_variables;
1048
1049 /** the number of constant indices used by the intrinsic */
1050 unsigned num_indices;
1051
1052 /** indicates the usage of intr->const_index[n] */
1053 unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];
1054
1055 /** semantic flags for calls to this intrinsic */
1056 nir_intrinsic_semantic_flag flags;
1057 } nir_intrinsic_info;
1058
1059 extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];
1060
1061
1062 #define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
1063 static inline type \
1064 nir_intrinsic_##name(nir_intrinsic_instr *instr) \
1065 { \
1066 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1067 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1068 return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
1069 } \
1070 static inline void \
1071 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
1072 { \
1073 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1074 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1075 instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
1076 }
1077
1078 INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
1079 INTRINSIC_IDX_ACCESSORS(base, BASE, int)
1080 INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
1081 INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
1082 INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
1083 INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
1084 INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
1085 INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
1086 INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
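/* Illustrative sketch (not part of the original header): building a
 * vectorized load_uniform and filling its const_index slots through the
 * accessors above.  "shader" and "offset_def" are hypothetical, the dest
 * still needs to be initialized, and nir_intrinsic_instr_create() is
 * declared later in this header. */
#if 0
nir_intrinsic_instr *load =
   nir_intrinsic_instr_create(shader, nir_intrinsic_load_uniform);
load->num_components = 4;              /* vectorized intrinsic: vec4 result */
nir_intrinsic_set_base(load, 16);      /* constant base added to the offset */
nir_intrinsic_set_range(load, 64);     /* bytes reachable from the base */
load->src[0] = nir_src_for_ssa(offset_def); /* dynamic offset */
#endif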
1087
1088 /**
1089 * \group texture information
1090 *
1091 * This gives semantic information about textures which is useful to the
1092 * frontend, the backend, and lowering passes, but not the optimizer.
1093 */
1094
1095 typedef enum {
1096 nir_tex_src_coord,
1097 nir_tex_src_projector,
1098 nir_tex_src_comparitor, /* shadow comparitor */
1099 nir_tex_src_offset,
1100 nir_tex_src_bias,
1101 nir_tex_src_lod,
1102 nir_tex_src_ms_index, /* MSAA sample index */
1103 nir_tex_src_ms_mcs, /* MSAA compression value */
1104 nir_tex_src_ddx,
1105 nir_tex_src_ddy,
1106 nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
1107 nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
1108 nir_tex_src_plane, /* < selects plane for planar textures */
1109 nir_num_tex_src_types
1110 } nir_tex_src_type;
1111
1112 typedef struct {
1113 nir_src src;
1114 nir_tex_src_type src_type;
1115 } nir_tex_src;
1116
1117 typedef enum {
1118 nir_texop_tex, /**< Regular texture look-up */
1119 nir_texop_txb, /**< Texture look-up with LOD bias */
1120 nir_texop_txl, /**< Texture look-up with explicit LOD */
1121 nir_texop_txd, /**< Texture look-up with partial derivatives */
1122 nir_texop_txf, /**< Texel fetch with explicit LOD */
1123 nir_texop_txf_ms, /**< Multisample texture fetch */
1124 nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
1125 nir_texop_txs, /**< Texture size */
1126 nir_texop_lod, /**< Texture lod query */
1127 nir_texop_tg4, /**< Texture gather */
1128 nir_texop_query_levels, /**< Texture levels query */
1129 nir_texop_texture_samples, /**< Texture samples query */
1130 nir_texop_samples_identical, /**< Query whether all samples are definitely
1131 * identical.
1132 */
1133 } nir_texop;
1134
1135 typedef struct {
1136 nir_instr instr;
1137
1138 enum glsl_sampler_dim sampler_dim;
1139 nir_alu_type dest_type;
1140
1141 nir_texop op;
1142 nir_dest dest;
1143 nir_tex_src *src;
1144 unsigned num_srcs, coord_components;
1145 bool is_array, is_shadow;
1146
1147 /**
1148 * If is_shadow is true, whether this is the old-style shadow that outputs 4
1149 * components or the new-style shadow that outputs 1 component.
1150 */
1151 bool is_new_style_shadow;
1152
1153 /* gather component selector */
1154 unsigned component : 2;
1155
1156 /** The texture index
1157 *
1158 * If this texture instruction has a nir_tex_src_texture_offset source,
1159 * then the texture index is given by texture_index + texture_offset.
1160 */
1161 unsigned texture_index;
1162
1163 /** The size of the texture array or 0 if it's not an array */
1164 unsigned texture_array_size;
1165
1166 /** The texture deref
1167 *
1168 * If this is null, use texture_index instead.
1169 */
1170 nir_deref_var *texture;
1171
1172 /** The sampler index
1173 *
1174 * The following operations do not require a sampler and, as such, this
1175 * field should be ignored:
1176 * - nir_texop_txf
1177 * - nir_texop_txf_ms
1178 * - nir_texop_txs
1179 * - nir_texop_lod
1180 * - nir_texop_tg4
1181 * - nir_texop_query_levels
1182 * - nir_texop_texture_samples
1183 * - nir_texop_samples_identical
1184 *
1185 * If this texture instruction has a nir_tex_src_sampler_offset source,
1186 * then the sampler index is given by sampler_index + sampler_offset.
1187 */
1188 unsigned sampler_index;
1189
1190 /** The sampler deref
1191 *
1192 * If this is null, use sampler_index instead.
1193 */
1194 nir_deref_var *sampler;
1195 } nir_tex_instr;
1196
1197 static inline unsigned
1198 nir_tex_instr_dest_size(nir_tex_instr *instr)
1199 {
1200 switch (instr->op) {
1201 case nir_texop_txs: {
1202 unsigned ret;
1203 switch (instr->sampler_dim) {
1204 case GLSL_SAMPLER_DIM_1D:
1205 case GLSL_SAMPLER_DIM_BUF:
1206 ret = 1;
1207 break;
1208 case GLSL_SAMPLER_DIM_2D:
1209 case GLSL_SAMPLER_DIM_CUBE:
1210 case GLSL_SAMPLER_DIM_MS:
1211 case GLSL_SAMPLER_DIM_RECT:
1212 case GLSL_SAMPLER_DIM_EXTERNAL:
1213 case GLSL_SAMPLER_DIM_SUBPASS:
1214 ret = 2;
1215 break;
1216 case GLSL_SAMPLER_DIM_3D:
1217 ret = 3;
1218 break;
1219 default:
1220 unreachable("not reached");
1221 }
1222 if (instr->is_array)
1223 ret++;
1224 return ret;
1225 }
1226
1227 case nir_texop_lod:
1228 return 2;
1229
1230 case nir_texop_texture_samples:
1231 case nir_texop_query_levels:
1232 case nir_texop_samples_identical:
1233 return 1;
1234
1235 default:
1236 if (instr->is_shadow && instr->is_new_style_shadow)
1237 return 1;
1238
1239 return 4;
1240 }
1241 }
1242
1243 /* Returns true if this texture operation queries something about the texture
1244 * rather than actually sampling it.
1245 */
1246 static inline bool
1247 nir_tex_instr_is_query(nir_tex_instr *instr)
1248 {
1249 switch (instr->op) {
1250 case nir_texop_txs:
1251 case nir_texop_lod:
1252 case nir_texop_texture_samples:
1253 case nir_texop_query_levels:
1254 case nir_texop_txf_ms_mcs:
1255 return true;
1256 case nir_texop_tex:
1257 case nir_texop_txb:
1258 case nir_texop_txl:
1259 case nir_texop_txd:
1260 case nir_texop_txf:
1261 case nir_texop_txf_ms:
1262 case nir_texop_tg4:
1263 return false;
1264 default:
1265 unreachable("Invalid texture opcode");
1266 }
1267 }
1268
1269 static inline nir_alu_type
1270 nir_tex_instr_src_type(nir_tex_instr *instr, unsigned src)
1271 {
1272 switch (instr->src[src].src_type) {
1273 case nir_tex_src_coord:
1274 switch (instr->op) {
1275 case nir_texop_txf:
1276 case nir_texop_txf_ms:
1277 case nir_texop_txf_ms_mcs:
1278 case nir_texop_samples_identical:
1279 return nir_type_int;
1280
1281 default:
1282 return nir_type_float;
1283 }
1284
1285 case nir_tex_src_lod:
1286 switch (instr->op) {
1287 case nir_texop_txs:
1288 case nir_texop_txf:
1289 return nir_type_int;
1290
1291 default:
1292 return nir_type_float;
1293 }
1294
1295 case nir_tex_src_projector:
1296 case nir_tex_src_comparitor:
1297 case nir_tex_src_bias:
1298 case nir_tex_src_ddx:
1299 case nir_tex_src_ddy:
1300 return nir_type_float;
1301
1302 case nir_tex_src_offset:
1303 case nir_tex_src_ms_index:
1304 case nir_tex_src_texture_offset:
1305 case nir_tex_src_sampler_offset:
1306 return nir_type_int;
1307
1308 default:
1309 unreachable("Invalid texture source type");
1310 }
1311 }
1312
1313 static inline unsigned
1314 nir_tex_instr_src_size(nir_tex_instr *instr, unsigned src)
1315 {
1316 if (instr->src[src].src_type == nir_tex_src_coord)
1317 return instr->coord_components;
1318
1319 /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
1320 if (instr->src[src].src_type == nir_tex_src_ms_mcs)
1321 return 4;
1322
1323 if (instr->src[src].src_type == nir_tex_src_offset ||
1324 instr->src[src].src_type == nir_tex_src_ddx ||
1325 instr->src[src].src_type == nir_tex_src_ddy) {
1326 if (instr->is_array)
1327 return instr->coord_components - 1;
1328 else
1329 return instr->coord_components;
1330 }
1331
1332 return 1;
1333 }
1334
1335 static inline int
1336 nir_tex_instr_src_index(nir_tex_instr *instr, nir_tex_src_type type)
1337 {
1338 for (unsigned i = 0; i < instr->num_srcs; i++)
1339 if (instr->src[i].src_type == type)
1340 return (int) i;
1341
1342 return -1;
1343 }
1344
1345 void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
1346
1347 typedef struct {
1348 nir_instr instr;
1349
1350 nir_const_value value;
1351
1352 nir_ssa_def def;
1353 } nir_load_const_instr;
1354
1355 typedef enum {
1356 nir_jump_return,
1357 nir_jump_break,
1358 nir_jump_continue,
1359 } nir_jump_type;
1360
1361 typedef struct {
1362 nir_instr instr;
1363 nir_jump_type type;
1364 } nir_jump_instr;
1365
1366 /* creates a new SSA variable in an undefined state */
1367
1368 typedef struct {
1369 nir_instr instr;
1370 nir_ssa_def def;
1371 } nir_ssa_undef_instr;
1372
1373 typedef struct {
1374 struct exec_node node;
1375
1376 /* The predecessor block corresponding to this source */
1377 struct nir_block *pred;
1378
1379 nir_src src;
1380 } nir_phi_src;
1381
1382 #define nir_foreach_phi_src(phi_src, phi) \
1383 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
1384 #define nir_foreach_phi_src_safe(phi_src, phi) \
1385 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
1386
1387 typedef struct {
1388 nir_instr instr;
1389
1390 struct exec_list srcs; /** < list of nir_phi_src */
1391
1392 nir_dest dest;
1393 } nir_phi_instr;
1394
1395 typedef struct {
1396 struct exec_node node;
1397 nir_src src;
1398 nir_dest dest;
1399 } nir_parallel_copy_entry;
1400
1401 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
1402 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
1403
1404 typedef struct {
1405 nir_instr instr;
1406
1407 /* A list of nir_parallel_copy_entry's. The sources of all of the
1408 * entries are copied to the corresponding destinations "in parallel".
1409 * In other words, if we have two entries: a -> b and b -> a, the values
1410 * get swapped.
1411 */
1412 struct exec_list entries;
1413 } nir_parallel_copy_instr;
1414
1415 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
1416 type, nir_instr_type_alu)
1417 NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
1418 type, nir_instr_type_call)
1419 NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
1420 type, nir_instr_type_jump)
1421 NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
1422 type, nir_instr_type_tex)
1423 NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
1424 type, nir_instr_type_intrinsic)
1425 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
1426 type, nir_instr_type_load_const)
1427 NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
1428 type, nir_instr_type_ssa_undef)
1429 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
1430 type, nir_instr_type_phi)
1431 NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
1432 nir_parallel_copy_instr, instr,
1433 type, nir_instr_type_parallel_copy)
1434
1435 /*
1436 * Control flow
1437 *
1438 * Control flow consists of a tree of control flow nodes, which include
1439 * if-statements and loops. The leaves of the tree are basic blocks, lists of
1440 * instructions that always run start-to-finish. Each basic block also keeps
1441 * track of its successors (blocks which may run immediately after the current
1442 * block) and predecessors (blocks which could have run immediately before the
1443 * current block). Each function also has a start block and an end block
1444 * (which is always empty) that all return statements point to. Together, all the
1445 * blocks with their predecessors and successors make up the control flow
1446 * graph (CFG) of the function. There are helpers that modify the tree of
1447 * control flow nodes while modifying the CFG appropriately; these should be
1448 * used instead of modifying the tree directly.
1449 */
1450
1451 typedef enum {
1452 nir_cf_node_block,
1453 nir_cf_node_if,
1454 nir_cf_node_loop,
1455 nir_cf_node_function
1456 } nir_cf_node_type;
1457
1458 typedef struct nir_cf_node {
1459 struct exec_node node;
1460 nir_cf_node_type type;
1461 struct nir_cf_node *parent;
1462 } nir_cf_node;
1463
1464 typedef struct nir_block {
1465 nir_cf_node cf_node;
1466
1467 struct exec_list instr_list; /** < list of nir_instr */
1468
1469 /** generic block index; generated by nir_index_blocks */
1470 unsigned index;
1471
1472 /*
1473 * Each block can only have up to 2 successors, so we put them in a simple
1474 * array - no need for anything more complicated.
1475 */
1476 struct nir_block *successors[2];
1477
1478 /* Set of nir_block predecessors in the CFG */
1479 struct set *predecessors;
1480
1481 /*
1482 * this node's immediate dominator in the dominance tree - set to NULL for
1483 * the start block.
1484 */
1485 struct nir_block *imm_dom;
1486
1487 /* This node's children in the dominance tree */
1488 unsigned num_dom_children;
1489 struct nir_block **dom_children;
1490
1491 /* Set of nir_block's on the dominance frontier of this block */
1492 struct set *dom_frontier;
1493
1494 /*
1495 * These two indices have the property that dom_{pre,post}_index for each
1496 * child of this block in the dominance tree will always be between
1497 * dom_pre_index and dom_post_index for this block, which makes testing if
1498 * a given block is dominated by another block an O(1) operation.
1499 */
1500 unsigned dom_pre_index, dom_post_index;
1501
1502 /* live in and out for this block; used for liveness analysis */
1503 BITSET_WORD *live_in;
1504 BITSET_WORD *live_out;
1505 } nir_block;
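/* Illustrative sketch (not part of the original header): the O(1)
 * dominance query the dom_pre_index/dom_post_index pair enables.  It
 * assumes dominance metadata is up to date; NIR's dominance code provides
 * a helper along these lines, but this example name is made up. */
static inline bool
example_block_dominates(nir_block *parent, nir_block *child)
{
   return parent->dom_pre_index <= child->dom_pre_index &&
          parent->dom_post_index >= child->dom_post_index;
}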
1506
1507 static inline nir_instr *
1508 nir_block_first_instr(nir_block *block)
1509 {
1510 struct exec_node *head = exec_list_get_head(&block->instr_list);
1511 return exec_node_data(nir_instr, head, node);
1512 }
1513
1514 static inline nir_instr *
1515 nir_block_last_instr(nir_block *block)
1516 {
1517 struct exec_node *tail = exec_list_get_tail(&block->instr_list);
1518 return exec_node_data(nir_instr, tail, node);
1519 }
1520
1521 #define nir_foreach_instr(instr, block) \
1522 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
1523 #define nir_foreach_instr_reverse(instr, block) \
1524 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
1525 #define nir_foreach_instr_safe(instr, block) \
1526 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
1527 #define nir_foreach_instr_reverse_safe(instr, block) \
1528 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
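/* Illustrative sketch (not part of the original header): the iteration
 * macros above in action, counting the ALU instructions in one block. */
static inline unsigned
example_count_alu_instrs(nir_block *block)
{
   unsigned count = 0;
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_alu)
         count++;
   }
   return count;
}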
1529
1530 typedef struct nir_if {
1531 nir_cf_node cf_node;
1532 nir_src condition;
1533
1534 struct exec_list then_list; /** < list of nir_cf_node */
1535 struct exec_list else_list; /** < list of nir_cf_node */
1536 } nir_if;
1537
1538 typedef struct {
1539 nir_cf_node cf_node;
1540
1541 struct exec_list body; /** < list of nir_cf_node */
1542 } nir_loop;
1543
1544 /**
1545 * Various bits of metadata that may be created or required by
1546 * optimization and analysis passes
1547 */
1548 typedef enum {
1549 nir_metadata_none = 0x0,
1550 nir_metadata_block_index = 0x1,
1551 nir_metadata_dominance = 0x2,
1552 nir_metadata_live_ssa_defs = 0x4,
1553 nir_metadata_not_properly_reset = 0x8,
1554 } nir_metadata;
1555
1556 typedef struct {
1557 nir_cf_node cf_node;
1558
1559 /** pointer to the function of which this is an implementation */
1560 struct nir_function *function;
1561
1562 struct exec_list body; /** < list of nir_cf_node */
1563
1564 nir_block *end_block;
1565
1566 /** list for all local variables in the function */
1567 struct exec_list locals;
1568
1569 /** array of variables used as parameters */
1570 unsigned num_params;
1571 nir_variable **params;
1572
1573 /** variable used to hold the result of the function */
1574 nir_variable *return_var;
1575
1576 /** list of local registers in the function */
1577 struct exec_list registers;
1578
1579 /** next available local register index */
1580 unsigned reg_alloc;
1581
1582 /** next available SSA value index */
1583 unsigned ssa_alloc;
1584
1585 /* total number of basic blocks, only valid when block_index_dirty = false */
1586 unsigned num_blocks;
1587
1588 nir_metadata valid_metadata;
1589 } nir_function_impl;
1590
1591 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1592 nir_start_block(nir_function_impl *impl)
1593 {
1594 return (nir_block *) impl->body.head_sentinel.next;
1595 }
1596
1597 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
1598 nir_impl_last_block(nir_function_impl *impl)
1599 {
1600 return (nir_block *) impl->body.tail_sentinel.prev;
1601 }
1602
1603 static inline nir_cf_node *
1604 nir_cf_node_next(nir_cf_node *node)
1605 {
1606 struct exec_node *next = exec_node_get_next(&node->node);
1607 if (exec_node_is_tail_sentinel(next))
1608 return NULL;
1609 else
1610 return exec_node_data(nir_cf_node, next, node);
1611 }
1612
1613 static inline nir_cf_node *
1614 nir_cf_node_prev(nir_cf_node *node)
1615 {
1616 struct exec_node *prev = exec_node_get_prev(&node->node);
1617 if (exec_node_is_head_sentinel(prev))
1618 return NULL;
1619 else
1620 return exec_node_data(nir_cf_node, prev, node);
1621 }
1622
1623 static inline bool
1624 nir_cf_node_is_first(const nir_cf_node *node)
1625 {
1626 return exec_node_is_head_sentinel(node->node.prev);
1627 }
1628
1629 static inline bool
1630 nir_cf_node_is_last(const nir_cf_node *node)
1631 {
1632 return exec_node_is_tail_sentinel(node->node.next);
1633 }
1634
1635 NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
1636 type, nir_cf_node_block)
1637 NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
1638 type, nir_cf_node_if)
1639 NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
1640 type, nir_cf_node_loop)
1641 NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
1642 nir_function_impl, cf_node, type, nir_cf_node_function)
1643
1644 static inline nir_block *
1645 nir_if_first_then_block(nir_if *if_stmt)
1646 {
1647 struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
1648 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1649 }
1650
1651 static inline nir_block *
1652 nir_if_last_then_block(nir_if *if_stmt)
1653 {
1654 struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
1655 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1656 }
1657
1658 static inline nir_block *
1659 nir_if_first_else_block(nir_if *if_stmt)
1660 {
1661 struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
1662 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1663 }
1664
1665 static inline nir_block *
1666 nir_if_last_else_block(nir_if *if_stmt)
1667 {
1668 struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
1669 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1670 }
1671
1672 static inline nir_block *
1673 nir_loop_first_block(nir_loop *loop)
1674 {
1675 struct exec_node *head = exec_list_get_head(&loop->body);
1676 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
1677 }
1678
1679 static inline nir_block *
1680 nir_loop_last_block(nir_loop *loop)
1681 {
1682 struct exec_node *tail = exec_list_get_tail(&loop->body);
1683 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
1684 }
1685
1686 typedef enum {
1687 nir_parameter_in,
1688 nir_parameter_out,
1689 nir_parameter_inout,
1690 } nir_parameter_type;
1691
1692 typedef struct {
1693 nir_parameter_type param_type;
1694 const struct glsl_type *type;
1695 } nir_parameter;
1696
1697 typedef struct nir_function {
1698 struct exec_node node;
1699
1700 const char *name;
1701 struct nir_shader *shader;
1702
1703 unsigned num_params;
1704 nir_parameter *params;
1705 const struct glsl_type *return_type;
1706
1707 /** The implementation of this function.
1708 *
1709 * If the function is only declared and not implemented, this is NULL.
1710 */
1711 nir_function_impl *impl;
1712 } nir_function;
1713
1714 typedef struct nir_shader_compiler_options {
1715 bool lower_fdiv;
1716 bool lower_ffma;
1717 bool fuse_ffma;
1718 bool lower_flrp32;
1719 /** Lowers 64-bit flrp when the driver does not support doubles */
1720 bool lower_flrp64;
1721 bool lower_fpow;
1722 bool lower_fsat;
1723 bool lower_fsqrt;
1724 bool lower_fmod32;
1725 bool lower_fmod64;
1726 bool lower_bitfield_extract;
1727 bool lower_bitfield_insert;
1728 bool lower_uadd_carry;
1729 bool lower_usub_borrow;
1730 /** lowers fneg and ineg to fsub and isub. */
1731 bool lower_negate;
1732 /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
1733 bool lower_sub;
1734
1735 /* lower {slt,sge,seq,sne} to {flt,fge,feq,fne} + b2f: */
1736 bool lower_scmp;
1737
1738 /** enables rules to lower idiv by power-of-two: */
1739 bool lower_idiv;
1740
1741 /* Does the native fdot instruction replicate its result for four
1742 * components? If so, then opt_algebraic_late will turn all fdotN
1743 * instructions into fdot_replicatedN instructions.
1744 */
1745 bool fdot_replicates;
1746
1747 /** lowers ffract to fsub+ffloor: */
1748 bool lower_ffract;
1749
1750 bool lower_pack_half_2x16;
1751 bool lower_pack_unorm_2x16;
1752 bool lower_pack_snorm_2x16;
1753 bool lower_pack_unorm_4x8;
1754 bool lower_pack_snorm_4x8;
1755 bool lower_unpack_half_2x16;
1756 bool lower_unpack_unorm_2x16;
1757 bool lower_unpack_snorm_2x16;
1758 bool lower_unpack_unorm_4x8;
1759 bool lower_unpack_snorm_4x8;
1760
1761 bool lower_extract_byte;
1762 bool lower_extract_word;
1763
1764 /**
1765 * Does the driver support real 32-bit integers? (Otherwise, integers
1766 * are simulated by floats.)
1767 */
1768 bool native_integers;
1769
1770 /* Indicates that the driver only has zero-based vertex id */
1771 bool vertex_id_zero_based;
1772
1773 bool lower_cs_local_index_from_id;
1774
1775 /**
1776 * Should nir_lower_io() create load_interpolated_input intrinsics?
1777 *
1778 * If not, it generates regular load_input intrinsics and interpolation
1779 * information must be inferred from the list of input nir_variables.
1780 */
1781 bool use_interpolated_input_intrinsics;
1782 } nir_shader_compiler_options;
1783
1784 typedef struct nir_shader {
1785 /** list of uniforms (nir_variable) */
1786 struct exec_list uniforms;
1787
1788 /** list of inputs (nir_variable) */
1789 struct exec_list inputs;
1790
1791 /** list of outputs (nir_variable) */
1792 struct exec_list outputs;
1793
1794 /** list of shared compute variables (nir_variable) */
1795 struct exec_list shared;
1796
1797 /** Set of driver-specific options for the shader.
1798 *
1799 * The memory for the options is expected to be kept in a single static
1800 * copy by the driver.
1801 */
1802 const struct nir_shader_compiler_options *options;
1803
1804 /** Various bits of compile-time information about a given shader */
1805 struct shader_info *info;
1806
1807 /** list of global variables in the shader (nir_variable) */
1808 struct exec_list globals;
1809
1810 /** list of system value variables in the shader (nir_variable) */
1811 struct exec_list system_values;
1812
1813 struct exec_list functions; /** < list of nir_function */
1814
1815 /** list of global register in the shader */
1816 struct exec_list registers;
1817
1818 /** next available global register index */
1819 unsigned reg_alloc;
1820
1821 /**
1822 * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
1823 * access plus one
1824 */
1825 unsigned num_inputs, num_uniforms, num_outputs, num_shared;
1826
1827 /** The shader stage, such as MESA_SHADER_VERTEX. */
1828 gl_shader_stage stage;
1829 } nir_shader;
1830
1831 static inline nir_function_impl *
1832 nir_shader_get_entrypoint(nir_shader *shader)
1833 {
1834 assert(exec_list_length(&shader->functions) == 1);
1835 struct exec_node *func_node = exec_list_get_head(&shader->functions);
1836 nir_function *func = exec_node_data(nir_function, func_node, node);
1837 assert(func->return_type == glsl_void_type());
1838 assert(func->num_params == 0);
1839 assert(func->impl);
1840 return func->impl;
1841 }
1842
1843 #define nir_foreach_function(func, shader) \
1844 foreach_list_typed(nir_function, func, node, &(shader)->functions)
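/* Illustrative sketch (not part of the original header): nir_foreach_function
 * in action, counting how many functions actually have an implementation. */
static inline unsigned
example_count_impls(nir_shader *shader)
{
   unsigned count = 0;
   nir_foreach_function(func, shader) {
      if (func->impl)
         count++;
   }
   return count;
}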
1845
1846 nir_shader *nir_shader_create(void *mem_ctx,
1847 gl_shader_stage stage,
1848 const nir_shader_compiler_options *options,
1849 shader_info *si);
1850
1851 /** creates a register, including assigning it an index and adding it to the list */
1852 nir_register *nir_global_reg_create(nir_shader *shader);
1853
1854 nir_register *nir_local_reg_create(nir_function_impl *impl);
1855
1856 void nir_reg_remove(nir_register *reg);
1857
1858 /** Adds a variable to the appropriate list in nir_shader */
1859 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
1860
1861 static inline void
1862 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
1863 {
1864 assert(var->data.mode == nir_var_local);
1865 exec_list_push_tail(&impl->locals, &var->node);
1866 }
1867
1868 /** creates a variable, sets a few defaults, and adds it to the list */
1869 nir_variable *nir_variable_create(nir_shader *shader,
1870 nir_variable_mode mode,
1871 const struct glsl_type *type,
1872 const char *name);
1873 /** creates a local variable and adds it to the list */
1874 nir_variable *nir_local_variable_create(nir_function_impl *impl,
1875 const struct glsl_type *type,
1876 const char *name);
1877
1878 /** creates a function and adds it to the shader's list of functions */
1879 nir_function *nir_function_create(nir_shader *shader, const char *name);
1880
1881 nir_function_impl *nir_function_impl_create(nir_function *func);
1882 /** creates a function_impl that isn't tied to any particular function */
1883 nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
1884
1885 nir_block *nir_block_create(nir_shader *shader);
1886 nir_if *nir_if_create(nir_shader *shader);
1887 nir_loop *nir_loop_create(nir_shader *shader);
1888
1889 nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
1890
1891 /** requests that the given pieces of metadata be generated */
1892 void nir_metadata_require(nir_function_impl *impl, nir_metadata required);
1893 /** dirties all but the preserved metadata */
1894 void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
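
/* A sketch of the usual pattern at the end of a pass that changes
 * instructions but leaves the CFG alone, assuming the standard
 * nir_metadata_block_index and nir_metadata_dominance flags declared
 * earlier in this header:
 *
 *    nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                nir_metadata_dominance);
 */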
1895
1896 /** creates an instruction with default swizzle/writemask/etc. and NULL registers */
1897 nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
1898
1899 nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
1900
1901 nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
1902 unsigned num_components,
1903 unsigned bit_size);
1904
1905 nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
1906 nir_intrinsic_op op);
1907
1908 nir_call_instr *nir_call_instr_create(nir_shader *shader,
1909 nir_function *callee);
1910
1911 nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
1912
1913 nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
1914
1915 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
1916
1917 nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
1918 unsigned num_components,
1919 unsigned bit_size);
1920
1921 nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
1922 nir_deref_array *nir_deref_array_create(void *mem_ctx);
1923 nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);
1924
1925 nir_deref *nir_copy_deref(void *mem_ctx, nir_deref *deref);
1926
1927 typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
1928 bool nir_deref_foreach_leaf(nir_deref_var *deref,
1929 nir_deref_foreach_leaf_cb cb, void *state);
1930
1931 nir_load_const_instr *
1932 nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);
1933
1934 /**
1935 * NIR Cursors and Instruction Insertion API
1936 * @{
1937 *
1938 * A tiny struct representing a point to insert/extract instructions or
1939 * control flow nodes. Helps reduce the combinatorial explosion of possible
1940 * points to insert/extract.
1941 *
1942 * \sa nir_control_flow.h
1943 */
1944 typedef enum {
1945 nir_cursor_before_block,
1946 nir_cursor_after_block,
1947 nir_cursor_before_instr,
1948 nir_cursor_after_instr,
1949 } nir_cursor_option;
1950
1951 typedef struct {
1952 nir_cursor_option option;
1953 union {
1954 nir_block *block;
1955 nir_instr *instr;
1956 };
1957 } nir_cursor;
1958
1959 static inline nir_block *
1960 nir_cursor_current_block(nir_cursor cursor)
1961 {
1962 if (cursor.option == nir_cursor_before_instr ||
1963 cursor.option == nir_cursor_after_instr) {
1964 return cursor.instr->block;
1965 } else {
1966 return cursor.block;
1967 }
1968 }
1969
1970 bool nir_cursors_equal(nir_cursor a, nir_cursor b);
1971
1972 static inline nir_cursor
1973 nir_before_block(nir_block *block)
1974 {
1975 nir_cursor cursor;
1976 cursor.option = nir_cursor_before_block;
1977 cursor.block = block;
1978 return cursor;
1979 }
1980
1981 static inline nir_cursor
1982 nir_after_block(nir_block *block)
1983 {
1984 nir_cursor cursor;
1985 cursor.option = nir_cursor_after_block;
1986 cursor.block = block;
1987 return cursor;
1988 }
1989
1990 static inline nir_cursor
1991 nir_before_instr(nir_instr *instr)
1992 {
1993 nir_cursor cursor;
1994 cursor.option = nir_cursor_before_instr;
1995 cursor.instr = instr;
1996 return cursor;
1997 }
1998
1999 static inline nir_cursor
2000 nir_after_instr(nir_instr *instr)
2001 {
2002 nir_cursor cursor;
2003 cursor.option = nir_cursor_after_instr;
2004 cursor.instr = instr;
2005 return cursor;
2006 }
2007
2008 static inline nir_cursor
2009 nir_after_block_before_jump(nir_block *block)
2010 {
2011 nir_instr *last_instr = nir_block_last_instr(block);
2012 if (last_instr && last_instr->type == nir_instr_type_jump) {
2013 return nir_before_instr(last_instr);
2014 } else {
2015 return nir_after_block(block);
2016 }
2017 }
2018
2019 static inline nir_cursor
2020 nir_before_cf_node(nir_cf_node *node)
2021 {
2022 if (node->type == nir_cf_node_block)
2023 return nir_before_block(nir_cf_node_as_block(node));
2024
2025 return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
2026 }
2027
2028 static inline nir_cursor
2029 nir_after_cf_node(nir_cf_node *node)
2030 {
2031 if (node->type == nir_cf_node_block)
2032 return nir_after_block(nir_cf_node_as_block(node));
2033
2034 return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
2035 }
2036
2037 static inline nir_cursor
2038 nir_after_phis(nir_block *block)
2039 {
2040 nir_foreach_instr(instr, block) {
2041 if (instr->type != nir_instr_type_phi)
2042 return nir_before_instr(instr);
2043 }
2044 return nir_after_block(block);
2045 }
2046
2047 static inline nir_cursor
2048 nir_after_cf_node_and_phis(nir_cf_node *node)
2049 {
2050 if (node->type == nir_cf_node_block)
2051 return nir_after_block(nir_cf_node_as_block(node));
2052
2053 nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
2054
2055 return nir_after_phis(block);
2056 }
2057
2058 static inline nir_cursor
2059 nir_before_cf_list(struct exec_list *cf_list)
2060 {
2061 nir_cf_node *first_node = exec_node_data(nir_cf_node,
2062 exec_list_get_head(cf_list), node);
2063 return nir_before_cf_node(first_node);
2064 }
2065
2066 static inline nir_cursor
2067 nir_after_cf_list(struct exec_list *cf_list)
2068 {
2069 nir_cf_node *last_node = exec_node_data(nir_cf_node,
2070 exec_list_get_tail(cf_list), node);
2071 return nir_after_cf_node(last_node);
2072 }
2073
2074 /**
2075 * Insert a NIR instruction at the given cursor.
2076 *
2077 * Note: This does not update the cursor.
2078 */
2079 void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
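
/* For illustration, a sketch of creating a 32-bit scalar load_const and
 * inserting it immediately before an existing instruction (the constant
 * value is made up):
 *
 *    nir_load_const_instr *load =
 *       nir_load_const_instr_create(shader, 1, 32);
 *    load->value.u32[0] = 42;
 *    nir_instr_insert(nir_before_instr(instr), &load->instr);
 */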
2080
2081 static inline void
2082 nir_instr_insert_before(nir_instr *instr, nir_instr *before)
2083 {
2084 nir_instr_insert(nir_before_instr(instr), before);
2085 }
2086
2087 static inline void
2088 nir_instr_insert_after(nir_instr *instr, nir_instr *after)
2089 {
2090 nir_instr_insert(nir_after_instr(instr), after);
2091 }
2092
2093 static inline void
2094 nir_instr_insert_before_block(nir_block *block, nir_instr *before)
2095 {
2096 nir_instr_insert(nir_before_block(block), before);
2097 }
2098
2099 static inline void
2100 nir_instr_insert_after_block(nir_block *block, nir_instr *after)
2101 {
2102 nir_instr_insert(nir_after_block(block), after);
2103 }
2104
2105 static inline void
2106 nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
2107 {
2108 nir_instr_insert(nir_before_cf_node(node), before);
2109 }
2110
2111 static inline void
2112 nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
2113 {
2114 nir_instr_insert(nir_after_cf_node(node), after);
2115 }
2116
2117 static inline void
2118 nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
2119 {
2120 nir_instr_insert(nir_before_cf_list(list), before);
2121 }
2122
2123 static inline void
2124 nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
2125 {
2126 nir_instr_insert(nir_after_cf_list(list), after);
2127 }
2128
2129 void nir_instr_remove(nir_instr *instr);
2130
2131 /** @} */
2132
2133 typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
2134 typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
2135 typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
2136 bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
2137 void *state);
2138 bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
2139 bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
2140
2141 nir_const_value *nir_src_as_const_value(nir_src src);
2142 bool nir_src_is_dynamically_uniform(nir_src src);
2143 bool nir_srcs_equal(nir_src src1, nir_src src2);
2144 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
2145 void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
2146 void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
2147 void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
2148 nir_dest new_dest);
2149
2150 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
2151 unsigned num_components, unsigned bit_size,
2152 const char *name);
2153 void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
2154 unsigned num_components, unsigned bit_size,
2155 const char *name);
2156 void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
2157 void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
2158 nir_instr *after_me);
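
/* A common idiom (sketched here with a constant-folding flavor) is to
 * rewrite all uses of a def to point at a replacement value and then
 * delete the original instruction:
 *
 *    nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa,
 *                             nir_src_for_ssa(&load->def));
 *    nir_instr_remove(&alu->instr);
 */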
2159
2160 uint8_t nir_ssa_def_components_read(nir_ssa_def *def);
2161
2162 /*
2163 * Finds the next basic block in source-code order; returns NULL if there is
2164 * none.
2165 */
2166
2167 nir_block *nir_block_cf_tree_next(nir_block *block);
2168
2169 /* Performs the opposite of nir_block_cf_tree_next() */
2170
2171 nir_block *nir_block_cf_tree_prev(nir_block *block);
2172
2173 /* Gets the first block in a CF node in source-code order */
2174
2175 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);
2176
2177 /* Gets the last block in a CF node in source-code order */
2178
2179 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
2180
2181 /* Gets the next block after a CF node in source-code order */
2182
2183 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
2184
2185 /* Macros for loops that visit blocks in source-code order */
2186
2187 #define nir_foreach_block(block, impl) \
2188 for (nir_block *block = nir_start_block(impl); block != NULL; \
2189 block = nir_block_cf_tree_next(block))
2190
2191 #define nir_foreach_block_safe(block, impl) \
2192 for (nir_block *block = nir_start_block(impl), \
2193 *next = nir_block_cf_tree_next(block); \
2194 block != NULL; \
2195 block = next, next = nir_block_cf_tree_next(block))
2196
2197 #define nir_foreach_block_reverse(block, impl) \
2198 for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
2199 block = nir_block_cf_tree_prev(block))
2200
2201 #define nir_foreach_block_reverse_safe(block, impl) \
2202 for (nir_block *block = nir_impl_last_block(impl), \
2203 *prev = nir_block_cf_tree_prev(block); \
2204 block != NULL; \
2205 block = prev, prev = nir_block_cf_tree_prev(block))
2206
2207 #define nir_foreach_block_in_cf_node(block, node) \
2208 for (nir_block *block = nir_cf_node_cf_tree_first(node); \
2209 block != nir_cf_node_cf_tree_next(node); \
2210 block = nir_block_cf_tree_next(block))
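
/* Sketch of visiting every instruction in an implementation with the
 * iterators above; the counting is purely illustrative:
 *
 *    unsigned count = 0;
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr(instr, block)
 *          count++;
 *    }
 */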
2211
2212 /* If the following CF node is an if, this function returns that if.
2213 * Otherwise, it returns NULL.
2214 */
2215 nir_if *nir_block_get_following_if(nir_block *block);
2216
2217 nir_loop *nir_block_get_following_loop(nir_block *block);
2218
2219 void nir_index_local_regs(nir_function_impl *impl);
2220 void nir_index_global_regs(nir_shader *shader);
2221 void nir_index_ssa_defs(nir_function_impl *impl);
2222 unsigned nir_index_instrs(nir_function_impl *impl);
2223
2224 void nir_index_blocks(nir_function_impl *impl);
2225
2226 void nir_print_shader(nir_shader *shader, FILE *fp);
2227 void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
2228 void nir_print_instr(const nir_instr *instr, FILE *fp);
2229
2230 nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
2231 nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
2232 nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
2233 nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
2234
2235 #ifdef DEBUG
2236 void nir_validate_shader(nir_shader *shader);
2237 void nir_metadata_set_validation_flag(nir_shader *shader);
2238 void nir_metadata_check_validation_flag(nir_shader *shader);
2239
2240 #include "util/debug.h"
2241 static inline bool
2242 should_clone_nir(void)
2243 {
2244 static int should_clone = -1;
2245 if (should_clone < 0)
2246 should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);
2247
2248 return should_clone;
2249 }
2250 #else
2251 static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
2252 static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
2253 static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
2254 static inline bool should_clone_nir(void) { return false; }
2255 #endif /* DEBUG */
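
/* Setting NIR_TEST_CLONE=true in the environment makes the pass macros
 * below clone the shader after every pass, stress-testing
 * nir_shader_clone(); e.g. (the application name is illustrative):
 *
 *    NIR_TEST_CLONE=true ./app
 */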
2256
2257 #define _PASS(nir, do_pass) do { \
2258 do_pass \
2259 nir_validate_shader(nir); \
2260 if (should_clone_nir()) { \
2261 nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
2262 ralloc_free(nir); \
2263 nir = clone; \
2264 } \
2265 } while (0)
2266
2267 #define NIR_PASS(progress, nir, pass, ...) _PASS(nir, \
2268 nir_metadata_set_validation_flag(nir); \
2269 if (pass(nir, ##__VA_ARGS__)) { \
2270 progress = true; \
2271 nir_metadata_check_validation_flag(nir); \
2272 } \
2273 )
2274
2275 #define NIR_PASS_V(nir, pass, ...) _PASS(nir, \
2276 pass(nir, ##__VA_ARGS__); \
2277 )
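
/* A sketch of how a driver might string these together into an
 * optimization loop; nir_copy_prop() and nir_opt_dce() are declared later
 * in this header:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *    } while (progress);
 */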
2278
2279 void nir_calc_dominance_impl(nir_function_impl *impl);
2280 void nir_calc_dominance(nir_shader *shader);
2281
2282 nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
2283 bool nir_block_dominates(nir_block *parent, nir_block *child);
2284
2285 void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
2286 void nir_dump_dom_tree(nir_shader *shader, FILE *fp);
2287
2288 void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
2289 void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);
2290
2291 void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
2292 void nir_dump_cfg(nir_shader *shader, FILE *fp);
2293
2294 int nir_gs_count_vertices(const nir_shader *shader);
2295
2296 bool nir_split_var_copies(nir_shader *shader);
2297
2298 bool nir_lower_returns_impl(nir_function_impl *impl);
2299 bool nir_lower_returns(nir_shader *shader);
2300
2301 bool nir_inline_functions(nir_shader *shader);
2302
2303 bool nir_propagate_invariant(nir_shader *shader);
2304
2305 void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, void *mem_ctx);
2306 void nir_lower_var_copies(nir_shader *shader);
2307
2308 bool nir_lower_global_vars_to_local(nir_shader *shader);
2309
2310 bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
2311
2312 bool nir_lower_locals_to_regs(nir_shader *shader);
2313
2314 void nir_lower_io_to_temporaries(nir_shader *shader,
2315 nir_function_impl *entrypoint,
2316 bool outputs, bool inputs);
2317
2318 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
2319
2320 void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
2321 int (*type_size)(const struct glsl_type *));
2322
2323 typedef enum {
2324 /* If set, this forces all non-flat fragment shader inputs to be
2325 * interpolated as if with the "sample" qualifier. This requires
2326 * nir_shader_compiler_options::use_interpolated_input_intrinsics.
2327 */
2328 nir_lower_io_force_sample_interpolation = (1 << 1),
2329 } nir_lower_io_options;
2330 void nir_lower_io(nir_shader *shader,
2331 nir_variable_mode modes,
2332 int (*type_size)(const struct glsl_type *),
2333 nir_lower_io_options);
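
/* Hypothetical sketch of a type_size callback and a matching call; a real
 * driver counts slots according to how it packs its inputs:
 *
 *    static int
 *    count_vec4_slots(const struct glsl_type *type)
 *    {
 *       return glsl_count_attribute_slots(type, false);
 *    }
 *
 *    nir_lower_io(shader, nir_var_shader_in, count_vec4_slots,
 *                 (nir_lower_io_options)0);
 */
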
2334 nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
2335 nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
2336
2337 bool nir_is_per_vertex_io(nir_variable *var, gl_shader_stage stage);
2338
2339 void nir_lower_io_types(nir_shader *shader);
2340 void nir_lower_vars_to_ssa(nir_shader *shader);
2341
2342 bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
2343 bool nir_lower_constant_initializers(nir_shader *shader,
2344 nir_variable_mode modes);
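
/* Sketch of running the constant-initializer lowering over every variable
 * mode via the pass macros above:
 *
 *    bool progress = false;
 *    NIR_PASS(progress, nir, nir_lower_constant_initializers, nir_var_all);
 */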
2345
2346 void nir_move_vec_src_uses_to_dest(nir_shader *shader);
2347 bool nir_lower_vec_to_movs(nir_shader *shader);
2348 bool nir_lower_alu_to_scalar(nir_shader *shader);
2349 void nir_lower_load_const_to_scalar(nir_shader *shader);
2350
2351 bool nir_lower_phis_to_scalar(nir_shader *shader);
2352 void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
2353
2354 void nir_lower_samplers(nir_shader *shader,
2355 const struct gl_shader_program *shader_program);
2356
2357 bool nir_lower_system_values(nir_shader *shader);
2358
2359 typedef struct nir_lower_tex_options {
2360 /**
2361 * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
2362 * sampler types a texture projector is lowered.
2363 */
2364 unsigned lower_txp;
2365
2366 /**
2367 * If true, lower away nir_tex_src_offset for all texelfetch instructions.
2368 */
2369 bool lower_txf_offset;
2370
2371 /**
2372 * If true, lower away nir_tex_src_offset for all rect textures.
2373 */
2374 bool lower_rect_offset;
2375
2376 /**
2377 * If true, lower rect textures to 2D, using txs to fetch the
2378 * texture dimensions and dividing the texture coords by the
2379 * texture dims to normalize.
2380 */
2381 bool lower_rect;
2382
2383 /**
2384 * Bitmasks of textures that need YUV-to-RGB conversion.
2385 */
2386 unsigned lower_y_uv_external;
2387 unsigned lower_y_u_v_external;
2388 unsigned lower_yx_xuxv_external;
2389
2390 /**
2391 * To emulate certain texture wrap modes, this can be used
2392 * to saturate the specified tex coord to [0.0, 1.0]. The
2393 * bits are according to sampler #, ie. if, for example:
2394 *
2395 * (conf->saturate_s & (1 << n))
2396 *
2397 * is true, then the s coord for sampler n is saturated.
2398 *
2399 * Note that clamping must happen *after* projector lowering
2400 * so any projected texture sample instruction with a clamped
2401 * coordinate gets automatically lowered, regardless of the
2402 * 'lower_txp' setting.
2403 */
2404 unsigned saturate_s;
2405 unsigned saturate_t;
2406 unsigned saturate_r;
2407
2408 /* Bitmask of textures that need swizzling.
2409 *
2410 * If (swizzle_result & (1 << texture_index)), then the swizzle in
2411 * swizzles[texture_index] is applied to the result of the texturing
2412 * operation.
2413 */
2414 unsigned swizzle_result;
2415
2416 /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles,
2417 * while 4 and 5 represent the constants 0 and 1, respectively.
2418 */
2419 uint8_t swizzles[32][4];
2420
2421 /**
2422 * Bitmask of textures that need sRGB-to-linear conversion. If
2423 * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
2424 * of the texture are lowered to linear.
2425 */
2426 unsigned lower_srgb;
2427 } nir_lower_tex_options;
2428
2429 bool nir_lower_tex(nir_shader *shader,
2430 const nir_lower_tex_options *options);
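
/* A minimal sketch of filling out the options; the choice of bits here is
 * illustrative only (lower projectors for all sampler dims, saturate the
 * s coordinate on sampler 0):
 *
 *    nir_lower_tex_options tex_options = {
 *       .lower_txp = ~0,
 *       .saturate_s = (1 << 0),
 *    };
 *    nir_lower_tex(shader, &tex_options);
 */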
2431
2432 bool nir_lower_idiv(nir_shader *shader);
2433
2434 void nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables);
2435 void nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables);
2436 void nir_lower_clip_cull_distance_arrays(nir_shader *nir);
2437
2438 void nir_lower_two_sided_color(nir_shader *shader);
2439
2440 void nir_lower_clamp_color_outputs(nir_shader *shader);
2441
2442 void nir_lower_passthrough_edgeflags(nir_shader *shader);
2443
2444 typedef struct nir_lower_wpos_ytransform_options {
2445 int state_tokens[5];
2446 bool fs_coord_origin_upper_left :1;
2447 bool fs_coord_origin_lower_left :1;
2448 bool fs_coord_pixel_center_integer :1;
2449 bool fs_coord_pixel_center_half_integer :1;
2450 } nir_lower_wpos_ytransform_options;
2451
2452 bool nir_lower_wpos_ytransform(nir_shader *shader,
2453 const nir_lower_wpos_ytransform_options *options);
2454 bool nir_lower_wpos_center(nir_shader *shader);
2455
2456 typedef struct nir_lower_drawpixels_options {
2457 int texcoord_state_tokens[5];
2458 int scale_state_tokens[5];
2459 int bias_state_tokens[5];
2460 unsigned drawpix_sampler;
2461 unsigned pixelmap_sampler;
2462 bool pixel_maps :1;
2463 bool scale_and_bias :1;
2464 } nir_lower_drawpixels_options;
2465
2466 void nir_lower_drawpixels(nir_shader *shader,
2467 const nir_lower_drawpixels_options *options);
2468
2469 typedef struct nir_lower_bitmap_options {
2470 unsigned sampler;
2471 bool swizzle_xxxx;
2472 } nir_lower_bitmap_options;
2473
2474 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
2475
2476 void nir_lower_atomics(nir_shader *shader,
2477 const struct gl_shader_program *shader_program);
2478 void nir_lower_to_source_mods(nir_shader *shader);
2479
2480 bool nir_lower_gs_intrinsics(nir_shader *shader);
2481
2482 typedef enum {
2483 nir_lower_drcp = (1 << 0),
2484 nir_lower_dsqrt = (1 << 1),
2485 nir_lower_drsq = (1 << 2),
2486 nir_lower_dtrunc = (1 << 3),
2487 nir_lower_dfloor = (1 << 4),
2488 nir_lower_dceil = (1 << 5),
2489 nir_lower_dfract = (1 << 6),
2490 nir_lower_dround_even = (1 << 7),
2491 nir_lower_dmod = (1 << 8)
2492 } nir_lower_doubles_options;
2493
2494 void nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
2495 void nir_lower_double_pack(nir_shader *shader);
2496
2497 bool nir_normalize_cubemap_coords(nir_shader *shader);
2498
2499 void nir_live_ssa_defs_impl(nir_function_impl *impl);
2500 bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
2501
2502 void nir_convert_to_ssa_impl(nir_function_impl *impl);
2503 void nir_convert_to_ssa(nir_shader *shader);
2504
2505 bool nir_repair_ssa_impl(nir_function_impl *impl);
2506 bool nir_repair_ssa(nir_shader *shader);
2507
2508 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
2509 * registers. If false, convert all values (even those not involved in a phi
2510 * node) to registers.
2511 */
2512 void nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
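
/* E.g., a backend whose register allocator only needs phi webs taken out
 * of SSA form would pass phi_webs_only = true (illustrative):
 *
 *    nir_convert_from_ssa(shader, true);
 */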
2513
2514 bool nir_opt_algebraic(nir_shader *shader);
2515 bool nir_opt_algebraic_late(nir_shader *shader);
2516 bool nir_opt_constant_folding(nir_shader *shader);
2517
2518 bool nir_opt_global_to_local(nir_shader *shader);
2519
2520 bool nir_copy_prop(nir_shader *shader);
2521
2522 bool nir_opt_cse(nir_shader *shader);
2523
2524 bool nir_opt_dce(nir_shader *shader);
2525
2526 bool nir_opt_dead_cf(nir_shader *shader);
2527
2528 bool nir_opt_gcm(nir_shader *shader, bool value_number);
2529
2530 bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);
2531
2532 bool nir_opt_remove_phis(nir_shader *shader);
2533
2534 bool nir_opt_undef(nir_shader *shader);
2535
2536 bool nir_opt_conditional_discard(nir_shader *shader);
2537
2538 void nir_sweep(nir_shader *shader);
2539
2540 nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
2541 gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
2542
2543 #ifdef __cplusplus
2544 } /* extern "C" */
2545 #endif