/*
 * Copyright © 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#ifndef NIR_H
#define NIR_H

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitscan.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "util/format/u_format.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#define XXH_INLINE_ALL
#include "util/xxhash.h"
#include <stdio.h>

#ifndef NDEBUG
#include "util/debug.h"
#endif /* NDEBUG */

#include "nir_opcodes.h"

#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif

#ifdef __cplusplus
extern "C" {
#endif

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)
#define NIR_MAX_VEC_COMPONENTS 16
#define NIR_MAX_MATRIX_COLUMNS 4
#define NIR_STREAM_PACKED (1 << 8)
typedef uint16_t nir_component_mask_t;

static inline bool
nir_num_components_valid(unsigned num_components)
{
   return (num_components >= 1 &&
           num_components <= 4) ||
          num_components == 8 ||
          num_components == 16;
}

/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field, \
                        type_field, type_value)         \
static inline out_type *                                \
name(const in_type *parent)                             \
{                                                       \
   assert(parent && parent->type_field == type_value);  \
   return exec_node_data(out_type, parent, field);      \
}
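
/* For example, this header later instantiates the macro along the lines of
 *
 *    NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
 *                    type, nir_instr_type_alu)
 *
 * which defines nir_instr_as_alu(), asserting that instr->type is
 * nir_instr_type_alu before returning the containing nir_alu_instr.
 */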

struct nir_function;
struct nir_shader;
struct nir_instr;
struct nir_builder;


/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   gl_state_index16 tokens[STATE_LENGTH];
   uint16_t swizzle;
} nir_state_slot;

typedef enum {
   nir_var_shader_in = (1 << 0),
   nir_var_shader_out = (1 << 1),
   nir_var_shader_temp = (1 << 2),
   nir_var_function_temp = (1 << 3),
   nir_var_uniform = (1 << 4),
   nir_var_mem_ubo = (1 << 5),
   nir_var_system_value = (1 << 6),
   nir_var_mem_ssbo = (1 << 7),
   nir_var_mem_shared = (1 << 8),
   nir_var_mem_global = (1 << 9),
   nir_var_mem_push_const = (1 << 10), /* not actually used for variables */
   nir_num_variable_modes = 11,
   nir_var_all = (1 << nir_num_variable_modes) - 1,
} nir_variable_mode;
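
/* The modes above are single-bit flags, so they combine into masks; a small
 * sketch (illustrative only) of testing whether a variable lives in buffer
 * memory of either kind:
 *
 *    bool is_buffer =
 *       var->data.mode & (nir_var_mem_ubo | nir_var_mem_ssbo);
 */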

/**
 * Rounding modes.
 */
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne = 1, /* round to nearest even */
   nir_rounding_mode_ru = 2,   /* round up */
   nir_rounding_mode_rd = 3,   /* round down */
   nir_rounding_mode_rtz = 4,  /* round towards zero */
} nir_rounding_mode;

typedef union {
   bool b;
   float f32;
   double f64;
   int8_t i8;
   uint8_t u8;
   int16_t i16;
   uint16_t u16;
   int32_t i32;
   uint32_t u32;
   int64_t i64;
   uint64_t u64;
} nir_const_value;

#define nir_const_value_to_array(arr, c, components, m) \
do { \
   for (unsigned i = 0; i < components; ++i) \
      arr[i] = c[i].m; \
} while (false)
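
/* A minimal usage sketch (the load_const instruction name is illustrative):
 * copy the f32 member of four constant components into a plain float array.
 *
 *    float f[4];
 *    nir_const_value_to_array(f, load->value, 4, f32);
 */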

static inline nir_const_value
nir_const_value_for_raw_uint(uint64_t x, unsigned bit_size)
{
   nir_const_value v;
   memset(&v, 0, sizeof(v));

   switch (bit_size) {
   case 1: v.b = x; break;
   case 8: v.u8 = x; break;
   case 16: v.u16 = x; break;
   case 32: v.u32 = x; break;
   case 64: v.u64 = x; break;
   default:
      unreachable("Invalid bit size");
   }

   return v;
}

static inline nir_const_value
nir_const_value_for_int(int64_t i, unsigned bit_size)
{
   assert(bit_size <= 64);
   if (bit_size < 64) {
      assert(i >= (-(1ll << (bit_size - 1))));
      assert(i < (1ll << (bit_size - 1)));
   }

   return nir_const_value_for_raw_uint(i, bit_size);
}

static inline nir_const_value
nir_const_value_for_uint(uint64_t u, unsigned bit_size)
{
   assert(bit_size <= 64);
   if (bit_size < 64)
      assert(u < (1ull << bit_size));

   return nir_const_value_for_raw_uint(u, bit_size);
}

static inline nir_const_value
nir_const_value_for_bool(bool b, unsigned bit_size)
{
   /* Booleans use a 0/-1 convention */
   return nir_const_value_for_int(-(int)b, bit_size);
}

/* This one isn't inline because it requires half-float conversion */
nir_const_value nir_const_value_for_float(double b, unsigned bit_size);

static inline int64_t
nir_const_value_as_int(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1: return -(int)value.b;
   case 8: return value.i8;
   case 16: return value.i16;
   case 32: return value.i32;
   case 64: return value.i64;
   default:
      unreachable("Invalid bit size");
   }
}

static inline uint64_t
nir_const_value_as_uint(nir_const_value value, unsigned bit_size)
{
   switch (bit_size) {
   case 1: return value.b;
   case 8: return value.u8;
   case 16: return value.u16;
   case 32: return value.u32;
   case 64: return value.u64;
   default:
      unreachable("Invalid bit size");
   }
}

static inline bool
nir_const_value_as_bool(nir_const_value value, unsigned bit_size)
{
   int64_t i = nir_const_value_as_int(value, bit_size);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}
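
/* Example round trips through the helpers above (illustrative):
 *
 *    nir_const_value v = nir_const_value_for_int(-5, 16);
 *    assert(nir_const_value_as_int(v, 16) == -5);
 *
 *    nir_const_value t = nir_const_value_for_bool(true, 32);
 *    assert(nir_const_value_as_uint(t, 32) == 0xffffffff);  (0/-1 convention)
 */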

/* This one isn't inline because it requires half-float conversion */
double nir_const_value_as_float(nir_const_value value, unsigned bit_size);

typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable. Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[NIR_MAX_VEC_COMPONENTS];

   /* we could get this from the var->type but makes clone *much* easier to
    * not have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;

/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
 * with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;

/**
 * Enum keeping track of how a variable was declared.
 */
typedef enum {
   /**
    * Normal declaration.
    */
   nir_var_declared_normally = 0,

   /**
    * Variable is implicitly generated by the compiler and should not be
    * visible via the API.
    */
   nir_var_hidden,
} nir_var_declaration_type;

/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */

typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      unsigned mode:11;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned centroid:1;
      unsigned sample:1;
      unsigned patch:1;
      unsigned invariant:1;

      /**
       * Precision qualifier.
       *
       * In desktop GLSL we do not care about precision qualifiers at all, in
       * fact, the spec says that precision qualifiers are ignored.
       *
       * To make things easy, we make it so that this field is always
       * GLSL_PRECISION_NONE on desktop shaders. This way all the variables
       * have the same precision value and the checks we add in the compiler
       * for this field will never break a desktop shader compile.
       */
      unsigned precision:2;

      /**
       * Can this variable be coalesced with another?
       *
       * This is set by nir_lower_io_to_temporaries to say that any
       * copies involving this variable should stay put. Propagating it can
       * duplicate the resulting load/store, which is not wanted, and may
       * result in a load/store of the variable with an indirect offset which
       * the backend may not be able to handle.
       */
      unsigned cannot_coalesce:1;

      /**
       * When separate shader programs are enabled, only inputs/outputs
       * between the stages of a multi-stage separate program can be safely
       * removed from the shader interface. Other inputs/outputs must remain
       * active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:3;

      /**
       * If non-zero, then this variable may be packed along with other variables
       * into a single varying slot, so this offset should be applied when
       * accessing components. For example, an offset of 1 means that the x
       * component of this variable is actually stored in component y of the
       * location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * Non-zero if this variable is considered bindless as defined by
       * ARB_bindless_texture.
       */
      unsigned bindless:1;

      /**
       * Was an explicit binding set in the shader?
       */
      unsigned explicit_binding:1;

      /**
       * Was the location explicitly set in the shader?
       *
       * If the location is explicitly set in the shader, it \b cannot be changed
       * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
       * no effect).
       */
      unsigned explicit_location:1;

      /**
       * Was a transform feedback buffer set in the shader?
       */
      unsigned explicit_xfb_buffer:1;

      /**
       * Was a transform feedback stride set in the shader?
       */
      unsigned explicit_xfb_stride:1;

      /**
       * Was an explicit offset set in the shader?
       */
      unsigned explicit_offset:1;

      /**
       * Layout of the matrix. Uses glsl_matrix_layout values.
       */
      unsigned matrix_layout:2;

      /**
       * Non-zero if this variable was created by lowering a named interface
       * block.
       */
      unsigned from_named_ifc_block:1;

      /**
       * How the variable was declared. See nir_var_declaration_type.
       *
       * This is used to detect variables generated by the compiler, so should
       * not be visible via the API.
       */
      unsigned how_declared:2;

      /**
       * Is this variable per-view? If so, we know it must be an array with
       * size corresponding to the number of views.
       */
      unsigned per_view:1;

      /**
       * \brief Layout qualifier for gl_FragDepth. See nir_depth_layout.
       *
       * This is not equal to \c ir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      unsigned depth_layout:3;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, NIR_STREAM_PACKED is set and bits [2*i+1,2*i]
       * indicate the stream of the i-th component.
       */
      unsigned stream:9;

      /**
       * See gl_access_qualifier.
       *
       * Access flags for memory variables (SSBO/global), image uniforms, and
       * bindless images in uniforms/inputs/outputs.
       */
      unsigned access:8;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      unsigned descriptor_set:5;

      /**
       * Output index for dual-source blending.
       */
      unsigned index;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first element.
       */
      unsigned binding;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs,
       * outputs, and uniforms (including samplers and images).
       */
      unsigned driver_location;

      /**
       * Location an atomic counter or transform feedback is stored at.
       */
      unsigned offset;

      union {
         struct {
            /** Image internal format if specified explicitly, otherwise PIPE_FORMAT_NONE. */
            enum pipe_format format;
         } image;

         struct {
            /**
             * Transform feedback buffer.
             */
            uint16_t buffer:2;

            /**
             * Transform feedback stride.
             */
            uint16_t stride;
         } xfb;
      };
   } data;

   /**
    * Identifier for this variable generated by nir_index_vars() that is unique
    * among other variables in the same exec_list.
    */
   unsigned index;

   /* Number of nir_variable_data members */
   uint16_t num_members;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree. In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   uint16_t num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/

   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * Global variable assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   struct nir_variable *pointer_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;

   /**
    * Description of per-member data for per-member struct variables
    *
    * This is used for variables which are actually an amalgamation of
    * multiple entities such as a struct of built-in values or a struct of
    * inputs each with their own layout specifier. This is only allowed on
    * variables with a struct or array of array of struct type.
    */
   struct nir_variable_data *members;
} nir_variable;

static inline bool
_nir_shader_variable_has_mode(nir_variable *var, unsigned modes)
{
   /* This isn't a shader variable */
   assert(!(modes & nir_var_function_temp));
   return var->data.mode & modes;
}

#define nir_foreach_variable_in_list(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_in_list_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)

#define nir_foreach_variable_in_shader(var, shader) \
   nir_foreach_variable_in_list(var, &(shader)->variables)

#define nir_foreach_variable_in_shader_safe(var, shader) \
   nir_foreach_variable_in_list_safe(var, &(shader)->variables)

#define nir_foreach_variable_with_modes(var, shader, modes) \
   nir_foreach_variable_in_shader(var, shader) \
      if (_nir_shader_variable_has_mode(var, modes))

#define nir_foreach_variable_with_modes_safe(var, shader, modes) \
   nir_foreach_variable_in_shader_safe(var, shader) \
      if (_nir_shader_variable_has_mode(var, modes))

#define nir_foreach_shader_in_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_in)

#define nir_foreach_shader_in_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_in)

#define nir_foreach_shader_out_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_shader_out)

#define nir_foreach_shader_out_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_shader_out)

#define nir_foreach_uniform_variable(var, shader) \
   nir_foreach_variable_with_modes(var, shader, nir_var_uniform)

#define nir_foreach_uniform_variable_safe(var, shader) \
   nir_foreach_variable_with_modes_safe(var, shader, nir_var_uniform)
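
/* A sketch of how these iterators are used (illustrative): count the
 * uniform variables declared in a shader.
 *
 *    unsigned num_uniforms = 0;
 *    nir_foreach_uniform_variable(var, shader)
 *       num_uniforms++;
 */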

static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_function_temp;
}

typedef struct nir_register {
   struct exec_node node;

   unsigned num_components; /** < number of vector components */
   unsigned num_array_elems; /** < size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;
} nir_register;

#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)

typedef enum PACKED {
   nir_instr_type_alu,
   nir_instr_type_deref,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;

typedef struct nir_instr {
   struct exec_node node;
   struct nir_block *block;
   nir_instr_type type;

   /* A temporary for optimization and analysis passes to use for storing
    * flags. For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;

   /** generic instruction index. */
   unsigned index;
} nir_instr;

static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}

typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char* name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   /** Instruction which produces this SSA value. */
   nir_instr *parent_instr;

   /** set of nir_srcs where this SSA value is used (read from) */
   struct list_head uses;

   /** set of nir_ifs where this SSA value is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /**
    * True if this SSA value may have different values in different SIMD
    * invocations of the shader. This is set by nir_divergence_analysis.
    */
   bool divergent;
} nir_ssa_def;

struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /** < NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;

struct nir_if;

typedef struct nir_src {
   union {
      /** Instruction that consumes this value as a source. */
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()

#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)

static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}

static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_src_num_components(nir_src src)
{
   return src.is_ssa ? src.ssa->num_components : src.reg.reg->num_components;
}

static inline bool
nir_src_is_const(nir_src src)
{
   return src.is_ssa &&
          src.ssa->parent_instr->type == nir_instr_type_load_const;
}

static inline bool
nir_src_is_divergent(nir_src src)
{
   assert(src.is_ssa);
   return src.ssa->divergent;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

static inline unsigned
nir_dest_num_components(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.num_components : dest.reg.reg->num_components;
}

static inline bool
nir_dest_is_divergent(nir_dest dest)
{
   assert(dest.is_ssa);
   return dest.ssa.divergent;
}

/* Are all components the same, i.e. .xxxx? */
static inline bool
nir_is_same_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
{
   for (unsigned i = 1; i < nr_comp; i++)
      if (swiz[i] != swiz[0])
         return false;
   return true;
}

/* Are all components sequential, i.e. .yzw? */
static inline bool
nir_is_sequential_comp_swizzle(uint8_t *swiz, unsigned nr_comp)
{
   for (unsigned i = 1; i < nr_comp; i++)
      if (swiz[i] != (swiz[0] + i))
         return false;
   return true;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);

typedef struct {
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become non-positive.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[NIR_MAX_VEC_COMPONENTS];
} nir_alu_src;

typedef struct {
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */
   bool saturate;

   unsigned write_mask : NIR_MAX_VEC_COMPONENTS; /* ignored if dest.is_ssa is true */
} nir_alu_dest;

/** NIR sized and unsized types
 *
 * The values in this enum are carefully chosen so that the sized type is
 * just the unsized type OR the number of bits.
 */
typedef enum PACKED {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_int = 2,
   nir_type_uint = 4,
   nir_type_bool = 6,
   nir_type_float = 128,
   nir_type_bool1 = 1 | nir_type_bool,
   nir_type_bool8 = 8 | nir_type_bool,
   nir_type_bool16 = 16 | nir_type_bool,
   nir_type_bool32 = 32 | nir_type_bool,
   nir_type_int1 = 1 | nir_type_int,
   nir_type_int8 = 8 | nir_type_int,
   nir_type_int16 = 16 | nir_type_int,
   nir_type_int32 = 32 | nir_type_int,
   nir_type_int64 = 64 | nir_type_int,
   nir_type_uint1 = 1 | nir_type_uint,
   nir_type_uint8 = 8 | nir_type_uint,
   nir_type_uint16 = 16 | nir_type_uint,
   nir_type_uint32 = 32 | nir_type_uint,
   nir_type_uint64 = 64 | nir_type_uint,
   nir_type_float16 = 16 | nir_type_float,
   nir_type_float32 = 32 | nir_type_float,
   nir_type_float64 = 64 | nir_type_float,
} nir_alu_type;

#define NIR_ALU_TYPE_SIZE_MASK 0x79
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x86

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline nir_alu_type
nir_alu_type_get_base_type(nir_alu_type type)
{
   return (nir_alu_type)(type & NIR_ALU_TYPE_BASE_TYPE_MASK);
}
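
/* Worked example of the masks above: nir_type_float16 is 16 | 128 = 0x90.
 * Masking with NIR_ALU_TYPE_SIZE_MASK (0x79) yields 0x10 = 16, the bit size,
 * and masking with NIR_ALU_TYPE_BASE_TYPE_MASK (0x86) yields 0x80, which is
 * nir_type_float.
 */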

static inline nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
{
   switch (base_type) {
   case GLSL_TYPE_BOOL:
      return nir_type_bool1;
   case GLSL_TYPE_UINT:
      return nir_type_uint32;
   case GLSL_TYPE_INT:
      return nir_type_int32;
   case GLSL_TYPE_UINT16:
      return nir_type_uint16;
   case GLSL_TYPE_INT16:
      return nir_type_int16;
   case GLSL_TYPE_UINT8:
      return nir_type_uint8;
   case GLSL_TYPE_INT8:
      return nir_type_int8;
   case GLSL_TYPE_UINT64:
      return nir_type_uint64;
   case GLSL_TYPE_INT64:
      return nir_type_int64;
   case GLSL_TYPE_FLOAT:
      return nir_type_float32;
   case GLSL_TYPE_FLOAT16:
      return nir_type_float16;
   case GLSL_TYPE_DOUBLE:
      return nir_type_float64;

   case GLSL_TYPE_SAMPLER:
   case GLSL_TYPE_IMAGE:
   case GLSL_TYPE_ATOMIC_UINT:
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_ARRAY:
   case GLSL_TYPE_VOID:
   case GLSL_TYPE_SUBROUTINE:
   case GLSL_TYPE_FUNCTION:
   case GLSL_TYPE_ERROR:
      return nir_type_invalid;
   }

   unreachable("unknown type");
}

static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}

nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);

static inline nir_op
nir_op_vec(unsigned components)
{
   switch (components) {
   case 1: return nir_op_mov;
   case 2: return nir_op_vec2;
   case 3: return nir_op_vec3;
   case 4: return nir_op_vec4;
   case 8: return nir_op_vec8;
   case 16: return nir_op_vec16;
   default: unreachable("bad component count");
   }
}

static inline bool
nir_op_is_vec(nir_op op)
{
   switch (op) {
   case nir_op_mov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
   case nir_op_vec8:
   case nir_op_vec16:
      return true;
   default:
      return false;
   }
}

static inline bool
nir_is_float_control_signed_zero_inf_nan_preserve(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64);
}

static inline bool
nir_is_denorm_flush_to_zero(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64);
}

static inline bool
nir_is_denorm_preserve(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_DENORM_PRESERVE_FP64);
}

static inline bool
nir_is_rounding_mode_rtne(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}

static inline bool
nir_is_rounding_mode_rtz(unsigned execution_mode, unsigned bit_size)
{
   return (16 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
          (32 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
          (64 == bit_size && execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}

static inline bool
nir_has_any_rounding_mode_rtz(unsigned execution_mode)
{
   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64);
}

static inline bool
nir_has_any_rounding_mode_rtne(unsigned execution_mode)
{
   return (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32) ||
          (execution_mode & FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64);
}

static inline nir_rounding_mode
nir_get_rounding_mode_from_float_controls(unsigned execution_mode,
                                          nir_alu_type type)
{
   if (nir_alu_type_get_base_type(type) != nir_type_float)
      return nir_rounding_mode_undef;

   unsigned bit_size = nir_alu_type_get_type_size(type);

   if (nir_is_rounding_mode_rtz(execution_mode, bit_size))
      return nir_rounding_mode_rtz;
   if (nir_is_rounding_mode_rtne(execution_mode, bit_size))
      return nir_rounding_mode_rtne;
   return nir_rounding_mode_undef;
}

static inline bool
nir_has_any_rounding_mode_enabled(unsigned execution_mode)
{
   bool result =
      nir_has_any_rounding_mode_rtne(execution_mode) ||
      nir_has_any_rounding_mode_rtz(execution_mode);
   return result;
}

typedef enum {
   /**
    * Operation where the first two sources are commutative.
    *
    * For 2-source operations, this is just mathematical commutativity. Some
    * 3-source operations, like ffma, are only commutative in the first two
    * sources.
    */
   NIR_OP_IS_2SRC_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;

typedef struct {
   const char *name;

   uint8_t num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   uint8_t output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */
   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   uint8_t input_sizes[NIR_MAX_VEC_COMPONENTS];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[NIR_MAX_VEC_COMPONENTS];

   nir_op_algebraic_property algebraic_properties;

   /* Whether this represents a numeric conversion opcode */
   bool is_conversion;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];

typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either. This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact:1;

   /**
    * Indicates that this instruction does not cause wrapping to occur, in
    * the form of overflow or underflow.
    */
   bool no_signed_wrap:1;
   bool no_unsigned_wrap:1;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);

/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}

static inline nir_component_mask_t
nir_alu_instr_src_read_mask(const nir_alu_instr *instr, unsigned src)
{
   nir_component_mask_t read_mask = 0;
   for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; c++) {
      if (!nir_alu_instr_channel_used(instr, src, c))
         continue;

      read_mask |= (1 << instr->src[src].swizzle[c]);
   }
   return read_mask;
}
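
/* Worked example: for a per-component op with write mask 0b1011 and a source
 * swizzle of {2, 2, 1, 0} (.zzyx), channels 0, 1 and 3 are used, so source
 * components 2, 2 and 0 are read and the returned mask is 0b0101.
 */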

/**
 * Get the number of channels used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return nir_dest_num_components(instr->dest.dest);
}

static inline bool
nir_alu_instr_is_comparison(const nir_alu_instr *instr)
{
   switch (instr->op) {
   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fneu:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine:
   case nir_op_i2b1:
   case nir_op_f2b1:
   case nir_op_inot:
      return true;
   default:
      return false;
   }
}

bool nir_const_value_negative_equal(nir_const_value c1, nir_const_value c2,
                                    nir_alu_type full_type);

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);

bool nir_alu_srcs_negative_equal(const nir_alu_instr *alu1,
                                 const nir_alu_instr *alu2,
                                 unsigned src1, unsigned src2);

typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_array_wildcard,
   nir_deref_type_ptr_as_array,
   nir_deref_type_struct,
   nir_deref_type_cast,
} nir_deref_type;

typedef struct {
   nir_instr instr;

   /** The type of this deref instruction */
   nir_deref_type deref_type;

   /** The mode of the underlying variable */
   nir_variable_mode mode;

   /** The dereferenced type of the resulting pointer value */
   const struct glsl_type *type;

   union {
      /** Variable being dereferenced if deref_type is a deref_var */
      nir_variable *var;

      /** Parent deref if deref_type is not deref_var */
      nir_src parent;
   };

   /** Additional deref parameters */
   union {
      struct {
         nir_src index;
      } arr;

      struct {
         unsigned index;
      } strct;

      struct {
         unsigned ptr_stride;
      } cast;
   };

   /** Destination to store the resulting "pointer" */
   nir_dest dest;
} nir_deref_instr;

static inline nir_deref_instr *nir_src_as_deref(nir_src src);

static inline nir_deref_instr *
nir_deref_instr_parent(const nir_deref_instr *instr)
{
   if (instr->deref_type == nir_deref_type_var)
      return NULL;
   else
      return nir_src_as_deref(instr->parent);
}

static inline nir_variable *
nir_deref_instr_get_variable(const nir_deref_instr *instr)
{
   while (instr->deref_type != nir_deref_type_var) {
      if (instr->deref_type == nir_deref_type_cast)
         return NULL;

      instr = nir_deref_instr_parent(instr);
   }

   return instr->var;
}
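
/* Derefs form chains: a GLSL access like s.f[i] becomes a deref_var for s,
 * a deref_struct selecting f, and a deref_array indexed by i, each consuming
 * its parent through the parent source. nir_deref_instr_get_variable() walks
 * such a chain back to the deref_var, returning NULL if it crosses a cast,
 * where the underlying variable is unknown.
 */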

bool nir_deref_instr_has_indirect(nir_deref_instr *instr);
bool nir_deref_instr_is_known_out_of_bounds(nir_deref_instr *instr);
bool nir_deref_instr_has_complex_use(nir_deref_instr *instr);

bool nir_deref_instr_remove_if_unused(nir_deref_instr *instr);

unsigned nir_deref_instr_ptr_as_array_stride(nir_deref_instr *instr);

typedef struct {
   nir_instr instr;

   struct nir_function *callee;

   unsigned num_params;
   nir_src params[];
} nir_call_instr;

#include "nir_intrinsics.h"

#define NIR_INTRINSIC_MAX_CONST_INDEX 4

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do. Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like. Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them. Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated. That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources. What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h. Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_src src[];
} nir_intrinsic_instr;

static inline nir_variable *
nir_intrinsic_get_var(nir_intrinsic_instr *intrin, unsigned i)
{
   return nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[i]));
}

typedef enum {
   /* Memory ordering. */
   NIR_MEMORY_ACQUIRE = 1 << 0,
   NIR_MEMORY_RELEASE = 1 << 1,
   NIR_MEMORY_ACQ_REL = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE,

   /* Memory visibility operations. */
   NIR_MEMORY_MAKE_AVAILABLE = 1 << 2,
   NIR_MEMORY_MAKE_VISIBLE = 1 << 3,
} nir_memory_semantics;

typedef enum {
   NIR_SCOPE_NONE,
   NIR_SCOPE_INVOCATION,
   NIR_SCOPE_SUBGROUP,
   NIR_SCOPE_WORKGROUP,
   NIR_SCOPE_QUEUE_FAMILY,
   NIR_SCOPE_DEVICE,
} nir_scope;

/**
 * \name NIR intrinsics semantic flags
 *
 * Information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if none of its output
    * values are used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;

/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally instructions that take an offset src argument can encode
    * a constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access. This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE,

   /**
    * A binary nir_op to use when performing a reduction or scan operation
    */
   NIR_INTRINSIC_REDUCTION_OP,

   /**
    * Cluster size for reduction operations
    */
   NIR_INTRINSIC_CLUSTER_SIZE,

   /**
    * Parameter index for a load_param intrinsic
    */
   NIR_INTRINSIC_PARAM_IDX,

   /**
    * Image dimensionality for image intrinsics
    *
    * One of GLSL_SAMPLER_DIM_*
    */
   NIR_INTRINSIC_IMAGE_DIM,

   /**
    * Non-zero if we are accessing an array image
    */
   NIR_INTRINSIC_IMAGE_ARRAY,

   /**
    * Image format for image intrinsics
    */
   NIR_INTRINSIC_FORMAT,

   /**
    * Access qualifiers for image and memory access intrinsics
    */
   NIR_INTRINSIC_ACCESS,

   /**
    * Alignment for offsets and addresses
    *
    * These two parameters specify an alignment in terms of a multiplier and
    * an offset. The offset or address parameter X of the intrinsic is
    * guaranteed to satisfy the following:
    *
    *                (X - align_offset) % align_mul == 0
    */
   NIR_INTRINSIC_ALIGN_MUL,
   NIR_INTRINSIC_ALIGN_OFFSET,

   /**
    * The Vulkan descriptor type for a vulkan_resource_[re]index intrinsic.
    */
   NIR_INTRINSIC_DESC_TYPE,

   /**
    * The nir_alu_type of a uniform/input/output
    */
   NIR_INTRINSIC_TYPE,

   /**
    * The swizzle mask for the instructions
    * SwizzleInvocationsAMD and SwizzleInvocationsMaskedAMD
    */
   NIR_INTRINSIC_SWIZZLE_MASK,

   /* Separate source/dest access flags for copies */
   NIR_INTRINSIC_SRC_ACCESS,
   NIR_INTRINSIC_DST_ACCESS,

   /* Driver location for nir_load_patch_location_ir3 */
   NIR_INTRINSIC_DRIVER_LOCATION,

   /**
    * Mask of nir_memory_semantics, includes ordering and visibility.
    */
   NIR_INTRINSIC_MEMORY_SEMANTICS,

   /**
    * Mask of nir_variable_modes affected by the memory operation.
    */
   NIR_INTRINSIC_MEMORY_MODES,

   /**
    * Value of nir_scope.
    */
   NIR_INTRINSIC_MEMORY_SCOPE,

   /**
    * Value of nir_scope.
    */
   NIR_INTRINSIC_EXECUTION_SCOPE,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,

} nir_intrinsic_index_flag;

#define NIR_INTRINSIC_MAX_INPUTS 5

typedef struct {
   const char *name;

   uint8_t num_srcs; /** < number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr. If this value is -1, the
    * intrinsic consumes however many components are provided and it is not
    * validated at all.
    */
   int8_t src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   uint8_t dest_components;

   /** bitfield of legal bit sizes */
   uint8_t dest_bit_sizes;

   /** the number of constant indices used by the intrinsic */
   uint8_t num_indices;

   /** indicates the usage of intr->const_index[n] */
   uint8_t index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];

static inline unsigned
nir_intrinsic_src_components(const nir_intrinsic_instr *intr, unsigned srcn)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   assert(srcn < info->num_srcs);
   if (info->src_components[srcn] > 0)
      return info->src_components[srcn];
   else if (info->src_components[srcn] == 0)
      return intr->num_components;
   else
      return nir_src_num_components(intr->src[srcn]);
}

static inline unsigned
nir_intrinsic_dest_components(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
   if (!info->has_dest)
      return 0;
   else if (info->dest_components)
      return info->dest_components;
   else
      return intr->num_components;
}

/**
 * Helper to copy const_index[] from src to dst, without assuming they
 * match in order.
 */
static inline void
nir_intrinsic_copy_const_indices(nir_intrinsic_instr *dst, nir_intrinsic_instr *src)
{
   if (src->intrinsic == dst->intrinsic) {
      memcpy(dst->const_index, src->const_index, sizeof(dst->const_index));
      return;
   }

   const nir_intrinsic_info *src_info = &nir_intrinsic_infos[src->intrinsic];
   const nir_intrinsic_info *dst_info = &nir_intrinsic_infos[dst->intrinsic];

   for (unsigned i = 0; i < NIR_INTRINSIC_NUM_INDEX_FLAGS; i++) {
      if (src_info->index_map[i] == 0)
         continue;

      /* require that dst instruction also uses the same const_index[]: */
      assert(dst_info->index_map[i] > 0);

      dst->const_index[dst_info->index_map[i] - 1] =
         src->const_index[src_info->index_map[i] - 1];
   }
}
1842
1843 #define INTRINSIC_IDX_ACCESSORS(name, flag, type) \
1844 static inline type \
1845 nir_intrinsic_##name(const nir_intrinsic_instr *instr) \
1846 { \
1847 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1848 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1849 return (type)instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1]; \
1850 } \
1851 static inline void \
1852 nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val) \
1853 { \
1854 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1855 assert(info->index_map[NIR_INTRINSIC_##flag] > 0); \
1856 instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val; \
1857 } \
1858 static inline bool \
1859 nir_intrinsic_has_##name(nir_intrinsic_instr *instr) \
1860 { \
1861 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; \
1862 return info->index_map[NIR_INTRINSIC_##flag] > 0; \
1863 }
1864
1865 INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
1866 INTRINSIC_IDX_ACCESSORS(base, BASE, int)
1867 INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
1868 INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
1869 INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
1870 INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
1871 INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
1872 INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
1873 INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
1874 INTRINSIC_IDX_ACCESSORS(reduction_op, REDUCTION_OP, unsigned)
1875 INTRINSIC_IDX_ACCESSORS(cluster_size, CLUSTER_SIZE, unsigned)
1876 INTRINSIC_IDX_ACCESSORS(param_idx, PARAM_IDX, unsigned)
1877 INTRINSIC_IDX_ACCESSORS(image_dim, IMAGE_DIM, enum glsl_sampler_dim)
1878 INTRINSIC_IDX_ACCESSORS(image_array, IMAGE_ARRAY, bool)
1879 INTRINSIC_IDX_ACCESSORS(access, ACCESS, enum gl_access_qualifier)
1880 INTRINSIC_IDX_ACCESSORS(src_access, SRC_ACCESS, enum gl_access_qualifier)
1881 INTRINSIC_IDX_ACCESSORS(dst_access, DST_ACCESS, enum gl_access_qualifier)
1882 INTRINSIC_IDX_ACCESSORS(format, FORMAT, enum pipe_format)
1883 INTRINSIC_IDX_ACCESSORS(align_mul, ALIGN_MUL, unsigned)
1884 INTRINSIC_IDX_ACCESSORS(align_offset, ALIGN_OFFSET, unsigned)
1885 INTRINSIC_IDX_ACCESSORS(desc_type, DESC_TYPE, unsigned)
1886 INTRINSIC_IDX_ACCESSORS(type, TYPE, nir_alu_type)
1887 INTRINSIC_IDX_ACCESSORS(swizzle_mask, SWIZZLE_MASK, unsigned)
1888 INTRINSIC_IDX_ACCESSORS(driver_location, DRIVER_LOCATION, unsigned)
1889 INTRINSIC_IDX_ACCESSORS(memory_semantics, MEMORY_SEMANTICS, nir_memory_semantics)
1890 INTRINSIC_IDX_ACCESSORS(memory_modes, MEMORY_MODES, nir_variable_mode)
1891 INTRINSIC_IDX_ACCESSORS(memory_scope, MEMORY_SCOPE, nir_scope)
1892 INTRINSIC_IDX_ACCESSORS(execution_scope, EXECUTION_SCOPE, nir_scope)
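
/* Example (editor's sketch): for an intrinsic such as store_output, which
 * defines BASE, WRMASK and COMPONENT indices, the accessors generated above
 * can be used like this ("store" is assumed to point at such an
 * instruction):
 *
 *    unsigned wrmask = nir_intrinsic_write_mask(store);
 *    nir_intrinsic_set_base(store, nir_intrinsic_base(store) + 4);
 *    if (nir_intrinsic_has_component(store)) {
 *       unsigned comp = nir_intrinsic_component(store);
 *       ...
 *    }
 */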
1893
1894 static inline void
1895 nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
1896 unsigned align_mul, unsigned align_offset)
1897 {
1898 assert(util_is_power_of_two_nonzero(align_mul));
1899 assert(align_offset < align_mul);
1900 nir_intrinsic_set_align_mul(intrin, align_mul);
1901 nir_intrinsic_set_align_offset(intrin, align_offset);
1902 }
1903
1904 /** Returns a simple alignment for a load/store intrinsic offset
1905 *
1906 * Instead of the full mul+offset alignment scheme provided by the ALIGN_MUL
1907 * and ALIGN_OFFSET parameters, this helper takes both into account and
1908 * provides a single simple alignment parameter. The offset X is guaranteed
1909 * to satisfy X % align == 0.
1910 */
1911 static inline unsigned
1912 nir_intrinsic_align(const nir_intrinsic_instr *intrin)
1913 {
1914 const unsigned align_mul = nir_intrinsic_align_mul(intrin);
1915 const unsigned align_offset = nir_intrinsic_align_offset(intrin);
1916 assert(align_offset < align_mul);
1917 return align_offset ? 1 << (ffs(align_offset) - 1) : align_mul;
1918 }
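
/* Worked example (editor's note): align_mul=16, align_offset=4 means the
 * offset is congruent to 4 mod 16 (4, 20, 36, ...).  Every such offset is a
 * multiple of 4, and the helper above returns 1 << (ffs(4) - 1) == 4.  With
 * align_offset == 0, it returns align_mul itself.
 */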
1919
1920 unsigned
1921 nir_image_intrinsic_coord_components(const nir_intrinsic_instr *instr);
1922
1923 /* Converts an image_deref_* intrinsic into an image_* one */
1924 void nir_rewrite_image_intrinsic(nir_intrinsic_instr *instr,
1925 nir_ssa_def *handle, bool bindless);
1926
1927 /* Determine if an intrinsic can be arbitrarily reordered and eliminated. */
1928 static inline bool
1929 nir_intrinsic_can_reorder(nir_intrinsic_instr *instr)
1930 {
1931 if (instr->intrinsic == nir_intrinsic_load_deref ||
1932 instr->intrinsic == nir_intrinsic_load_ssbo ||
1933 instr->intrinsic == nir_intrinsic_bindless_image_load ||
1934 instr->intrinsic == nir_intrinsic_image_deref_load ||
1935 instr->intrinsic == nir_intrinsic_image_load) {
1936 return nir_intrinsic_access(instr) & ACCESS_CAN_REORDER;
1937 } else {
1938 const nir_intrinsic_info *info =
1939 &nir_intrinsic_infos[instr->intrinsic];
1940 return (info->flags & NIR_INTRINSIC_CAN_ELIMINATE) &&
1941 (info->flags & NIR_INTRINSIC_CAN_REORDER);
1942 }
1943 }
1944
1945 /**
1946 * \group texture information
1947 *
1948 * This gives semantic information about textures which is useful to the
1949 * frontend, the backend, and lowering passes, but not the optimizer.
1950 */
1951
1952 typedef enum {
1953 nir_tex_src_coord,
1954 nir_tex_src_projector,
1955 nir_tex_src_comparator, /* shadow comparator */
1956 nir_tex_src_offset,
1957 nir_tex_src_bias,
1958 nir_tex_src_lod,
1959 nir_tex_src_min_lod,
1960 nir_tex_src_ms_index, /* MSAA sample index */
1961 nir_tex_src_ms_mcs, /* MSAA compression value */
1962 nir_tex_src_ddx,
1963 nir_tex_src_ddy,
1964 nir_tex_src_texture_deref, /* < deref pointing to the texture */
1965 nir_tex_src_sampler_deref, /* < deref pointing to the sampler */
1966 nir_tex_src_texture_offset, /* < dynamically uniform indirect offset */
1967 nir_tex_src_sampler_offset, /* < dynamically uniform indirect offset */
1968 nir_tex_src_texture_handle, /* < bindless texture handle */
1969 nir_tex_src_sampler_handle, /* < bindless sampler handle */
1970 nir_tex_src_plane, /* < selects plane for planar textures */
1971 nir_num_tex_src_types
1972 } nir_tex_src_type;
1973
1974 typedef struct {
1975 nir_src src;
1976 nir_tex_src_type src_type;
1977 } nir_tex_src;
1978
1979 typedef enum {
1980 nir_texop_tex, /**< Regular texture look-up */
1981 nir_texop_txb, /**< Texture look-up with LOD bias */
1982 nir_texop_txl, /**< Texture look-up with explicit LOD */
1983 nir_texop_txd, /**< Texture look-up with partial derivatives */
1984 nir_texop_txf, /**< Texel fetch with explicit LOD */
1985 nir_texop_txf_ms, /**< Multisample texture fetch */
1986 nir_texop_txf_ms_fb, /**< Multisample texture fetch from framebuffer */
1987 nir_texop_txf_ms_mcs, /**< Multisample compression value fetch */
1988 nir_texop_txs, /**< Texture size */
1989 nir_texop_lod, /**< Texture lod query */
1990 nir_texop_tg4, /**< Texture gather */
1991 nir_texop_query_levels, /**< Texture levels query */
1992 nir_texop_texture_samples, /**< Texture samples query */
1993 nir_texop_samples_identical, /**< Query whether all samples are definitely
1994 * identical.
1995 */
1996 nir_texop_tex_prefetch, /**< Regular texture look-up, eligible for pre-dispatch */
1997 nir_texop_fragment_fetch, /**< Multisample fragment color texture fetch */
1998 nir_texop_fragment_mask_fetch,/**< Multisample fragment mask texture fetch */
1999 } nir_texop;
2000
2001 typedef struct {
2002 nir_instr instr;
2003
2004 enum glsl_sampler_dim sampler_dim;
2005 nir_alu_type dest_type;
2006
2007 nir_texop op;
2008 nir_dest dest;
2009 nir_tex_src *src;
2010 unsigned num_srcs, coord_components;
2011 bool is_array, is_shadow;
2012
2013 /**
2014 * If is_shadow is true, whether this is the old-style shadow that outputs 4
2015 * components or the new-style shadow that outputs 1 component.
2016 */
2017 bool is_new_style_shadow;
2018
2019 /* gather component selector */
2020 unsigned component : 2;
2021
2022 /* gather offsets */
2023 int8_t tg4_offsets[4][2];
2024
2025 /* True if the texture index or handle is not dynamically uniform */
2026 bool texture_non_uniform;
2027
2028 /* True if the sampler index or handle is not dynamically uniform */
2029 bool sampler_non_uniform;
2030
2031 /** The texture index
2032 *
2033 * If this texture instruction has a nir_tex_src_texture_offset source,
2034 * then the texture index is given by texture_index + texture_offset.
2035 */
2036 unsigned texture_index;
2037
2038 /** The sampler index
2039 *
2040 * The following operations do not require a sampler and, as such, this
2041 * field should be ignored:
2042 * - nir_texop_txf
2043 * - nir_texop_txf_ms
2044 * - nir_texop_txs
2045 * - nir_texop_lod
2046 * - nir_texop_query_levels
2047 * - nir_texop_texture_samples
2048 * - nir_texop_samples_identical
2049 *
2050 * If this texture instruction has a nir_tex_src_sampler_offset source,
2051 * then the sampler index is given by sampler_index + sampler_offset.
2052 */
2053 unsigned sampler_index;
2054 } nir_tex_instr;
2055
2056 /*
2057 * Returns true if the texture operation requires a sampler as a general
2058 * rule; see the documentation of sampler_index.
2059 *
2060 * Note that a specific hw/driver backend may still require a sampler
2061 * object/configuration packet in any case, for some other reason.
2062 */
2063 static inline bool
2064 nir_tex_instr_need_sampler(const nir_tex_instr *instr)
2065 {
2066 switch (instr->op) {
2067 case nir_texop_txf:
2068 case nir_texop_txf_ms:
2069 case nir_texop_txs:
2070 case nir_texop_lod:
2071 case nir_texop_query_levels:
2072 case nir_texop_texture_samples:
2073 case nir_texop_samples_identical:
2074 return false;
2075 default:
2076 return true;
2077 }
2078 }
2079
2080 static inline unsigned
2081 nir_tex_instr_dest_size(const nir_tex_instr *instr)
2082 {
2083 switch (instr->op) {
2084 case nir_texop_txs: {
2085 unsigned ret;
2086 switch (instr->sampler_dim) {
2087 case GLSL_SAMPLER_DIM_1D:
2088 case GLSL_SAMPLER_DIM_BUF:
2089 ret = 1;
2090 break;
2091 case GLSL_SAMPLER_DIM_2D:
2092 case GLSL_SAMPLER_DIM_CUBE:
2093 case GLSL_SAMPLER_DIM_MS:
2094 case GLSL_SAMPLER_DIM_RECT:
2095 case GLSL_SAMPLER_DIM_EXTERNAL:
2096 case GLSL_SAMPLER_DIM_SUBPASS:
2097 ret = 2;
2098 break;
2099 case GLSL_SAMPLER_DIM_3D:
2100 ret = 3;
2101 break;
2102 default:
2103 unreachable("not reached");
2104 }
2105 if (instr->is_array)
2106 ret++;
2107 return ret;
2108 }
2109
2110 case nir_texop_lod:
2111 return 2;
2112
2113 case nir_texop_texture_samples:
2114 case nir_texop_query_levels:
2115 case nir_texop_samples_identical:
2116 case nir_texop_fragment_mask_fetch:
2117 return 1;
2118
2119 default:
2120 if (instr->is_shadow && instr->is_new_style_shadow)
2121 return 1;
2122
2123 return 4;
2124 }
2125 }
2126
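/* Worked examples (editor's note): txs on a 2D array texture returns
 * (width, height, layers), i.e. 3 components; query_levels returns 1; a
 * regular tex on a non-shadow sampler returns 4.
 */
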
2127 /* Returns true if this texture operation queries something about the texture
2128 * rather than actually sampling it.
2129 */
2130 static inline bool
2131 nir_tex_instr_is_query(const nir_tex_instr *instr)
2132 {
2133 switch (instr->op) {
2134 case nir_texop_txs:
2135 case nir_texop_lod:
2136 case nir_texop_texture_samples:
2137 case nir_texop_query_levels:
2138 case nir_texop_txf_ms_mcs:
2139 return true;
2140 case nir_texop_tex:
2141 case nir_texop_txb:
2142 case nir_texop_txl:
2143 case nir_texop_txd:
2144 case nir_texop_txf:
2145 case nir_texop_txf_ms:
2146 case nir_texop_txf_ms_fb:
2147 case nir_texop_tg4:
2148 return false;
2149 default:
2150 unreachable("Invalid texture opcode");
2151 }
2152 }
2153
2154 static inline bool
2155 nir_tex_instr_has_implicit_derivative(const nir_tex_instr *instr)
2156 {
2157 switch (instr->op) {
2158 case nir_texop_tex:
2159 case nir_texop_txb:
2160 case nir_texop_lod:
2161 return true;
2162 default:
2163 return false;
2164 }
2165 }
2166
2167 static inline nir_alu_type
2168 nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
2169 {
2170 switch (instr->src[src].src_type) {
2171 case nir_tex_src_coord:
2172 switch (instr->op) {
2173 case nir_texop_txf:
2174 case nir_texop_txf_ms:
2175 case nir_texop_txf_ms_fb:
2176 case nir_texop_txf_ms_mcs:
2177 case nir_texop_samples_identical:
2178 return nir_type_int;
2179
2180 default:
2181 return nir_type_float;
2182 }
2183
2184 case nir_tex_src_lod:
2185 switch (instr->op) {
2186 case nir_texop_txs:
2187 case nir_texop_txf:
2188 return nir_type_int;
2189
2190 default:
2191 return nir_type_float;
2192 }
2193
2194 case nir_tex_src_projector:
2195 case nir_tex_src_comparator:
2196 case nir_tex_src_bias:
2197 case nir_tex_src_min_lod:
2198 case nir_tex_src_ddx:
2199 case nir_tex_src_ddy:
2200 return nir_type_float;
2201
2202 case nir_tex_src_offset:
2203 case nir_tex_src_ms_index:
2204 case nir_tex_src_plane:
2205 return nir_type_int;
2206
2207 case nir_tex_src_ms_mcs:
2208 case nir_tex_src_texture_deref:
2209 case nir_tex_src_sampler_deref:
2210 case nir_tex_src_texture_offset:
2211 case nir_tex_src_sampler_offset:
2212 case nir_tex_src_texture_handle:
2213 case nir_tex_src_sampler_handle:
2214 return nir_type_uint;
2215
2216 case nir_num_tex_src_types:
2217 unreachable("nir_num_tex_src_types is not a valid source type");
2218 }
2219
2220 unreachable("Invalid texture source type");
2221 }
2222
2223 static inline unsigned
2224 nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
2225 {
2226 if (instr->src[src].src_type == nir_tex_src_coord)
2227 return instr->coord_components;
2228
2229 /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
2230 if (instr->src[src].src_type == nir_tex_src_ms_mcs)
2231 return 4;
2232
2233 if (instr->src[src].src_type == nir_tex_src_ddx ||
2234 instr->src[src].src_type == nir_tex_src_ddy) {
2235 if (instr->is_array)
2236 return instr->coord_components - 1;
2237 else
2238 return instr->coord_components;
2239 }
2240
2241 /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
2242 * the offset, since a cube maps to a single face.
2243 */
2244 if (instr->src[src].src_type == nir_tex_src_offset) {
2245 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
2246 return 2;
2247 else if (instr->is_array)
2248 return instr->coord_components - 1;
2249 else
2250 return instr->coord_components;
2251 }
2252
2253 return 1;
2254 }
2255
2256 static inline int
2257 nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
2258 {
2259 for (unsigned i = 0; i < instr->num_srcs; i++)
2260 if (instr->src[i].src_type == type)
2261 return (int) i;
2262
2263 return -1;
2264 }
2265
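/* Example (editor's sketch): finding a texture instruction's LOD source
 * and, when it is constant, reading its value:
 *
 *    int lod_idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
 *    if (lod_idx >= 0 && nir_src_is_const(tex->src[lod_idx].src)) {
 *       int64_t lod = nir_src_as_int(tex->src[lod_idx].src);
 *       ...
 *    }
 */
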
2266 void nir_tex_instr_add_src(nir_tex_instr *tex,
2267 nir_tex_src_type src_type,
2268 nir_src src);
2269
2270 void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
2271
2272 bool nir_tex_instr_has_explicit_tg4_offsets(nir_tex_instr *tex);
2273
2274 typedef struct {
2275 nir_instr instr;
2276
2277 nir_ssa_def def;
2278
2279 nir_const_value value[];
2280 } nir_load_const_instr;
2281
2282 typedef enum {
2283 /** Return from a function
2284 *
2285 * This instruction is a classic function return. It jumps to
2286 * nir_function_impl::end_block. No return value is provided in this
2287 * instruction. Instead, the function is expected to write any return
2288 * data to a deref passed in from the caller.
2289 */
2290 nir_jump_return,
2291
2292 /** Break out of the inner-most loop
2293 *
2294 * This has the same semantics as C's "break" statement.
2295 */
2296 nir_jump_break,
2297
2298 /** Jump back to the top of the inner-most loop
2299 *
2300 * This has the same semantics as C's "continue" statement assuming that a
2301 * NIR loop is implemented as "while (1) { body }".
2302 */
2303 nir_jump_continue,
2304
2305 /** Jumps for unstructured CFG.
2306 *
2307 * Because we can't rely on block ordering within an unstructured CFG, we
2308 * need to place explicit jumps at the end of every block.
2309 */
2310 nir_jump_goto,
2311 nir_jump_goto_if,
2312 } nir_jump_type;
2313
2314 typedef struct {
2315 nir_instr instr;
2316 nir_jump_type type;
2317 nir_src condition;
2318 struct nir_block *target;
2319 struct nir_block *else_target;
2320 } nir_jump_instr;
2321
2322 /* creates a new SSA variable in an undefined state */
2323
2324 typedef struct {
2325 nir_instr instr;
2326 nir_ssa_def def;
2327 } nir_ssa_undef_instr;
2328
2329 typedef struct {
2330 struct exec_node node;
2331
2332 /* The predecessor block corresponding to this source */
2333 struct nir_block *pred;
2334
2335 nir_src src;
2336 } nir_phi_src;
2337
2338 #define nir_foreach_phi_src(phi_src, phi) \
2339 foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
2340 #define nir_foreach_phi_src_safe(phi_src, phi) \
2341 foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)
2342
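/* Example (editor's sketch): finding the value a phi takes when entered
 * from a given predecessor block:
 *
 *    nir_foreach_phi_src(phi_src, phi) {
 *       if (phi_src->pred == pred_block)
 *          return phi_src->src.ssa;
 *    }
 */
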
2343 typedef struct {
2344 nir_instr instr;
2345
2346 struct exec_list srcs; /** < list of nir_phi_src */
2347
2348 nir_dest dest;
2349 } nir_phi_instr;
2350
2351 typedef struct {
2352 struct exec_node node;
2353 nir_src src;
2354 nir_dest dest;
2355 } nir_parallel_copy_entry;
2356
2357 #define nir_foreach_parallel_copy_entry(entry, pcopy) \
2358 foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)
2359
2360 typedef struct {
2361 nir_instr instr;
2362
2363 /* A list of nir_parallel_copy_entrys. The sources of all of the
2364 * entries are copied to the corresponding destinations "in parallel".
2365 * In other words, if we have two entries: a -> b and b -> a, the values
2366 * get swapped.
2367 */
2368 struct exec_list entries;
2369 } nir_parallel_copy_instr;
2370
2371 NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
2372 type, nir_instr_type_alu)
2373 NIR_DEFINE_CAST(nir_instr_as_deref, nir_instr, nir_deref_instr, instr,
2374 type, nir_instr_type_deref)
2375 NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
2376 type, nir_instr_type_call)
2377 NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
2378 type, nir_instr_type_jump)
2379 NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
2380 type, nir_instr_type_tex)
2381 NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
2382 type, nir_instr_type_intrinsic)
2383 NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
2384 type, nir_instr_type_load_const)
2385 NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
2386 type, nir_instr_type_ssa_undef)
2387 NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
2388 type, nir_instr_type_phi)
2389 NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
2390 nir_parallel_copy_instr, instr,
2391 type, nir_instr_type_parallel_copy)
2392
2393
2394 #define NIR_DEFINE_SRC_AS_CONST(type, suffix) \
2395 static inline type \
2396 nir_src_comp_as_##suffix(nir_src src, unsigned comp) \
2397 { \
2398 assert(nir_src_is_const(src)); \
2399 nir_load_const_instr *load = \
2400 nir_instr_as_load_const(src.ssa->parent_instr); \
2401 assert(comp < load->def.num_components); \
2402 return nir_const_value_as_##suffix(load->value[comp], \
2403 load->def.bit_size); \
2404 } \
2405 \
2406 static inline type \
2407 nir_src_as_##suffix(nir_src src) \
2408 { \
2409 assert(nir_src_num_components(src) == 1); \
2410 return nir_src_comp_as_##suffix(src, 0); \
2411 }
2412
2413 NIR_DEFINE_SRC_AS_CONST(int64_t, int)
2414 NIR_DEFINE_SRC_AS_CONST(uint64_t, uint)
2415 NIR_DEFINE_SRC_AS_CONST(bool, bool)
2416 NIR_DEFINE_SRC_AS_CONST(double, float)
2417
2418 #undef NIR_DEFINE_SRC_AS_CONST
2419
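/* Example (editor's sketch): constant-folding a shift amount, assuming the
 * second ALU source is a scalar:
 *
 *    if (nir_src_is_const(alu->src[1].src) &&
 *        nir_src_num_components(alu->src[1].src) == 1) {
 *       uint64_t shift = nir_src_as_uint(alu->src[1].src);
 *       ...
 *    }
 */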
2420
2421 typedef struct {
2422 nir_ssa_def *def;
2423 unsigned comp;
2424 } nir_ssa_scalar;
2425
2426 static inline bool
2427 nir_ssa_scalar_is_const(nir_ssa_scalar s)
2428 {
2429 return s.def->parent_instr->type == nir_instr_type_load_const;
2430 }
2431
2432 static inline nir_const_value
2433 nir_ssa_scalar_as_const_value(nir_ssa_scalar s)
2434 {
2435 assert(s.comp < s.def->num_components);
2436 nir_load_const_instr *load = nir_instr_as_load_const(s.def->parent_instr);
2437 return load->value[s.comp];
2438 }
2439
2440 #define NIR_DEFINE_SCALAR_AS_CONST(type, suffix) \
2441 static inline type \
2442 nir_ssa_scalar_as_##suffix(nir_ssa_scalar s) \
2443 { \
2444 return nir_const_value_as_##suffix( \
2445 nir_ssa_scalar_as_const_value(s), s.def->bit_size); \
2446 }
2447
2448 NIR_DEFINE_SCALAR_AS_CONST(int64_t, int)
2449 NIR_DEFINE_SCALAR_AS_CONST(uint64_t, uint)
2450 NIR_DEFINE_SCALAR_AS_CONST(bool, bool)
2451 NIR_DEFINE_SCALAR_AS_CONST(double, float)
2452
2453 #undef NIR_DEFINE_SCALAR_AS_CONST
2454
2455 static inline bool
2456 nir_ssa_scalar_is_alu(nir_ssa_scalar s)
2457 {
2458 return s.def->parent_instr->type == nir_instr_type_alu;
2459 }
2460
2461 static inline nir_op
2462 nir_ssa_scalar_alu_op(nir_ssa_scalar s)
2463 {
2464 return nir_instr_as_alu(s.def->parent_instr)->op;
2465 }
2466
2467 static inline nir_ssa_scalar
2468 nir_ssa_scalar_chase_alu_src(nir_ssa_scalar s, unsigned alu_src_idx)
2469 {
2470 nir_ssa_scalar out = { NULL, 0 };
2471
2472 nir_alu_instr *alu = nir_instr_as_alu(s.def->parent_instr);
2473 assert(alu_src_idx < nir_op_infos[alu->op].num_inputs);
2474
2475 /* Our component must be written */
2476 assert(s.comp < s.def->num_components);
2477 assert(alu->dest.write_mask & (1u << s.comp));
2478
2479 assert(alu->src[alu_src_idx].src.is_ssa);
2480 out.def = alu->src[alu_src_idx].src.ssa;
2481
2482 if (nir_op_infos[alu->op].input_sizes[alu_src_idx] == 0) {
2483 /* The ALU src is unsized so the source component follows the
2484 * destination component.
2485 */
2486 out.comp = alu->src[alu_src_idx].swizzle[s.comp];
2487 } else {
2488 /* This is a sized source so all source components work together to
2489 * produce all the destination components. Since we need to return a
2490 * scalar, this only works if the source is a scalar.
2491 */
2492 assert(nir_op_infos[alu->op].input_sizes[alu_src_idx] == 1);
2493 out.comp = alu->src[alu_src_idx].swizzle[0];
2494 }
2495 assert(out.comp < out.def->num_components);
2496
2497 return out;
2498 }
2499
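/* Example (editor's sketch): checking whether component 0 of some
 * nir_ssa_def *def comes from an iadd and, if so, whether the add's second
 * operand is a constant:
 *
 *    nir_ssa_scalar s = { def, 0 };
 *    if (nir_ssa_scalar_is_alu(s) &&
 *        nir_ssa_scalar_alu_op(s) == nir_op_iadd) {
 *       nir_ssa_scalar rhs = nir_ssa_scalar_chase_alu_src(s, 1);
 *       if (nir_ssa_scalar_is_const(rhs)) {
 *          uint64_t v = nir_ssa_scalar_as_uint(rhs);
 *          ...
 *       }
 *    }
 */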
2500
2501 /*
2502 * Control flow
2503 *
2504 * Control flow consists of a tree of control flow nodes, which include
2505 * if-statements and loops. The leaves of the tree are basic blocks, lists of
2506 * instructions that always run start-to-finish. Each basic block also keeps
2507 * track of its successors (blocks which may run immediately after the current
2508 * block) and predecessors (blocks which could have run immediately before the
2509 * current block). Each function also has a start block and an end block
2510 * (always empty) that all return statements point to. Together, all the
2511 * blocks with their predecessors and successors make up the control flow
2512 * graph (CFG) of the function. There are helpers that modify the tree of
2513 * control flow nodes while modifying the CFG appropriately; these should be
2514 * used instead of modifying the tree directly.
2515 */
2516
2517 typedef enum {
2518 nir_cf_node_block,
2519 nir_cf_node_if,
2520 nir_cf_node_loop,
2521 nir_cf_node_function
2522 } nir_cf_node_type;
2523
2524 typedef struct nir_cf_node {
2525 struct exec_node node;
2526 nir_cf_node_type type;
2527 struct nir_cf_node *parent;
2528 } nir_cf_node;
2529
2530 typedef struct nir_block {
2531 nir_cf_node cf_node;
2532
2533 struct exec_list instr_list; /** < list of nir_instr */
2534
2535 /** generic block index; generated by nir_index_blocks */
2536 unsigned index;
2537
2538 /*
2539 * Each block can only have up to 2 successors, so we put them in a simple
2540 * array - no need for anything more complicated.
2541 */
2542 struct nir_block *successors[2];
2543
2544 /* Set of nir_block predecessors in the CFG */
2545 struct set *predecessors;
2546
2547 /*
2548 * This node's immediate dominator in the dominance tree; set to NULL for
2549 * the start block.
2550 */
2551 struct nir_block *imm_dom;
2552
2553 /* This node's children in the dominance tree */
2554 unsigned num_dom_children;
2555 struct nir_block **dom_children;
2556
2557 /* Set of nir_blocks on the dominance frontier of this block */
2558 struct set *dom_frontier;
2559
2560 /*
2561 * These two indices have the property that dom_{pre,post}_index for each
2562 * child of this block in the dominance tree will always be between
2563 * dom_pre_index and dom_post_index for this block, which makes testing if
2564 * a given block is dominated by another block an O(1) operation.
2565 */
2566 int16_t dom_pre_index, dom_post_index;
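
/* Editor's note: given valid dominance metadata, "a dominates b" can be
 * tested in O(1) as:
 *
 *    a->dom_pre_index <= b->dom_pre_index &&
 *    a->dom_post_index >= b->dom_post_index
 */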
2567
2568 /* live in and out for this block; used for liveness analysis */
2569 BITSET_WORD *live_in;
2570 BITSET_WORD *live_out;
2571 } nir_block;
2572
2573 static inline bool
2574 nir_block_is_reachable(nir_block *b)
2575 {
2576 /* See also nir_block_dominates */
2577 return b->dom_post_index != -1;
2578 }
2579
2580 static inline nir_instr *
2581 nir_block_first_instr(nir_block *block)
2582 {
2583 struct exec_node *head = exec_list_get_head(&block->instr_list);
2584 return exec_node_data(nir_instr, head, node);
2585 }
2586
2587 static inline nir_instr *
2588 nir_block_last_instr(nir_block *block)
2589 {
2590 struct exec_node *tail = exec_list_get_tail(&block->instr_list);
2591 return exec_node_data(nir_instr, tail, node);
2592 }
2593
2594 static inline bool
2595 nir_block_ends_in_jump(nir_block *block)
2596 {
2597 return !exec_list_is_empty(&block->instr_list) &&
2598 nir_block_last_instr(block)->type == nir_instr_type_jump;
2599 }
2600
2601 #define nir_foreach_instr(instr, block) \
2602 foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
2603 #define nir_foreach_instr_reverse(instr, block) \
2604 foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
2605 #define nir_foreach_instr_safe(instr, block) \
2606 foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
2607 #define nir_foreach_instr_reverse_safe(instr, block) \
2608 foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)
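
/* Example (editor's sketch): scanning a block for texture instructions;
 * the _safe variant tolerates removing or replacing the current
 * instruction (lower_tex() here stands in for pass-specific code):
 *
 *    nir_foreach_instr_safe(instr, block) {
 *       if (instr->type == nir_instr_type_tex)
 *          lower_tex(nir_instr_as_tex(instr));
 *    }
 */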
2609
2610 typedef enum {
2611 nir_selection_control_none = 0x0,
2612 nir_selection_control_flatten = 0x1,
2613 nir_selection_control_dont_flatten = 0x2,
2614 } nir_selection_control;
2615
2616 typedef struct nir_if {
2617 nir_cf_node cf_node;
2618 nir_src condition;
2619 nir_selection_control control;
2620
2621 struct exec_list then_list; /** < list of nir_cf_node */
2622 struct exec_list else_list; /** < list of nir_cf_node */
2623 } nir_if;
2624
2625 typedef struct {
2626 nir_if *nif;
2627
2628 /** Instruction that generates nif::condition. */
2629 nir_instr *conditional_instr;
2630
2631 /** Block within ::nif that has the break instruction. */
2632 nir_block *break_block;
2633
2634 /** Last block for the then- or else-path that does not contain the break. */
2635 nir_block *continue_from_block;
2636
2637 /** True when ::break_block is in the else-path of ::nif. */
2638 bool continue_from_then;
2639 bool induction_rhs;
2640
2641 /* This is true if the terminator's exact trip count is unknown. For
2642 * example:
2643 *
2644 * for (int i = 0; i < imin(x, 4); i++)
2645 * ...
2646 *
2647 * Here loop analysis would have set a max_trip_count of 4; however, we
2648 * don't know for sure that this is the exact trip count.
2649 */
2650 bool exact_trip_count_unknown;
2651
2652 struct list_head loop_terminator_link;
2653 } nir_loop_terminator;
2654
2655 typedef struct {
2656 /* Estimated cost (in number of instructions) of the loop */
2657 unsigned instr_cost;
2658
2659 /* Guessed trip count based on array indexing */
2660 unsigned guessed_trip_count;
2661
2662 /* Maximum number of times the loop is run (if known) */
2663 unsigned max_trip_count;
2664
2665 /* Do we know the exact number of times the loop will be run? */
2666 bool exact_trip_count_known;
2667
2668 /* Unroll the loop regardless of its size */
2669 bool force_unroll;
2670
2671 /* Does the loop contain complex loop terminators, continues, or other
2672 * complex behaviours? If this is true, we can't rely on
2673 * loop_terminator_list to be complete or accurate.
2674 */
2675 bool complex_loop;
2676
2677 nir_loop_terminator *limiting_terminator;
2678
2679 /* A list of loop_terminators terminating this loop. */
2680 struct list_head loop_terminator_list;
2681 } nir_loop_info;
2682
2683 typedef enum {
2684 nir_loop_control_none = 0x0,
2685 nir_loop_control_unroll = 0x1,
2686 nir_loop_control_dont_unroll = 0x2,
2687 } nir_loop_control;
2688
2689 typedef struct {
2690 nir_cf_node cf_node;
2691
2692 struct exec_list body; /** < list of nir_cf_node */
2693
2694 nir_loop_info *info;
2695 nir_loop_control control;
2696 bool partially_unrolled;
2697 } nir_loop;
2698
2699 /**
2700 * Various bits of metadata that may be created or required by
2701 * optimization and analysis passes
2702 */
2703 typedef enum {
2704 nir_metadata_none = 0x0,
2705
2706 /** Indicates that nir_block::index values are valid.
2707 *
2708 * The start block has index 0 and they increase through a natural walk of
2709 * the CFG. nir_function_impl::num_blocks is the number of blocks and
2710 * every block index is in the range [0, nir_function_impl::num_blocks].
2711 *
2712 * A pass can preserve this metadata type if it doesn't touch the CFG.
2713 */
2714 nir_metadata_block_index = 0x1,
2715
2716 /** Indicates that block dominance information is valid
2717 *
2718 * This includes:
2719 *
2720 * - nir_block::num_dom_children
2721 * - nir_block::dom_children
2722 * - nir_block::dom_frontier
2723 * - nir_block::dom_pre_index
2724 * - nir_block::dom_post_index
2725 *
2726 * A pass can preserve this metadata type if it doesn't touch the CFG.
2727 */
2728 nir_metadata_dominance = 0x2,
2729
2730 /** Indicates that SSA def data-flow liveness information is valid
2731 *
2732 * This includes:
2733 *
2734 * - nir_ssa_def::live_index
2735 * - nir_block::live_in
2736 * - nir_block::live_out
2737 *
2738 * A pass can preserve this metadata type if it never adds or removes any
2739 * SSA defs (most passes shouldn't preserve this metadata type).
2740 */
2741 nir_metadata_live_ssa_defs = 0x4,
2742
2743 /** A dummy metadata value to track when a pass forgot to call
2744 * nir_metadata_preserve.
2745 *
2746 * A pass should always clear this value, even if it doesn't make any
2747 * progress, to indicate that it thought about preserving metadata.
2748 */
2749 nir_metadata_not_properly_reset = 0x8,
2750
2751 /** Indicates that loop analysis information is valid.
2752 *
2753 * This includes everything pointed to by nir_loop::info.
2754 *
2755 * A pass can preserve this metadata type if it is guaranteed to not affect
2756 * any loop metadata. However, since loop metadata includes things like
2757 * loop counts which depend on arithmetic in the loop, this is very hard to
2758 * determine. Most passes shouldn't preserve this metadata type.
2759 */
2760 nir_metadata_loop_analysis = 0x10,
2761
2762 /** All metadata
2763 *
2764 * This includes all nir_metadata flags except not_properly_reset. Passes
2765 * which do not change the shader in any way should call
2766 *
2767 * nir_metadata_preserve(impl, nir_metadata_all);
2768 */
2769 nir_metadata_all = ~nir_metadata_not_properly_reset,
2770 } nir_metadata;
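
/* Example (editor's sketch): the usual pass epilogue, keeping CFG-level
 * metadata when the pass made progress and everything otherwise:
 *
 *    if (progress) {
 *       nir_metadata_preserve(impl, nir_metadata_block_index |
 *                                   nir_metadata_dominance);
 *    } else {
 *       nir_metadata_preserve(impl, nir_metadata_all);
 *    }
 */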
2771
2772 typedef struct {
2773 nir_cf_node cf_node;
2774
2775 /** pointer to the function of which this is an implementation */
2776 struct nir_function *function;
2777
2778 struct exec_list body; /** < list of nir_cf_node */
2779
2780 nir_block *end_block;
2781
2782 /** list of all local variables in the function */
2783 struct exec_list locals;
2784
2785 /** list of local registers in the function */
2786 struct exec_list registers;
2787
2788 /** next available local register index */
2789 unsigned reg_alloc;
2790
2791 /** next available SSA value index */
2792 unsigned ssa_alloc;
2793
2794 /* total number of basic blocks; only valid when nir_metadata_block_index is valid */
2795 unsigned num_blocks;
2796
2797 /** True if this nir_function_impl uses structured control-flow
2798 *
2799 * Structured nir_function_impls have different validation rules.
2800 */
2801 bool structured;
2802
2803 nir_metadata valid_metadata;
2804 } nir_function_impl;
2805
2806 #define nir_foreach_function_temp_variable(var, impl) \
2807 foreach_list_typed(nir_variable, var, node, &(impl)->locals)
2808
2809 #define nir_foreach_function_temp_variable_safe(var, impl) \
2810 foreach_list_typed_safe(nir_variable, var, node, &(impl)->locals)
2811
2812 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
2813 nir_start_block(nir_function_impl *impl)
2814 {
2815 return (nir_block *) impl->body.head_sentinel.next;
2816 }
2817
2818 ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
2819 nir_impl_last_block(nir_function_impl *impl)
2820 {
2821 return (nir_block *) impl->body.tail_sentinel.prev;
2822 }
2823
2824 static inline nir_cf_node *
2825 nir_cf_node_next(nir_cf_node *node)
2826 {
2827 struct exec_node *next = exec_node_get_next(&node->node);
2828 if (exec_node_is_tail_sentinel(next))
2829 return NULL;
2830 else
2831 return exec_node_data(nir_cf_node, next, node);
2832 }
2833
2834 static inline nir_cf_node *
2835 nir_cf_node_prev(nir_cf_node *node)
2836 {
2837 struct exec_node *prev = exec_node_get_prev(&node->node);
2838 if (exec_node_is_head_sentinel(prev))
2839 return NULL;
2840 else
2841 return exec_node_data(nir_cf_node, prev, node);
2842 }
2843
2844 static inline bool
2845 nir_cf_node_is_first(const nir_cf_node *node)
2846 {
2847 return exec_node_is_head_sentinel(node->node.prev);
2848 }
2849
2850 static inline bool
2851 nir_cf_node_is_last(const nir_cf_node *node)
2852 {
2853 return exec_node_is_tail_sentinel(node->node.next);
2854 }
2855
2856 NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
2857 type, nir_cf_node_block)
2858 NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
2859 type, nir_cf_node_if)
2860 NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
2861 type, nir_cf_node_loop)
2862 NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
2863 nir_function_impl, cf_node, type, nir_cf_node_function)
2864
2865 static inline nir_block *
2866 nir_if_first_then_block(nir_if *if_stmt)
2867 {
2868 struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
2869 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
2870 }
2871
2872 static inline nir_block *
2873 nir_if_last_then_block(nir_if *if_stmt)
2874 {
2875 struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
2876 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
2877 }
2878
2879 static inline nir_block *
2880 nir_if_first_else_block(nir_if *if_stmt)
2881 {
2882 struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
2883 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
2884 }
2885
2886 static inline nir_block *
2887 nir_if_last_else_block(nir_if *if_stmt)
2888 {
2889 struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
2890 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
2891 }
2892
2893 static inline nir_block *
2894 nir_loop_first_block(nir_loop *loop)
2895 {
2896 struct exec_node *head = exec_list_get_head(&loop->body);
2897 return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
2898 }
2899
2900 static inline nir_block *
2901 nir_loop_last_block(nir_loop *loop)
2902 {
2903 struct exec_node *tail = exec_list_get_tail(&loop->body);
2904 return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
2905 }
2906
2907 /**
2908 * Return true if this list of cf_nodes contains a single empty block.
2909 */
2910 static inline bool
2911 nir_cf_list_is_empty_block(struct exec_list *cf_list)
2912 {
2913 if (exec_list_is_singular(cf_list)) {
2914 struct exec_node *head = exec_list_get_head(cf_list);
2915 nir_block *block =
2916 nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
2917 return exec_list_is_empty(&block->instr_list);
2918 }
2919 return false;
2920 }
2921
2922 typedef struct {
2923 uint8_t num_components;
2924 uint8_t bit_size;
2925 } nir_parameter;
2926
2927 typedef struct nir_function {
2928 struct exec_node node;
2929
2930 const char *name;
2931 struct nir_shader *shader;
2932
2933 unsigned num_params;
2934 nir_parameter *params;
2935
2936 /** The implementation of this function.
2937 *
2938 * If the function is only declared and not implemented, this is NULL.
2939 */
2940 nir_function_impl *impl;
2941
2942 bool is_entrypoint;
2943 } nir_function;
2944
2945 typedef enum {
2946 nir_lower_imul64 = (1 << 0),
2947 nir_lower_isign64 = (1 << 1),
2948 /** Lower all int64 modulus and division opcodes */
2949 nir_lower_divmod64 = (1 << 2),
2950 /** Lower all 64-bit umul_high and imul_high opcodes */
2951 nir_lower_imul_high64 = (1 << 3),
2952 nir_lower_mov64 = (1 << 4),
2953 nir_lower_icmp64 = (1 << 5),
2954 nir_lower_iadd64 = (1 << 6),
2955 nir_lower_iabs64 = (1 << 7),
2956 nir_lower_ineg64 = (1 << 8),
2957 nir_lower_logic64 = (1 << 9),
2958 nir_lower_minmax64 = (1 << 10),
2959 nir_lower_shift64 = (1 << 11),
2960 nir_lower_imul_2x32_64 = (1 << 12),
2961 nir_lower_extract64 = (1 << 13),
2962 nir_lower_ufind_msb64 = (1 << 14),
2963 } nir_lower_int64_options;
2964
2965 typedef enum {
2966 nir_lower_drcp = (1 << 0),
2967 nir_lower_dsqrt = (1 << 1),
2968 nir_lower_drsq = (1 << 2),
2969 nir_lower_dtrunc = (1 << 3),
2970 nir_lower_dfloor = (1 << 4),
2971 nir_lower_dceil = (1 << 5),
2972 nir_lower_dfract = (1 << 6),
2973 nir_lower_dround_even = (1 << 7),
2974 nir_lower_dmod = (1 << 8),
2975 nir_lower_dsub = (1 << 9),
2976 nir_lower_ddiv = (1 << 10),
2977 nir_lower_fp64_full_software = (1 << 11),
2978 } nir_lower_doubles_options;
2979
2980 typedef enum {
2981 nir_divergence_single_prim_per_subgroup = (1 << 0),
2982 nir_divergence_single_patch_per_tcs_subgroup = (1 << 1),
2983 nir_divergence_single_patch_per_tes_subgroup = (1 << 2),
2984 nir_divergence_view_index_uniform = (1 << 3),
2985 } nir_divergence_options;
2986
2987 typedef struct nir_shader_compiler_options {
2988 bool lower_fdiv;
2989 bool lower_ffma;
2990 bool fuse_ffma;
2991 bool lower_flrp16;
2992 bool lower_flrp32;
2993 /** Lowers flrp when it does not support doubles */
2994 bool lower_flrp64;
2995 bool lower_fpow;
2996 bool lower_fsat;
2997 bool lower_fsqrt;
2998 bool lower_sincos;
2999 bool lower_fmod;
3000 /** Lowers ibitfield_extract/ubitfield_extract to ibfe/ubfe. */
3001 bool lower_bitfield_extract;
3002 /** Lowers ibitfield_extract/ubitfield_extract to compares, shifts. */
3003 bool lower_bitfield_extract_to_shifts;
3004 /** Lowers bitfield_insert to bfi/bfm */
3005 bool lower_bitfield_insert;
3006 /** Lowers bitfield_insert to compares, and shifts. */
3007 bool lower_bitfield_insert_to_shifts;
3008 /** Lowers bitfield_insert to bfm/bitfield_select. */
3009 bool lower_bitfield_insert_to_bitfield_select;
3010 /** Lowers bitfield_reverse to shifts. */
3011 bool lower_bitfield_reverse;
3012 /** Lowers bit_count to shifts. */
3013 bool lower_bit_count;
3014 /** Lowers ifind_msb to compare and ufind_msb */
3015 bool lower_ifind_msb;
3016 /** Lowers find_lsb to ufind_msb and logic ops */
3017 bool lower_find_lsb;
3018 bool lower_uadd_carry;
3019 bool lower_usub_borrow;
3020 /** Lowers imul_high/umul_high to 16-bit multiplies and carry operations. */
3021 bool lower_mul_high;
3022 /** lowers fneg and ineg to fsub and isub. */
3023 bool lower_negate;
3024 /** lowers fsub and isub to fadd+fneg and iadd+ineg. */
3025 bool lower_sub;
3026
3027 /* lower {slt,sge,seq,sne} to {flt,fge,feq,fneu} + b2f: */
3028 bool lower_scmp;
3029
3030 /* lower fall_equalN/fany_nequalN (e.g. fany_nequal4 to sne+fdot4+fsat) */
3031 bool lower_vector_cmp;
3032
3033 /** enables rules to lower idiv by power-of-two: */
3034 bool lower_idiv;
3035
3036 /** enables rules to avoid bit ops */
3037 bool lower_bitops;
3038
3039 /** enables rules to lower isign to imin+imax */
3040 bool lower_isign;
3041
3042 /** enables rules to lower fsign to fsub and flt */
3043 bool lower_fsign;
3044
3045 /* lower fdph to fdot4 */
3046 bool lower_fdph;
3047
3048 /** lower fdot to fmul and fsum/fadd. */
3049 bool lower_fdot;
3050
3051 /* Does the native fdot instruction replicate its result for four
3052 * components? If so, then opt_algebraic_late will turn all fdotN
3053 * instructions into fdot_replicatedN instructions.
3054 */
3055 bool fdot_replicates;
3056
3057 /** lowers ffloor to fsub+ffract: */
3058 bool lower_ffloor;
3059
3060 /** lowers ffract to fsub+ffloor: */
3061 bool lower_ffract;
3062
3063 /** lowers fceil to fneg+ffloor+fneg: */
3064 bool lower_fceil;
3065
3066 bool lower_ftrunc;
3067
3068 bool lower_ldexp;
3069
3070 bool lower_pack_half_2x16;
3071 bool lower_pack_unorm_2x16;
3072 bool lower_pack_snorm_2x16;
3073 bool lower_pack_unorm_4x8;
3074 bool lower_pack_snorm_4x8;
3075 bool lower_pack_64_2x32_split;
3076 bool lower_pack_32_2x16_split;
3077 bool lower_unpack_half_2x16;
3078 bool lower_unpack_unorm_2x16;
3079 bool lower_unpack_snorm_2x16;
3080 bool lower_unpack_unorm_4x8;
3081 bool lower_unpack_snorm_4x8;
3082 bool lower_unpack_64_2x32_split;
3083 bool lower_unpack_32_2x16_split;
3084
3085 bool lower_pack_split;
3086
3087 bool lower_extract_byte;
3088 bool lower_extract_word;
3089
3090 bool lower_all_io_to_temps;
3091 bool lower_all_io_to_elements;
3092
3093 /* Indicates that the driver only has zero-based vertex id */
3094 bool vertex_id_zero_based;
3095
3096 /**
3097 * If enabled, gl_BaseVertex will be lowered as:
3098 * is_indexed_draw (~0/0) & firstvertex
3099 */
3100 bool lower_base_vertex;
3101
3102 /**
3103 * If enabled, gl_HelperInvocation will be lowered as:
3104 *
3105 * !((1 << sample_id) & sample_mask_in)
3106 *
3107 * This depends on hw implementation details that may not be true for
3108 * all hw; in particular, that the FS is only executed for covered
3109 * samples or for helper invocations. So, do not blindly enable this
3110 * option.
3111 *
3112 * Note: See also issue #22 in ARB_shader_image_load_store
3113 */
3114 bool lower_helper_invocation;
3115
3116 /**
3117 * Convert gl_SampleMaskIn to gl_HelperInvocation as follows:
3118 *
3119 * gl_SampleMaskIn == 0 ---> gl_HelperInvocation
3120 * gl_SampleMaskIn != 0 ---> !gl_HelperInvocation
3121 */
3122 bool optimize_sample_mask_in;
3123
3124 bool lower_cs_local_index_from_id;
3125 bool lower_cs_local_id_from_index;
3126
3127 /* Prevents lowering global_invocation_id to be in terms of work_group_id */
3128 bool has_cs_global_id;
3129
3130 bool lower_device_index_to_zero;
3131
3132 /* Set if nir_lower_wpos_ytransform() should also invert gl_PointCoord. */
3133 bool lower_wpos_pntc;
3134
3135 /**
3136 * Set if nir_op_[iu]hadd and nir_op_[iu]rhadd instructions should be
3137 * lowered to simple arithmetic.
3138 *
3139 * If this flag is set, the lowering will be applied to all bit-sizes of
3140 * these instructions.
3141 *
3142 * \sa ::lower_hadd64
3143 */
3144 bool lower_hadd;
3145
3146 /**
3147 * Set if only 64-bit nir_op_[iu]hadd and nir_op_[iu]rhadd instructions
3148 * should be lowered to simple arithmetic.
3149 *
3150 * If this flag is set, the lowering will be applied to only 64-bit
3151 * versions of these instructions.
3152 *
3153 * \sa ::lower_hadd
3154 */
3155 bool lower_hadd64;
3156
3157 /**
3158 * Set if nir_op_add_sat and nir_op_usub_sat should be lowered to simple
3159 * arithmetic.
3160 *
3161 * If this flag is set, the lowering will be applied to all bit-sizes of
3162 * these instructions.
3163 *
3164 * \sa ::lower_usub_sat64
3165 */
3166 bool lower_add_sat;
3167
3168 /**
3169 * Set if only 64-bit nir_op_usub_sat should be lowered to simple
3170 * arithmetic.
3171 *
3172 * \sa ::lower_add_sat
3173 */
3174 bool lower_usub_sat64;
3175
3176 /**
3177 * Should IO be re-vectorized? Some scalar ISAs still operate on vec4's
3178 * for IO purposes and would prefer loads/stores be vectorized.
3179 */
3180 bool vectorize_io;
3181 bool lower_to_scalar;
3182
3183 /**
3184 * Whether nir_opt_vectorize should only create 16-bit 2D vectors.
3185 */
3186 bool vectorize_vec2_16bit;
3187
3188 /**
3189 * Should the linker unify inputs_read/outputs_written between adjacent
3190 * shader stages which are linked into a single program?
3191 */
3192 bool unify_interfaces;
3193
3194 /**
3195 * Should nir_lower_io() create load_interpolated_input intrinsics?
3196 *
3197 * If not, it generates regular load_input intrinsics and interpolation
3198 * information must be inferred from the list of input nir_variables.
3199 */
3200 bool use_interpolated_input_intrinsics;
3201
3202 /* Lowers when 32x32->64 bit multiplication is not supported */
3203 bool lower_mul_2x32_64;
3204
3205 /* Lowers when rotate instruction is not supported */
3206 bool lower_rotate;
3207
3208 /**
3209 * Backend supports imul24, and would like to use it (when possible)
3210 * for address/offset calculation. If true, driver should call
3211 * nir_lower_amul(). (If not set, amul will automatically be lowered
3212 * to imul.)
3213 */
3214 bool has_imul24;
3215
3216 /** Backend supports umul24; if not set, umul24 will automatically be
3217 * lowered to imul with masked inputs */
3218 bool has_umul24;
3219
3220 /** Backend supports umad24; if not set, umad24 will automatically be
3221 * lowered to imul with masked inputs and iadd */
3222 bool has_umad24;
3223
3224 /* Whether to generate only scoped_barrier intrinsics instead of the set of
3225 * memory and control barrier intrinsics based on GLSL.
3226 */
3227 bool use_scoped_barrier;
3228
3229 /**
3230 * Is this the Intel vec4 backend?
3231 *
3232 * Used to inhibit algebraic optimizations that are known to be harmful on
3233 * the Intel vec4 backend. This is generally applicable to any
3234 * optimization that might cause more immediate values to be used in
3235 * 3-source (e.g., ffma and flrp) instructions.
3236 */
3237 bool intel_vec4;
3238
3239 /** Lower nir_op_ibfe and nir_op_ubfe that have two constant sources. */
3240 bool lower_bfe_with_two_constants;
3241
3242 /** Whether 8-bit ALU is supported. */
3243 bool support_8bit_alu;
3244
3245 /** Whether 16-bit ALU is supported. */
3246 bool support_16bit_alu;
3247
3248 unsigned max_unroll_iterations;
3249
3250 nir_lower_int64_options lower_int64_options;
3251 nir_lower_doubles_options lower_doubles_options;
3252 } nir_shader_compiler_options;
3253
3254 typedef struct nir_shader {
3255 /** list of uniforms (nir_variable) */
3256 struct exec_list variables;
3257
3258 /** Set of driver-specific options for the shader.
3259 *
3260 * The memory for the options is expected to be kept in a single static
3261 * copy by the driver.
3262 */
3263 const struct nir_shader_compiler_options *options;
3264
3265 /** Various bits of compile-time information about a given shader */
3266 struct shader_info info;
3267
3268 struct exec_list functions; /** < list of nir_function */
3269
3270 /**
3271 * the highest index a load_input_*, load_uniform_*, etc. intrinsic can
3272 * access, plus one
3273 */
3274 unsigned num_inputs, num_uniforms, num_outputs, num_shared;
3275
3276 /** Size in bytes of required scratch space */
3277 unsigned scratch_size;
3278
3279 /** Constant data associated with this shader.
3280 *
3281 * Constant data is loaded through load_constant intrinsics (as compared to
3282 * the NIR load_const instructions which have the constant value inlined
3283 * into them). This is usually generated by nir_opt_large_constants (so
3284 * shaders don't have to load_const into a temporary array when they want
3285 * to indirect on a const array).
3286 */
3287 void *constant_data;
3288 /** Size of the constant data associated with the shader, in bytes */
3289 unsigned constant_data_size;
3290 } nir_shader;
3291
3292 #define nir_foreach_function(func, shader) \
3293 foreach_list_typed(nir_function, func, node, &(shader)->functions)
3294
3295 static inline nir_function_impl *
3296 nir_shader_get_entrypoint(nir_shader *shader)
3297 {
3298 nir_function *func = NULL;
3299
3300 nir_foreach_function(function, shader) {
3301 assert(func == NULL);
3302 if (function->is_entrypoint) {
3303 func = function;
3304 #ifndef NDEBUG
3305 break;
3306 #endif
3307 }
3308 }
3309
3310 if (!func)
3311 return NULL;
3312
3313 assert(func->num_params == 0);
3314 assert(func->impl);
3315 return func->impl;
3316 }
3317
3318 nir_shader *nir_shader_create(void *mem_ctx,
3319 gl_shader_stage stage,
3320 const nir_shader_compiler_options *options,
3321 shader_info *si);
3322
3323 nir_register *nir_local_reg_create(nir_function_impl *impl);
3324
3325 void nir_reg_remove(nir_register *reg);
3326
3327 /** Adds a variable to the appropriate list in nir_shader */
3328 void nir_shader_add_variable(nir_shader *shader, nir_variable *var);
3329
3330 static inline void
3331 nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
3332 {
3333 assert(var->data.mode == nir_var_function_temp);
3334 exec_list_push_tail(&impl->locals, &var->node);
3335 }
3336
3337 /** creates a variable, sets a few defaults, and adds it to the list */
3338 nir_variable *nir_variable_create(nir_shader *shader,
3339 nir_variable_mode mode,
3340 const struct glsl_type *type,
3341 const char *name);
3342 /** creates a local variable and adds it to the list */
3343 nir_variable *nir_local_variable_create(nir_function_impl *impl,
3344 const struct glsl_type *type,
3345 const char *name);
3346
3347 nir_variable *nir_find_variable_with_location(nir_shader *shader,
3348 nir_variable_mode mode,
3349 unsigned location);
3350
3351 nir_variable *nir_find_variable_with_driver_location(nir_shader *shader,
3352 nir_variable_mode mode,
3353 unsigned location);
3354
3355 /** creates a function and adds it to the shader's list of functions */
3356 nir_function *nir_function_create(nir_shader *shader, const char *name);
3357
3358 nir_function_impl *nir_function_impl_create(nir_function *func);
3359 /** creates a function_impl that isn't tied to any particular function */
3360 nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);
3361
3362 nir_block *nir_block_create(nir_shader *shader);
3363 nir_if *nir_if_create(nir_shader *shader);
3364 nir_loop *nir_loop_create(nir_shader *shader);
3365
3366 nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);
3367
3368 /** requests that the given pieces of metadata be generated */
3369 void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
3370 /** dirties all but the preserved metadata */
3371 void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
3372 /** Preserves all metadata for the given shader */
3373 void nir_shader_preserve_all_metadata(nir_shader *shader);
3374
3375 /** creates an instruction with default swizzle/writemask/etc. and NULL registers */
3376 nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);
3377
3378 nir_deref_instr *nir_deref_instr_create(nir_shader *shader,
3379 nir_deref_type deref_type);
3380
3381 nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);
3382
3383 nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
3384 unsigned num_components,
3385 unsigned bit_size);
3386
3387 nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
3388 nir_intrinsic_op op);
3389
3390 nir_call_instr *nir_call_instr_create(nir_shader *shader,
3391 nir_function *callee);
3392
3393 nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);
3394
3395 nir_phi_instr *nir_phi_instr_create(nir_shader *shader);
3396
3397 nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);
3398
3399 nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
3400 unsigned num_components,
3401 unsigned bit_size);
3402
3403 nir_const_value nir_alu_binop_identity(nir_op binop, unsigned bit_size);
3404
3405 /**
3406 * NIR Cursors and Instruction Insertion API
3407 * @{
3408 *
3409 * A tiny struct representing a point to insert/extract instructions or
3410 * control flow nodes. Helps reduce the combinatorial explosion of possible
3411 * points to insert/extract.
3412 *
3413 * \sa nir_control_flow.h
3414 */
3415 typedef enum {
3416 nir_cursor_before_block,
3417 nir_cursor_after_block,
3418 nir_cursor_before_instr,
3419 nir_cursor_after_instr,
3420 } nir_cursor_option;
3421
3422 typedef struct {
3423 nir_cursor_option option;
3424 union {
3425 nir_block *block;
3426 nir_instr *instr;
3427 };
3428 } nir_cursor;
3429
3430 static inline nir_block *
3431 nir_cursor_current_block(nir_cursor cursor)
3432 {
3433 if (cursor.option == nir_cursor_before_instr ||
3434 cursor.option == nir_cursor_after_instr) {
3435 return cursor.instr->block;
3436 } else {
3437 return cursor.block;
3438 }
3439 }
3440
3441 bool nir_cursors_equal(nir_cursor a, nir_cursor b);
3442
3443 static inline nir_cursor
3444 nir_before_block(nir_block *block)
3445 {
3446 nir_cursor cursor;
3447 cursor.option = nir_cursor_before_block;
3448 cursor.block = block;
3449 return cursor;
3450 }
3451
3452 static inline nir_cursor
3453 nir_after_block(nir_block *block)
3454 {
3455 nir_cursor cursor;
3456 cursor.option = nir_cursor_after_block;
3457 cursor.block = block;
3458 return cursor;
3459 }
3460
3461 static inline nir_cursor
3462 nir_before_instr(nir_instr *instr)
3463 {
3464 nir_cursor cursor;
3465 cursor.option = nir_cursor_before_instr;
3466 cursor.instr = instr;
3467 return cursor;
3468 }
3469
3470 static inline nir_cursor
3471 nir_after_instr(nir_instr *instr)
3472 {
3473 nir_cursor cursor;
3474 cursor.option = nir_cursor_after_instr;
3475 cursor.instr = instr;
3476 return cursor;
3477 }
3478
3479 static inline nir_cursor
3480 nir_after_block_before_jump(nir_block *block)
3481 {
3482 nir_instr *last_instr = nir_block_last_instr(block);
3483 if (last_instr && last_instr->type == nir_instr_type_jump) {
3484 return nir_before_instr(last_instr);
3485 } else {
3486 return nir_after_block(block);
3487 }
3488 }
3489
3490 static inline nir_cursor
3491 nir_before_src(nir_src *src, bool is_if_condition)
3492 {
3493 if (is_if_condition) {
3494 nir_block *prev_block =
3495 nir_cf_node_as_block(nir_cf_node_prev(&src->parent_if->cf_node));
3496 assert(!nir_block_ends_in_jump(prev_block));
3497 return nir_after_block(prev_block);
3498 } else if (src->parent_instr->type == nir_instr_type_phi) {
3499 #ifndef NDEBUG
3500 nir_phi_instr *cond_phi = nir_instr_as_phi(src->parent_instr);
3501 bool found = false;
3502 nir_foreach_phi_src(phi_src, cond_phi) {
3503 if (phi_src->src.ssa == src->ssa) {
3504 found = true;
3505 break;
3506 }
3507 }
3508 assert(found);
3509 #endif
3510 /* The LIST_ENTRY macro is a generic container-of macro; it just happens
3511 * to have a more specific name.
3512 */
3513 nir_phi_src *phi_src = LIST_ENTRY(nir_phi_src, src, src);
3514 return nir_after_block_before_jump(phi_src->pred);
3515 } else {
3516 return nir_before_instr(src->parent_instr);
3517 }
3518 }
3519
3520 static inline nir_cursor
3521 nir_before_cf_node(nir_cf_node *node)
3522 {
3523 if (node->type == nir_cf_node_block)
3524 return nir_before_block(nir_cf_node_as_block(node));
3525
3526 return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
3527 }
3528
3529 static inline nir_cursor
3530 nir_after_cf_node(nir_cf_node *node)
3531 {
3532 if (node->type == nir_cf_node_block)
3533 return nir_after_block(nir_cf_node_as_block(node));
3534
3535 return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
3536 }
3537
3538 static inline nir_cursor
3539 nir_after_phis(nir_block *block)
3540 {
3541 nir_foreach_instr(instr, block) {
3542 if (instr->type != nir_instr_type_phi)
3543 return nir_before_instr(instr);
3544 }
3545 return nir_after_block(block);
3546 }
3547
3548 static inline nir_cursor
3549 nir_after_cf_node_and_phis(nir_cf_node *node)
3550 {
3551 if (node->type == nir_cf_node_block)
3552 return nir_after_block(nir_cf_node_as_block(node));
3553
3554 nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));
3555
3556 return nir_after_phis(block);
3557 }
3558
3559 static inline nir_cursor
3560 nir_before_cf_list(struct exec_list *cf_list)
3561 {
3562 nir_cf_node *first_node = exec_node_data(nir_cf_node,
3563 exec_list_get_head(cf_list), node);
3564 return nir_before_cf_node(first_node);
3565 }
3566
3567 static inline nir_cursor
3568 nir_after_cf_list(struct exec_list *cf_list)
3569 {
3570 nir_cf_node *last_node = exec_node_data(nir_cf_node,
3571 exec_list_get_tail(cf_list), node);
3572 return nir_after_cf_node(last_node);
3573 }
3574
3575 /**
3576 * Insert a NIR instruction at the given cursor.
3577 *
3578 * Note: This does not update the cursor.
3579 */
3580 void nir_instr_insert(nir_cursor cursor, nir_instr *instr);
3581
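/* Example (editor's sketch): materializing a 32-bit zero constant at the
 * top of an impl's start block:
 *
 *    nir_load_const_instr *zero =
 *       nir_load_const_instr_create(shader, 1, 32);
 *    zero->value[0].u32 = 0;
 *    nir_instr_insert(nir_before_block(nir_start_block(impl)),
 *                     &zero->instr);
 */
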
3582 static inline void
3583 nir_instr_insert_before(nir_instr *instr, nir_instr *before)
3584 {
3585 nir_instr_insert(nir_before_instr(instr), before);
3586 }
3587
3588 static inline void
3589 nir_instr_insert_after(nir_instr *instr, nir_instr *after)
3590 {
3591 nir_instr_insert(nir_after_instr(instr), after);
3592 }
3593
3594 static inline void
3595 nir_instr_insert_before_block(nir_block *block, nir_instr *before)
3596 {
3597 nir_instr_insert(nir_before_block(block), before);
3598 }
3599
3600 static inline void
3601 nir_instr_insert_after_block(nir_block *block, nir_instr *after)
3602 {
3603 nir_instr_insert(nir_after_block(block), after);
3604 }
3605
3606 static inline void
3607 nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
3608 {
3609 nir_instr_insert(nir_before_cf_node(node), before);
3610 }
3611
3612 static inline void
3613 nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
3614 {
3615 nir_instr_insert(nir_after_cf_node(node), after);
3616 }
3617
3618 static inline void
3619 nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
3620 {
3621 nir_instr_insert(nir_before_cf_list(list), before);
3622 }
3623
3624 static inline void
3625 nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
3626 {
3627 nir_instr_insert(nir_after_cf_list(list), after);
3628 }
3629
3630 void nir_instr_remove_v(nir_instr *instr);
3631
3632 static inline nir_cursor
3633 nir_instr_remove(nir_instr *instr)
3634 {
3635 nir_cursor cursor;
3636 nir_instr *prev = nir_instr_prev(instr);
3637 if (prev) {
3638 cursor = nir_after_instr(prev);
3639 } else {
3640 cursor = nir_before_block(instr->block);
3641 }
3642 nir_instr_remove_v(instr);
3643 return cursor;
3644 }
3645
3646 /** @} */
3647
3648 nir_ssa_def *nir_instr_ssa_def(nir_instr *instr);
3649
3650 typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
3651 typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
3652 typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
3653 bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
3654 void *state);
3655 bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
3656 bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);
3657 bool nir_foreach_phi_src_leaving_block(nir_block *instr,
3658 nir_foreach_src_cb cb,
3659 void *state);
3660
3661 nir_const_value *nir_src_as_const_value(nir_src src);
3662
3663 #define NIR_SRC_AS_(name, c_type, type_enum, cast_macro) \
3664 static inline c_type * \
3665 nir_src_as_ ## name (nir_src src) \
3666 { \
3667 return src.is_ssa && src.ssa->parent_instr->type == type_enum \
3668 ? cast_macro(src.ssa->parent_instr) : NULL; \
3669 }
3670
3671 NIR_SRC_AS_(alu_instr, nir_alu_instr, nir_instr_type_alu, nir_instr_as_alu)
3672 NIR_SRC_AS_(intrinsic, nir_intrinsic_instr,
3673 nir_instr_type_intrinsic, nir_instr_as_intrinsic)
3674 NIR_SRC_AS_(deref, nir_deref_instr, nir_instr_type_deref, nir_instr_as_deref)
3675
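/* Example (editor's sketch): testing whether an intrinsic's first source
 * comes straight from a variable deref:
 *
 *    nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
 *    if (deref && deref->deref_type == nir_deref_type_var) {
 *       nir_variable *var = deref->var;
 *       ...
 *    }
 */
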
3676 bool nir_src_is_dynamically_uniform(nir_src src);
3677 bool nir_srcs_equal(nir_src src1, nir_src src2);
3678 bool nir_instrs_equal(const nir_instr *instr1, const nir_instr *instr2);
3679 void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
3680 void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
3681 void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
3682 void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
3683 nir_dest new_dest);
3684
3685 void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
3686 unsigned num_components, unsigned bit_size,
3687 const char *name);
3688 void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
3689 unsigned num_components, unsigned bit_size,
3690 const char *name);
3691 static inline void
3692 nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
3693 const struct glsl_type *type,
3694 const char *name)
3695 {
3696 assert(glsl_type_is_vector_or_scalar(type));
3697 nir_ssa_dest_init(instr, dest, glsl_get_components(type),
3698 glsl_get_bit_size(type), name);
3699 }
3700 void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
3701 void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
3702 nir_instr *after_me);
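/* A sketch of the canonical replace-and-remove pattern when lowering an
 * instruction whose value has been recomputed as `new_def` (names are
 * hypothetical; `old_instr` could be, e.g., an intrinsic):
 *
 *    nir_ssa_def_rewrite_uses(&old_instr->dest.ssa, nir_src_for_ssa(new_def));
 *    nir_instr_remove(&old_instr->instr);
 */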
3703
3704 nir_component_mask_t nir_ssa_def_components_read(const nir_ssa_def *def);
3705
3706
3707 /** Returns the next block, disregarding structure
3708 *
3709 * The ordering is deterministic but has no guarantees beyond that. In
3710 * particular, it is not guaranteed to be dominance-preserving.
3711 */
3712 nir_block *nir_block_unstructured_next(nir_block *block);
3713 nir_block *nir_unstructured_start_block(nir_function_impl *impl);
3714
3715 #define nir_foreach_block_unstructured(block, impl) \
3716 for (nir_block *block = nir_unstructured_start_block(impl); block != NULL; \
3717 block = nir_block_unstructured_next(block))
3718
3719 #define nir_foreach_block_unstructured_safe(block, impl) \
3720 for (nir_block *block = nir_unstructured_start_block(impl), \
3721 *next = nir_block_unstructured_next(block); \
3722 block != NULL; \
3723 block = next, next = nir_block_unstructured_next(block))
3724
3725 /*
3726 * Finds the next basic block in source-code order; returns NULL if there
3727 * is none.
3728 */
3729
3730 nir_block *nir_block_cf_tree_next(nir_block *block);
3731
3732 /* Performs the opposite of nir_block_cf_tree_next() */
3733
3734 nir_block *nir_block_cf_tree_prev(nir_block *block);
3735
3736 /* Gets the first block in a CF node in source-code order */
3737
3738 nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);
3739
3740 /* Gets the last block in a CF node in source-code order */
3741
3742 nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);
3743
3744 /* Gets the next block after a CF node in source-code order */
3745
3746 nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);
3747
3748 /* Macros for loops that visit blocks in source-code order */
3749
3750 #define nir_foreach_block(block, impl) \
3751 for (nir_block *block = nir_start_block(impl); block != NULL; \
3752 block = nir_block_cf_tree_next(block))
3753
3754 #define nir_foreach_block_safe(block, impl) \
3755 for (nir_block *block = nir_start_block(impl), \
3756 *next = nir_block_cf_tree_next(block); \
3757 block != NULL; \
3758 block = next, next = nir_block_cf_tree_next(block))
3759
3760 #define nir_foreach_block_reverse(block, impl) \
3761 for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
3762 block = nir_block_cf_tree_prev(block))
3763
3764 #define nir_foreach_block_reverse_safe(block, impl) \
3765 for (nir_block *block = nir_impl_last_block(impl), \
3766 *prev = nir_block_cf_tree_prev(block); \
3767 block != NULL; \
3768 block = prev, prev = nir_block_cf_tree_prev(block))
3769
3770 #define nir_foreach_block_in_cf_node(block, node) \
3771 for (nir_block *block = nir_cf_node_cf_tree_first(node); \
3772 block != nir_cf_node_cf_tree_next(node); \
3773 block = nir_block_cf_tree_next(block))
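/* A typical traversal (illustrative): visit every instruction in an impl in
 * source-code order, using the safe instruction iterator so instructions may
 * be removed while iterating:
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          ...
 *       }
 *    }
 */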
3774
3775 /* If the following CF node is an if, this function returns that if.
3776 * Otherwise, it returns NULL.
3777 */
3778 nir_if *nir_block_get_following_if(nir_block *block);
3779
3780 nir_loop *nir_block_get_following_loop(nir_block *block);
3781
3782 void nir_index_local_regs(nir_function_impl *impl);
3783 void nir_index_ssa_defs(nir_function_impl *impl);
3784 unsigned nir_index_instrs(nir_function_impl *impl);
3785
3786 void nir_index_blocks(nir_function_impl *impl);
3787
3788 unsigned nir_shader_index_vars(nir_shader *shader, nir_variable_mode modes);
3789 unsigned nir_function_impl_index_vars(nir_function_impl *impl);
3790
3791 void nir_print_shader(nir_shader *shader, FILE *fp);
3792 void nir_print_shader_annotated(nir_shader *shader, FILE *fp, struct hash_table *errors);
3793 void nir_print_instr(const nir_instr *instr, FILE *fp);
3794 void nir_print_deref(const nir_deref_instr *deref, FILE *fp);
3795
3796 /** Shallow clone of a single ALU instruction. */
3797 nir_alu_instr *nir_alu_instr_clone(nir_shader *s, const nir_alu_instr *orig);
3798
3799 nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
3800 nir_function_impl *nir_function_impl_clone(nir_shader *shader,
3801 const nir_function_impl *fi);
3802 nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
3803 nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
3804
3805 void nir_shader_replace(nir_shader *dest, nir_shader *src);
3806
3807 void nir_shader_serialize_deserialize(nir_shader *s);
3808
3809 #ifndef NDEBUG
3810 void nir_validate_shader(nir_shader *shader, const char *when);
3811 void nir_metadata_set_validation_flag(nir_shader *shader);
3812 void nir_metadata_check_validation_flag(nir_shader *shader);
3813
3814 static inline bool
3815 should_skip_nir(const char *name)
3816 {
3817 static const char *list = NULL;
3818 if (!list) {
3819 /* Comma-separated list of names to skip. */
3820 list = getenv("NIR_SKIP");
3821 if (!list)
3822 list = "";
3823 }
3824
3825 if (!list[0])
3826 return false;
3827
3828 return comma_separated_list_contains(list, name);
3829 }
3830
3831 static inline bool
3832 should_clone_nir(void)
3833 {
3834 static int should_clone = -1;
3835 if (should_clone < 0)
3836 should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);
3837
3838 return should_clone;
3839 }
3840
3841 static inline bool
3842 should_serialize_deserialize_nir(void)
3843 {
3844 static int test_serialize = -1;
3845 if (test_serialize < 0)
3846 test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);
3847
3848 return test_serialize;
3849 }
3850
3851 static inline bool
3852 should_print_nir(void)
3853 {
3854 static int should_print = -1;
3855 if (should_print < 0)
3856 should_print = env_var_as_boolean("NIR_PRINT", false);
3857
3858 return should_print;
3859 }
3860 #else
3861 static inline void nir_validate_shader(nir_shader *shader, const char *when) { (void) shader; (void)when; }
3862 static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
3863 static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
3864 static inline bool should_skip_nir(UNUSED const char *pass_name) { return false; }
3865 static inline bool should_clone_nir(void) { return false; }
3866 static inline bool should_serialize_deserialize_nir(void) { return false; }
3867 static inline bool should_print_nir(void) { return false; }
3868 #endif /* NDEBUG */
3869
3870 #define _PASS(pass, nir, do_pass) do { \
3871 if (should_skip_nir(#pass)) { \
3872 printf("skipping %s\n", #pass); \
3873 break; \
3874 } \
3875 do_pass \
3876 nir_validate_shader(nir, "after " #pass); \
3877 if (should_clone_nir()) { \
3878 nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
3879 nir_shader_replace(nir, clone); \
3880 } \
3881 if (should_serialize_deserialize_nir()) { \
3882 nir_shader_serialize_deserialize(nir); \
3883 } \
3884 } while (0)
3885
3886 #define NIR_PASS(progress, nir, pass, ...) _PASS(pass, nir, \
3887 nir_metadata_set_validation_flag(nir); \
3888 if (should_print_nir()) \
3889 printf("%s\n", #pass); \
3890 if (pass(nir, ##__VA_ARGS__)) { \
3891 progress = true; \
3892 if (should_print_nir()) \
3893 nir_print_shader(nir, stdout); \
3894 nir_metadata_check_validation_flag(nir); \
3895 } \
3896 )
3897
3898 #define NIR_PASS_V(nir, pass, ...) _PASS(pass, nir, \
3899 if (should_print_nir()) \
3900 printf("%s\n", #pass); \
3901 pass(nir, ##__VA_ARGS__); \
3902 if (should_print_nir()) \
3903 nir_print_shader(nir, stdout); \
3904 )
3905
3906 #define NIR_SKIP(name) should_skip_nir(#name)
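/* A sketch of the usual driver-side fixed-point optimization loop built on
 * these macros (assuming the listed passes suit the backend):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_algebraic);
 *    } while (progress);
 *
 * NIR_PASS_V is for passes run unconditionally, without progress tracking,
 * e.g. NIR_PASS_V(nir, nir_lower_regs_to_ssa).
 */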
3907
3908 /** An instruction filtering callback
3909 *
3910 * Returns true if the instruction should be processed and false otherwise.
3911 */
3912 typedef bool (*nir_instr_filter_cb)(const nir_instr *, const void *);
3913
3914 /** A simple instruction lowering callback
3915 *
3916 * Many instruction lowering passes can be written as a simple function which
3917 * takes an instruction as its input and returns a sequence of instructions
3918 * that implement the consumed instruction. This function type represents
3919 * such a lowering function. When called, a function with this prototype
3920 * should either return NULL indicating that no lowering needs to be done or
3921 * emit a sequence of instructions using the provided builder (whose cursor
3922 * will already be placed after the instruction to be lowered) and return the
3923 * resulting nir_ssa_def.
3924 */
3925 typedef nir_ssa_def *(*nir_lower_instr_cb)(struct nir_builder *,
3926 nir_instr *, void *);
3927
3928 /**
3929 * Special return value for nir_lower_instr_cb when some progress occurred
3930 * (like changing an input to the instr) that didn't result in a replacement
3931 * SSA def being generated.
3932 */
3933 #define NIR_LOWER_INSTR_PROGRESS ((nir_ssa_def *)(uintptr_t)1)
3934
3935 /** Iterate over all the instructions in a nir_function_impl and lower them
3936 * using the provided callbacks
3937 *
3938 * This function implements the guts of a standard lowering pass for you. It
3939 * iterates over all of the instructions in a nir_function_impl and calls the
3940 * filter callback on each one. If the filter callback returns true, it then
3941 * calls the lowering callback on the instruction. (Splitting it this way
3942 * allows us to avoid some save/restore work for instructions we know won't be
3943 * lowered.) If the instruction is dead after the lowering is complete, it
3944 * will be removed. If new instructions are added, the lowering callback will
3945 * also be called on them in case multiple lowerings are required.
3946 *
3947 * The metadata for the nir_function_impl will also be updated. If any blocks
3948 * are added (they cannot be removed), dominance and block indices will be
3949 * invalidated.
3950 */
3951 bool nir_function_impl_lower_instructions(nir_function_impl *impl,
3952 nir_instr_filter_cb filter,
3953 nir_lower_instr_cb lower,
3954 void *cb_data);
3955 bool nir_shader_lower_instructions(nir_shader *shader,
3956 nir_instr_filter_cb filter,
3957 nir_lower_instr_cb lower,
3958 void *cb_data);
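/* A skeleton (illustrative; both callbacks are hypothetical) of a lowering
 * pass built on nir_shader_lower_instructions:
 *
 *    static bool
 *    filter(const nir_instr *instr, const void *data)
 *    {
 *       return instr->type == nir_instr_type_alu;
 *    }
 *
 *    static nir_ssa_def *
 *    lower(struct nir_builder *b, nir_instr *instr, void *data)
 *    {
 *       ... build replacement code with b and return its nir_ssa_def;
 *       return NULL for "no change", or NIR_LOWER_INSTR_PROGRESS when
 *       something changed without producing a replacement def ...
 *    }
 *
 *    bool progress =
 *       nir_shader_lower_instructions(shader, filter, lower, NULL);
 */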
3959
3960 void nir_calc_dominance_impl(nir_function_impl *impl);
3961 void nir_calc_dominance(nir_shader *shader);
3962
3963 nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
3964 bool nir_block_dominates(nir_block *parent, nir_block *child);
3965 bool nir_block_is_unreachable(nir_block *block);
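/* Dominance queries rely on up-to-date dominance information; a caller
 * typically does something like (a sketch; block names are hypothetical):
 *
 *    nir_metadata_require(impl, nir_metadata_dominance);
 *    if (nir_block_dominates(def_block, use_block)) {
 *       ...
 *    }
 */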
3966
3967 void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
3968 void nir_dump_dom_tree(nir_shader *shader, FILE *fp);
3969
3970 void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
3971 void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);
3972
3973 void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
3974 void nir_dump_cfg(nir_shader *shader, FILE *fp);
3975
3976 int nir_gs_count_vertices(const nir_shader *shader);
3977
3978 bool nir_shrink_vec_array_vars(nir_shader *shader, nir_variable_mode modes);
3979 bool nir_split_array_vars(nir_shader *shader, nir_variable_mode modes);
3980 bool nir_split_var_copies(nir_shader *shader);
3981 bool nir_split_per_member_structs(nir_shader *shader);
3982 bool nir_split_struct_vars(nir_shader *shader, nir_variable_mode modes);
3983
3984 bool nir_lower_returns_impl(nir_function_impl *impl);
3985 bool nir_lower_returns(nir_shader *shader);
3986
3987 void nir_inline_function_impl(struct nir_builder *b,
3988 const nir_function_impl *impl,
3989 nir_ssa_def **params);
3990 bool nir_inline_functions(nir_shader *shader);
3991
3992 bool nir_propagate_invariant(nir_shader *shader);
3993
3994 void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
3995 void nir_lower_deref_copy_instr(struct nir_builder *b,
3996 nir_intrinsic_instr *copy);
3997 bool nir_lower_var_copies(nir_shader *shader);
3998
3999 void nir_fixup_deref_modes(nir_shader *shader);
4000
4001 bool nir_lower_global_vars_to_local(nir_shader *shader);
4002
4003 typedef enum {
4004 nir_lower_direct_array_deref_of_vec_load = (1 << 0),
4005 nir_lower_indirect_array_deref_of_vec_load = (1 << 1),
4006 nir_lower_direct_array_deref_of_vec_store = (1 << 2),
4007 nir_lower_indirect_array_deref_of_vec_store = (1 << 3),
4008 } nir_lower_array_deref_of_vec_options;
4009
4010 bool nir_lower_array_deref_of_vec(nir_shader *shader, nir_variable_mode modes,
4011 nir_lower_array_deref_of_vec_options options);
4012
4013 bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);
4014
4015 bool nir_lower_locals_to_regs(nir_shader *shader);
4016
4017 void nir_lower_io_to_temporaries(nir_shader *shader,
4018 nir_function_impl *entrypoint,
4019 bool outputs, bool inputs);
4020
4021 bool nir_lower_vars_to_scratch(nir_shader *shader,
4022 nir_variable_mode modes,
4023 int size_threshold,
4024 glsl_type_size_align_func size_align);
4025
4026 void nir_lower_clip_halfz(nir_shader *shader);
4027
4028 void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
4029
4030 void nir_gather_ssa_types(nir_function_impl *impl,
4031 BITSET_WORD *float_types,
4032 BITSET_WORD *int_types);
4033
4034 void nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
4035 unsigned *size,
4036 int (*type_size)(const struct glsl_type *, bool));
4037
4038 /* Some helpers to do very simple linking */
4039 bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
4040 bool nir_remove_unused_io_vars(nir_shader *shader, nir_variable_mode mode,
4041 uint64_t *used_by_other_stage,
4042 uint64_t *used_by_other_stage_patches);
4043 void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
4044 bool default_to_smooth_interp);
4045 void nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer);
4046 bool nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer);
4047
4048 bool nir_lower_amul(nir_shader *shader,
4049 int (*type_size)(const struct glsl_type *, bool));
4050
4051 void nir_assign_io_var_locations(nir_shader *shader,
4052 nir_variable_mode mode,
4053 unsigned *size,
4054 gl_shader_stage stage);
4055
4056 typedef struct {
4057 uint8_t num_linked_io_vars;
4058 uint8_t num_linked_patch_io_vars;
4059 } nir_linked_io_var_info;
4060
4061 nir_linked_io_var_info
4062 nir_assign_linked_io_var_locations(nir_shader *producer,
4063 nir_shader *consumer);
4064
4065 typedef enum {
4066 /* If set, this causes all 64-bit IO operations to be lowered on-the-fly
4067 * to 32-bit operations. This is only valid for nir_var_shader_in/out
4068 * modes.
4069 */
4070 nir_lower_io_lower_64bit_to_32 = (1 << 0),
4071
4072 /* If set, this forces all non-flat fragment shader inputs to be
4073 * interpolated as if with the "sample" qualifier. This requires
4074 * nir_shader_compiler_options::use_interpolated_input_intrinsics.
4075 */
4076 nir_lower_io_force_sample_interpolation = (1 << 1),
4077 } nir_lower_io_options;
4078 bool nir_lower_io(nir_shader *shader,
4079 nir_variable_mode modes,
4080 int (*type_size)(const struct glsl_type *, bool),
4081 nir_lower_io_options);
4082
4083 bool nir_io_add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode);
4084
4085 bool
4086 nir_lower_vars_to_explicit_types(nir_shader *shader,
4087 nir_variable_mode modes,
4088 glsl_type_size_align_func type_info);
4089
4090 typedef enum {
4091 /**
4092 * An address format which is a simple 32-bit global GPU address.
4093 */
4094 nir_address_format_32bit_global,
4095
4096 /**
4097 * An address format which is a simple 64-bit global GPU address.
4098 */
4099 nir_address_format_64bit_global,
4100
4101 /**
4102 * An address format which is a bounds-checked 64-bit global GPU address.
4103 *
4104 * The address is a vec4 of 32-bit components where .xy hold a uint64_t
4105 * base address (low bits in .x, high bits in .y), .z is a size, and .w
4106 * is an offset. When the final I/O operation is lowered, .w
4107 * is checked against .z and the operation is predicated on the result.
4108 */
4109 nir_address_format_64bit_bounded_global,
4110
4111 /**
4112 * An address format which is composed of a vec2 where the first
4113 * component is a buffer index and the second is an offset.
4114 */
4115 nir_address_format_32bit_index_offset,
4116
4117 /**
4118 * An address format which is a 64-bit value, where the high 32 bits
4119 * are a buffer index, and the low 32 bits are an offset.
4120 */
4121 nir_address_format_32bit_index_offset_pack64,
4122
4123 /**
4124 * An address format which is composed of a vec3 where the first two
4125 * components specify the buffer and the third is an offset.
4126 */
4127 nir_address_format_vec2_index_32bit_offset,
4128
4129 /**
4130 * An address format which is a simple 32-bit offset.
4131 */
4132 nir_address_format_32bit_offset,
4133
4134 /**
4135 * An address format which is a simple 32-bit offset cast to 64-bit.
4136 */
4137 nir_address_format_32bit_offset_as_64bit,
4138
4139 /**
4140 * An address format representing a purely logical addressing model. In
4141 * this model, all deref chains must be complete from the dereference
4142 * operation to the variable. Cast derefs are not allowed. These
4143 * addresses will be 32-bit scalars but the format is immaterial because
4144 * you can always chase the chain.
4145 */
4146 nir_address_format_logical,
4147 } nir_address_format;
4148
4149 static inline unsigned
4150 nir_address_format_bit_size(nir_address_format addr_format)
4151 {
4152 switch (addr_format) {
4153 case nir_address_format_32bit_global: return 32;
4154 case nir_address_format_64bit_global: return 64;
4155 case nir_address_format_64bit_bounded_global: return 32;
4156 case nir_address_format_32bit_index_offset: return 32;
4157 case nir_address_format_32bit_index_offset_pack64: return 64;
4158 case nir_address_format_vec2_index_32bit_offset: return 32;
4159 case nir_address_format_32bit_offset: return 32;
4160 case nir_address_format_32bit_offset_as_64bit: return 64;
4161 case nir_address_format_logical: return 32;
4162 }
4163 unreachable("Invalid address format");
4164 }
4165
4166 static inline unsigned
4167 nir_address_format_num_components(nir_address_format addr_format)
4168 {
4169 switch (addr_format) {
4170 case nir_address_format_32bit_global: return 1;
4171 case nir_address_format_64bit_global: return 1;
4172 case nir_address_format_64bit_bounded_global: return 4;
4173 case nir_address_format_32bit_index_offset: return 2;
4174 case nir_address_format_32bit_index_offset_pack64: return 1;
4175 case nir_address_format_vec2_index_32bit_offset: return 3;
4176 case nir_address_format_32bit_offset: return 1;
4177 case nir_address_format_32bit_offset_as_64bit: return 1;
4178 case nir_address_format_logical: return 1;
4179 }
4180 unreachable("Invalid address format");
4181 }
4182
4183 static inline const struct glsl_type *
4184 nir_address_format_to_glsl_type(nir_address_format addr_format)
4185 {
4186 unsigned bit_size = nir_address_format_bit_size(addr_format);
4187 assert(bit_size == 32 || bit_size == 64);
4188 return glsl_vector_type(bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64,
4189 nir_address_format_num_components(addr_format));
4190 }
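/* For example, nir_address_format_64bit_bounded_global maps to a uvec4
 * (four 32-bit components), while nir_address_format_64bit_global maps to
 * a single 64-bit unsigned scalar.
 */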
4191
4192 const nir_const_value *nir_address_format_null_value(nir_address_format addr_format);
4193
4194 nir_ssa_def *nir_build_addr_ieq(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
4195 nir_address_format addr_format);
4196
4197 nir_ssa_def *nir_build_addr_isub(struct nir_builder *b, nir_ssa_def *addr0, nir_ssa_def *addr1,
4198 nir_address_format addr_format);
4199
4200 nir_ssa_def * nir_explicit_io_address_from_deref(struct nir_builder *b,
4201 nir_deref_instr *deref,
4202 nir_ssa_def *base_addr,
4203 nir_address_format addr_format);
4204 void nir_lower_explicit_io_instr(struct nir_builder *b,
4205 nir_intrinsic_instr *io_instr,
4206 nir_ssa_def *addr,
4207 nir_address_format addr_format);
4208
4209 bool nir_lower_explicit_io(nir_shader *shader,
4210 nir_variable_mode modes,
4211 nir_address_format);
4212
4213 nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
4214 nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
4215
4216 bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
4217
4218 bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
4219 bool nir_lower_regs_to_ssa(nir_shader *shader);
4220 bool nir_lower_vars_to_ssa(nir_shader *shader);
4221
4222 bool nir_remove_dead_derefs(nir_shader *shader);
4223 bool nir_remove_dead_derefs_impl(nir_function_impl *impl);
4224 bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes,
4225 bool (*can_remove_var)(nir_variable *var));
4226 bool nir_lower_variable_initializers(nir_shader *shader,
4227 nir_variable_mode modes);
4228
4229 bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
4230 bool nir_lower_vec_to_movs(nir_shader *shader);
4231 void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
4232 bool alpha_to_one,
4233 const gl_state_index16 *alpha_ref_state_tokens);
4234 bool nir_lower_alu(nir_shader *shader);
4235
4236 bool nir_lower_flrp(nir_shader *shader, unsigned lowering_mask,
4237 bool always_precise, bool have_ffma);
4238
4239 bool nir_lower_alu_to_scalar(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
4240 bool nir_lower_bool_to_bitsize(nir_shader *shader);
4241 bool nir_lower_bool_to_float(nir_shader *shader);
4242 bool nir_lower_bool_to_int32(nir_shader *shader);
4243 bool nir_lower_int_to_float(nir_shader *shader);
4244 bool nir_lower_load_const_to_scalar(nir_shader *shader);
4245 bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
4246 bool nir_lower_phis_to_scalar(nir_shader *shader);
4247 void nir_lower_io_arrays_to_elements(nir_shader *producer, nir_shader *consumer);
4248 void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader,
4249 bool outputs_only);
4250 void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
4251 void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);
4252 bool nir_lower_io_to_vector(nir_shader *shader, nir_variable_mode mask);
4253
4254 bool nir_lower_fragcolor(nir_shader *shader);
4255 bool nir_lower_fragcoord_wtrans(nir_shader *shader);
4256 void nir_lower_viewport_transform(nir_shader *shader);
4257 bool nir_lower_uniforms_to_ubo(nir_shader *shader, int multiplier);
4258
4259 typedef struct nir_lower_subgroups_options {
4260 uint8_t subgroup_size;
4261 uint8_t ballot_bit_size;
4262 bool lower_to_scalar:1;
4263 bool lower_vote_trivial:1;
4264 bool lower_vote_eq_to_ballot:1;
4265 bool lower_subgroup_masks:1;
4266 bool lower_shuffle:1;
4267 bool lower_shuffle_to_32bit:1;
4268 bool lower_shuffle_to_swizzle_amd:1;
4269 bool lower_quad:1;
4270 bool lower_quad_broadcast_dynamic:1;
4271 bool lower_quad_broadcast_dynamic_to_const:1;
4272 } nir_lower_subgroups_options;
4273
4274 bool nir_lower_subgroups(nir_shader *shader,
4275 const nir_lower_subgroups_options *options);
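/* A driver-side sketch (illustrative option values) of filling the options
 * struct and running the pass for a 32-wide subgroup machine:
 *
 *    const nir_lower_subgroups_options opts = {
 *       .subgroup_size = 32,
 *       .ballot_bit_size = 32,
 *       .lower_to_scalar = true,
 *       .lower_subgroup_masks = true,
 *    };
 *    nir_lower_subgroups(shader, &opts);
 */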
4276
4277 bool nir_lower_system_values(nir_shader *shader);
4278
4279 bool nir_lower_compute_system_values(nir_shader *shader);
4280
4281 enum PACKED nir_lower_tex_packing {
4282 nir_lower_tex_packing_none = 0,
4283 /* The sampler returns up to 2 32-bit words of half floats or 16-bit signed
4284 * or unsigned ints based on the sampler type
4285 */
4286 nir_lower_tex_packing_16,
4287 /* The sampler returns 1 32-bit word of 4x8 unorm */
4288 nir_lower_tex_packing_8,
4289 };
4290
4291 typedef struct nir_lower_tex_options {
4292 /**
4293 * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
4294 * sampler types a texture projector is lowered.
4295 */
4296 unsigned lower_txp;
4297
4298 /**
4299 * If true, lower away nir_tex_src_offset for all texelfetch instructions.
4300 */
4301 bool lower_txf_offset;
4302
4303 /**
4304 * If true, lower away nir_tex_src_offset for all rect textures.
4305 */
4306 bool lower_rect_offset;
4307
4308 /**
4309 * If true, lower rect textures to 2D, using txs to fetch the
4310 * texture dimensions and dividing the texture coords by the
4311 * texture dims to normalize.
4312 */
4313 bool lower_rect;
4314
4315 /**
4316 * If true, convert yuv to rgb.
4317 */
4318 unsigned lower_y_uv_external;
4319 unsigned lower_y_u_v_external;
4320 unsigned lower_yx_xuxv_external;
4321 unsigned lower_xy_uxvx_external;
4322 unsigned lower_ayuv_external;
4323 unsigned lower_xyuv_external;
4324 unsigned bt709_external;
4325 unsigned bt2020_external;
4326
4327 /**
4328 * To emulate certain texture wrap modes, this can be used
4329 * to saturate the specified tex coord to [0.0, 1.0]. The
4330 * bits are according to sampler #, ie. if, for example:
4331 *
4332 * (conf->saturate_s & (1 << n))
4333 *
4334 * is true, then the s coord for sampler n is saturated.
4335 *
4336 * Note that clamping must happen *after* projector lowering
4337 * so any projected texture sample instruction with a clamped
4338 * coordinate gets automatically lowered, regardless of the
4339 * 'lower_txp' setting.
4340 */
4341 unsigned saturate_s;
4342 unsigned saturate_t;
4343 unsigned saturate_r;
4344
4345 /* Bitmask of textures that need swizzling.
4346 *
4347 * If (swizzle_result & (1 << texture_index)), then the swizzle in
4348 * swizzles[texture_index] is applied to the result of the texturing
4349 * operation.
4350 */
4351 unsigned swizzle_result;
4352
4353 /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles
4354 * while 4 and 5 represent 0 and 1 respectively.
4355 */
4356 uint8_t swizzles[32][4];
4357
4358 /* Can be used to scale sampled values in range required by the format. */
4359 float scale_factors[32];
4360
4361 /**
4362 * Bitmap of textures that need srgb to linear conversion. If
4363 * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
4364 * of the texture are lowered to linear.
4365 */
4366 unsigned lower_srgb;
4367
4368 /**
4369 * If true, lower nir_texop_tex to nir_texop_txl on shaders that don't
4370 * support implicit LODs.
4371 */
4372 bool lower_tex_without_implicit_lod;
4373
4374 /**
4375 * If true, lower nir_texop_txd on cube maps to nir_texop_txl.
4376 */
4377 bool lower_txd_cube_map;
4378
4379 /**
4380 * If true, lower nir_texop_txd on 3D surfaces to nir_texop_txl.
4381 */
4382 bool lower_txd_3d;
4383
4384 /**
4385 * If true, lower nir_texop_txd on shadow samplers (except cube maps)
4386 * to nir_texop_txl. Note that cube map shadow samplers are handled
4387 * by lower_txd_cube_map.
4388 */
4389 bool lower_txd_shadow;
4390
4391 /**
4392 * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
4393 * Implies lower_txd_cube_map and lower_txd_shadow.
4394 */
4395 bool lower_txd;
4396
4397 /**
4398 * If true, lower nir_texop_txb instructions that use both shadow compare
4399 * and min_lod to a nir_texop_lod, some math, and nir_texop_tex.
4400 */
4401 bool lower_txb_shadow_clamp;
4402
4403 /**
4404 * If true, lower nir_texop_txd on shadow samplers that use min_lod
4405 * to nir_texop_txl. This includes cube maps.
4406 */
4407 bool lower_txd_shadow_clamp;
4408
4409 /**
4410 * If true, lower nir_texop_txd that uses both offset and min_lod
4411 * to nir_texop_txl. This includes cube maps.
4412 */
4413 bool lower_txd_offset_clamp;
4414
4415 /**
4416 * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
4417 * sampler is bindless.
4418 */
4419 bool lower_txd_clamp_bindless_sampler;
4420
4421 /**
4422 * If true, lower nir_texop_txd with min_lod to a nir_texop_txl if the
4423 * sampler index is not statically determinable to be less than 16.
4424 */
4425 bool lower_txd_clamp_if_sampler_index_not_lt_16;
4426
4427 /**
4428 * If true, lower nir_texop_txs with a non-0-lod into nir_texop_txs with
4429 * 0-lod followed by a nir_ishr.
4430 */
4431 bool lower_txs_lod;
4432
4433 /**
4434 * If true, apply a .bagr swizzle on tg4 results to handle Broadcom's
4435 * mixed-up tg4 locations.
4436 */
4437 bool lower_tg4_broadcom_swizzle;
4438
4439 /**
4440 * If true, lower tg4 with 4 constant offsets to 4 tg4 calls.
4441 */
4442 bool lower_tg4_offsets;
4443
4444 enum nir_lower_tex_packing lower_tex_packing[32];
4445 } nir_lower_tex_options;
4446
4447 bool nir_lower_tex(nir_shader *shader,
4448 const nir_lower_tex_options *options);
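/* A sketch (illustrative option choices) of invoking the pass to lower
 * texture projectors for every sampler dim and to normalize rect textures:
 *
 *    const nir_lower_tex_options tex_opts = {
 *       .lower_txp = ~0u,
 *       .lower_rect = true,
 *    };
 *    nir_lower_tex(shader, &tex_opts);
 */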
4449
4450 enum nir_lower_non_uniform_access_type {
4451 nir_lower_non_uniform_ubo_access = (1 << 0),
4452 nir_lower_non_uniform_ssbo_access = (1 << 1),
4453 nir_lower_non_uniform_texture_access = (1 << 2),
4454 nir_lower_non_uniform_image_access = (1 << 3),
4455 };
4456
4457 bool nir_lower_non_uniform_access(nir_shader *shader,
4458 enum nir_lower_non_uniform_access_type);
4459
4460 enum nir_lower_idiv_path {
4461 /* This path is based on NV50LegalizeSSA::handleDIV(). It is the faster of
4462 * the two but it is not exact in some cases (for example, 1091317713u /
4463 * 1034u gives 5209173 instead of 1055432) */
4464 nir_lower_idiv_fast,
4465 /* This path is based on AMDGPUTargetLowering::LowerUDIVREM() and
4466 * AMDGPUTargetLowering::LowerSDIVREM(). It requires more instructions than
4467 * the nv50 path and many of them are integer multiplications, so it is
4468 * probably slower. It should always return the correct result, though. */
4469 nir_lower_idiv_precise,
4470 };
4471
4472 bool nir_lower_idiv(nir_shader *shader, enum nir_lower_idiv_path path);
4473
4474 typedef struct nir_input_attachment_options {
4475 bool use_fragcoord_sysval;
4476 bool use_layer_id_sysval;
4477 bool use_view_id_for_layer;
4478 } nir_input_attachment_options;
4479
4480 bool nir_lower_input_attachments(nir_shader *shader,
4481 const nir_input_attachment_options *options);
4482
4483 bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables,
4484 bool use_vars,
4485 bool use_clipdist_array,
4486 const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
4487 bool nir_lower_clip_gs(nir_shader *shader, unsigned ucp_enables,
4488 bool use_clipdist_array,
4489 const gl_state_index16 clipplane_state_tokens[][STATE_LENGTH]);
4490 bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables,
4491 bool use_clipdist_array);
4492 bool nir_lower_clip_cull_distance_arrays(nir_shader *nir);
4493 bool nir_lower_clip_disable(nir_shader *shader, unsigned clip_plane_enable);
4494
4495 void nir_lower_point_size_mov(nir_shader *shader,
4496 const gl_state_index16 *pointsize_state_tokens);
4497
4498 bool nir_lower_frexp(nir_shader *nir);
4499
4500 void nir_lower_two_sided_color(nir_shader *shader, bool face_sysval);
4501
4502 bool nir_lower_clamp_color_outputs(nir_shader *shader);
4503
4504 bool nir_lower_flatshade(nir_shader *shader);
4505
4506 void nir_lower_passthrough_edgeflags(nir_shader *shader);
4507 bool nir_lower_patch_vertices(nir_shader *nir, unsigned static_count,
4508 const gl_state_index16 *uniform_state_tokens);
4509
4510 typedef struct nir_lower_wpos_ytransform_options {
4511 gl_state_index16 state_tokens[STATE_LENGTH];
4512 bool fs_coord_origin_upper_left :1;
4513 bool fs_coord_origin_lower_left :1;
4514 bool fs_coord_pixel_center_integer :1;
4515 bool fs_coord_pixel_center_half_integer :1;
4516 } nir_lower_wpos_ytransform_options;
4517
4518 bool nir_lower_wpos_ytransform(nir_shader *shader,
4519 const nir_lower_wpos_ytransform_options *options);
4520 bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading);
4521
4522 bool nir_lower_wrmasks(nir_shader *shader, nir_instr_filter_cb cb, const void *data);
4523
4524 bool nir_lower_fb_read(nir_shader *shader);
4525
4526 typedef struct nir_lower_drawpixels_options {
4527 gl_state_index16 texcoord_state_tokens[STATE_LENGTH];
4528 gl_state_index16 scale_state_tokens[STATE_LENGTH];
4529 gl_state_index16 bias_state_tokens[STATE_LENGTH];
4530 unsigned drawpix_sampler;
4531 unsigned pixelmap_sampler;
4532 bool pixel_maps :1;
4533 bool scale_and_bias :1;
4534 } nir_lower_drawpixels_options;
4535
4536 void nir_lower_drawpixels(nir_shader *shader,
4537 const nir_lower_drawpixels_options *options);
4538
4539 typedef struct nir_lower_bitmap_options {
4540 unsigned sampler;
4541 bool swizzle_xxxx;
4542 } nir_lower_bitmap_options;
4543
4544 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);
4545
4546 bool nir_lower_atomics_to_ssbo(nir_shader *shader);
4547
4548 typedef enum {
4549 nir_lower_int_source_mods = 1 << 0,
4550 nir_lower_float_source_mods = 1 << 1,
4551 nir_lower_triop_abs = 1 << 2,
4552 nir_lower_all_source_mods = (1 << 3) - 1
4553 } nir_lower_to_source_mods_flags;
4554
4555
4556 bool nir_lower_to_source_mods(nir_shader *shader, nir_lower_to_source_mods_flags options);
4557
4558 bool nir_lower_gs_intrinsics(nir_shader *shader, bool per_stream);
4559
4560 typedef unsigned (*nir_lower_bit_size_callback)(const nir_alu_instr *, void *);
4561
4562 bool nir_lower_bit_size(nir_shader *shader,
4563 nir_lower_bit_size_callback callback,
4564 void *callback_data);
4565
4566 nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
4567 bool nir_lower_int64(nir_shader *shader);
4568
4569 nir_lower_doubles_options nir_lower_doubles_op_to_options_mask(nir_op opcode);
4570 bool nir_lower_doubles(nir_shader *shader, const nir_shader *softfp64,
4571 nir_lower_doubles_options options);
4572 bool nir_lower_pack(nir_shader *shader);
4573
4574 void nir_lower_mediump_outputs(nir_shader *nir);
4575
4576 bool nir_lower_point_size(nir_shader *shader, float min, float max);
4577
4578 typedef enum {
4579 nir_lower_interpolation_at_sample = (1 << 1),
4580 nir_lower_interpolation_at_offset = (1 << 2),
4581 nir_lower_interpolation_centroid = (1 << 3),
4582 nir_lower_interpolation_pixel = (1 << 4),
4583 nir_lower_interpolation_sample = (1 << 5),
4584 } nir_lower_interpolation_options;
4585
4586 bool nir_lower_interpolation(nir_shader *shader,
4587 nir_lower_interpolation_options options);
4588
4589 bool nir_lower_discard_to_demote(nir_shader *shader);
4590
4591 bool nir_lower_memory_model(nir_shader *shader);
4592
4593 bool nir_lower_goto_ifs(nir_shader *shader);
4594
4595 bool nir_normalize_cubemap_coords(nir_shader *shader);
4596
4597 void nir_live_ssa_defs_impl(nir_function_impl *impl);
4598
4599 void nir_loop_analyze_impl(nir_function_impl *impl,
4600 nir_variable_mode indirect_mask);
4601
4602 bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);
4603
4604 bool nir_repair_ssa_impl(nir_function_impl *impl);
4605 bool nir_repair_ssa(nir_shader *shader);
4606
4607 void nir_convert_loop_to_lcssa(nir_loop *loop);
4608 bool nir_convert_to_lcssa(nir_shader *shader, bool skip_invariants, bool skip_bool_invariants);
4609 void nir_divergence_analysis(nir_shader *shader, nir_divergence_options options);
4610
4611 /* If phi_webs_only is true, only convert SSA values involved in phi nodes to
4612 * registers. If false, convert all values (even those not involved in a phi
4613 * node) to registers.
4614 */
4615 bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);
4616
4617 bool nir_lower_phis_to_regs_block(nir_block *block);
4618 bool nir_lower_ssa_defs_to_regs_block(nir_block *block);
4619 bool nir_rematerialize_derefs_in_use_blocks_impl(nir_function_impl *impl);
4620
4621 bool nir_lower_samplers(nir_shader *shader);
4622 bool nir_lower_ssbo(nir_shader *shader);
4623
4624 /* This is here for unit tests. */
4625 bool nir_opt_comparison_pre_impl(nir_function_impl *impl);
4626
4627 bool nir_opt_comparison_pre(nir_shader *shader);
4628
4629 bool nir_opt_access(nir_shader *shader);
4630 bool nir_opt_algebraic(nir_shader *shader);
4631 bool nir_opt_algebraic_before_ffma(nir_shader *shader);
4632 bool nir_opt_algebraic_late(nir_shader *shader);
4633 bool nir_opt_algebraic_distribute_src_mods(nir_shader *shader);
4634 bool nir_opt_constant_folding(nir_shader *shader);
4635
4636 /* Try to combine a and b into a. Return true if combination was possible,
4637 * which will result in b being removed by the pass. Return false if
4638 * combination wasn't possible.
4639 */
4640 typedef bool (*nir_combine_memory_barrier_cb)(
4641 nir_intrinsic_instr *a, nir_intrinsic_instr *b, void *data);
4642
4643 bool nir_opt_combine_memory_barriers(nir_shader *shader,
4644 nir_combine_memory_barrier_cb combine_cb,
4645 void *data);
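/* A sketch (illustrative) of the callback contract: merge b into a and
 * return true, or leave both untouched and return false. A trivially
 * conservative callback never combines:
 *
 *    static bool
 *    never_combine(nir_intrinsic_instr *a, nir_intrinsic_instr *b,
 *                  void *data)
 *    {
 *       return false;
 *    }
 */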
4646
4647 bool nir_opt_combine_stores(nir_shader *shader, nir_variable_mode modes);
4648
4649 bool nir_copy_prop(nir_shader *shader);
4650
4651 bool nir_opt_copy_prop_vars(nir_shader *shader);
4652
4653 bool nir_opt_cse(nir_shader *shader);
4654
4655 bool nir_opt_dce(nir_shader *shader);
4656
4657 bool nir_opt_dead_cf(nir_shader *shader);
4658
4659 bool nir_opt_dead_write_vars(nir_shader *shader);
4660
4661 bool nir_opt_deref_impl(nir_function_impl *impl);
4662 bool nir_opt_deref(nir_shader *shader);
4663
4664 bool nir_opt_find_array_copies(nir_shader *shader);
4665
4666 bool nir_opt_gcm(nir_shader *shader, bool value_number);
4667
4668 bool nir_opt_idiv_const(nir_shader *shader, unsigned min_bit_size);
4669
4670 bool nir_opt_if(nir_shader *shader, bool aggressive_last_continue);
4671
4672 bool nir_opt_intrinsics(nir_shader *shader);
4673
4674 bool nir_opt_large_constants(nir_shader *shader,
4675 glsl_type_size_align_func size_align,
4676 unsigned threshold);
4677
4678 bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);
4679
4680 typedef enum {
4681 nir_move_const_undef = (1 << 0),
4682 nir_move_load_ubo = (1 << 1),
4683 nir_move_load_input = (1 << 2),
4684 nir_move_comparisons = (1 << 3),
4685 nir_move_copies = (1 << 4),
4686 } nir_move_options;
4687
4688 bool nir_can_move_instr(nir_instr *instr, nir_move_options options);
4689
4690 bool nir_opt_sink(nir_shader *shader, nir_move_options options);
4691
4692 bool nir_opt_move(nir_shader *shader, nir_move_options options);
4693
4694 bool nir_opt_peephole_select(nir_shader *shader, unsigned limit,
4695 bool indirect_load_ok, bool expensive_alu_ok);
4696
4697 bool nir_opt_rematerialize_compares(nir_shader *shader);
4698
4699 bool nir_opt_remove_phis(nir_shader *shader);
4700 bool nir_opt_remove_phis_block(nir_block *block);
4701
4702 bool nir_opt_shrink_vectors(nir_shader *shader);
4703
4704 bool nir_opt_trivial_continues(nir_shader *shader);
4705
4706 bool nir_opt_undef(nir_shader *shader);
4707
4708 bool nir_opt_vectorize(nir_shader *shader);
4709
4710 bool nir_opt_conditional_discard(nir_shader *shader);
4711
4712 typedef bool (*nir_should_vectorize_mem_func)(unsigned align, unsigned bit_size,
4713 unsigned num_components, unsigned high_offset,
4714 nir_intrinsic_instr *low, nir_intrinsic_instr *high);
4715
4716 bool nir_opt_load_store_vectorize(nir_shader *shader, nir_variable_mode modes,
4717 nir_should_vectorize_mem_func callback,
4718 nir_variable_mode robust_modes);
4719
4720 void nir_sweep(nir_shader *shader);
4721
4722 void nir_remap_dual_slot_attributes(nir_shader *shader,
4723 uint64_t *dual_slot_inputs);
4724 uint64_t nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot);
4725
4726 nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
4727 gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);
4728
4729 static inline bool
4730 nir_variable_is_in_ubo(const nir_variable *var)
4731 {
4732 return (var->data.mode == nir_var_mem_ubo &&
4733 var->interface_type != NULL);
4734 }
4735
4736 static inline bool
4737 nir_variable_is_in_ssbo(const nir_variable *var)
4738 {
4739 return (var->data.mode == nir_var_mem_ssbo &&
4740 var->interface_type != NULL);
4741 }
4742
4743 static inline bool
4744 nir_variable_is_in_block(const nir_variable *var)
4745 {
4746 return nir_variable_is_in_ubo(var) || nir_variable_is_in_ssbo(var);
4747 }
4748
4749 typedef struct nir_unsigned_upper_bound_config {
4750 unsigned min_subgroup_size;
4751 unsigned max_subgroup_size;
4752 unsigned max_work_group_invocations;
4753 unsigned max_work_group_count[3];
4754 unsigned max_work_group_size[3];
4755
4756 uint32_t vertex_attrib_max[32];
4757 } nir_unsigned_upper_bound_config;
4758
4759 uint32_t
4760 nir_unsigned_upper_bound(nir_shader *shader, struct hash_table *range_ht,
4761 nir_ssa_scalar scalar,
4762 const nir_unsigned_upper_bound_config *config);
4763
4764 bool
4765 nir_addition_might_overflow(nir_shader *shader, struct hash_table *range_ht,
4766 nir_ssa_scalar ssa, unsigned const_val,
4767 const nir_unsigned_upper_bound_config *config);
4768
4769 #ifdef __cplusplus
4770 } /* extern "C" */
4771 #endif
4772
4773 #endif /* NIR_H */