/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options)
{
   nir_shader *shader = ralloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);

   shader->options = options;
   memset(&shader->info, 0, sizeof(shader->info));

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;

   shader->stage = stage;

   return shader;
}
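/* Usage sketch (illustrative, not from this file): a driver typically
 * creates the shader and then populates it with the constructors below;
 * "options" stands in for whatever backend-specific option struct is in
 * use.
 *
 *    nir_shader *s = nir_shader_create(NULL, MESA_SHADER_FRAGMENT, options);
 *    nir_function *f = nir_function_create(s, "main");
 */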
static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}
nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_QUALIFIER_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}
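/* Example (sketch): creating a vec4 uniform.  glsl_vec4_type() is assumed
 * from the GLSL type helpers; per the rule above, a uniform automatically
 * becomes read-only.
 *
 *    nir_variable *u =
 *       nir_variable_create(shader, nir_var_uniform, glsl_vec4_type(), "u");
 */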
nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->overload->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}
nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);
   exec_list_make_empty(&func->overload_list);
   func->name = ralloc_strdup(func, name);
   func->shader = shader;

   return func;
}

nir_function_overload *
nir_function_overload_create(nir_function *func)
{
   void *mem_ctx = ralloc_parent(func);

   nir_function_overload *overload = ralloc(mem_ctx, nir_function_overload);

   overload->num_params = 0;
   overload->params = NULL;
   overload->return_type = glsl_void_type();
   overload->impl = NULL;

   exec_list_push_tail(&func->overload_list, &overload->node);
   overload->function = func;

   return overload;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}
void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}
static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
nir_function_impl *
nir_function_impl_create(nir_function_overload *overload)
{
   assert(overload->impl == NULL);

   void *mem_ctx = ralloc_parent(overload);

   nir_function_impl *impl = ralloc(mem_ctx, nir_function_impl);

   overload->impl = impl;
   impl->overload = overload;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(mem_ctx);
   nir_block *end_block = nir_block_create(mem_ctx);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}
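/* Putting the pieces together (sketch): a callable body is built as
 * function -> overload -> impl, using the constructors above:
 *
 *    nir_function *func = nir_function_create(shader, "main");
 *    nir_function_overload *ovl = nir_function_overload_create(func);
 *    nir_function_impl *impl = nir_function_impl_create(ovl);
 *
 * after which impl->body already holds a start block wired to end_block.
 */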
nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = ralloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}
static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}
nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}
nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = ralloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}
static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}
static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < 4; ++i)
      src->swizzle[i] = i;
}
nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   nir_alu_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;

   return instr;
}
nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_load_const_instr *instr = ralloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
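/* Example (sketch): materializing the scalar constant 1.0f.  The value
 * union is written directly after creation, much as
 * nir_deref_get_const_initializer_load() does below.
 *
 *    nir_load_const_instr *c = nir_load_const_instr_create(shader, 1);
 *    c->value.f[0] = 1.0f;
 */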
nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   nir_intrinsic_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}
nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function_overload *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}
nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}
nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}
static nir_deref_var *
copy_deref_var(void *mem_ctx, nir_deref_var *deref)
{
   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_array *
copy_deref_array(void *mem_ctx, nir_deref_array *deref)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}
static nir_deref_struct *
copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

nir_deref *
nir_copy_deref(void *mem_ctx, nir_deref *deref)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
   case nir_deref_type_array:
      return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
   case nir_deref_type_struct:
      return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}
/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_offset = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_offset = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type));

   matrix_offset *= load->def.num_components;
   for (unsigned i = 0; i < load->def.num_components; i++) {
      switch (glsl_get_base_type(tail->type)) {
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
         load->value.u[i] = constant->value.u[matrix_offset + i];
         break;
      case GLSL_TYPE_BOOL:
         load->value.u[i] = constant->value.b[matrix_offset + i] ?
                             NIR_TRUE : NIR_FALSE;
         break;
      default:
         unreachable("Invalid immediate type");
      }
   }

   return load;
}
nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}
static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}
static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
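/* Example (sketch): the cursor helpers from nir.h (nir_before_block,
 * nir_after_block, nir_before_instr, nir_after_instr) are the usual way to
 * build a cursor, e.g. appending a freshly created instruction to a block:
 *
 *    nir_instr_insert(nir_after_block(block), &alu->instr);
 */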
static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}
void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}
void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}
bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}
struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}
bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;

   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);

   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array)
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;

      cur = cur->child;
   }

   return true;
}
static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   if (instr->texture != NULL)
      if (!visit_deref_src(instr->texture, cb, state))
         return false;

   if (instr->sampler != NULL)
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;

   return true;
}
static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++)
      if (!visit_src(&instr->src[i], cb, state))
         return false;

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++)
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;

   return true;
}
static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   return true;
}

static bool
visit_load_const_src(nir_load_const_instr *instr, nir_foreach_src_cb cb,
                     void *state)
{
   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(instr, src) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}
typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}
bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      if (!visit_load_const_src(nir_instr_as_load_const(instr), cb, state))
         return false;
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
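/* Example (sketch): constant-folding passes use this to test an operand,
 * e.g.
 *
 *    nir_const_value *cv = nir_src_as_const_value(instr->src[0].src);
 *    if (cv && cv->u[0] == 0)
 *       ... the first operand is a known zero ...
 */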
/**
 * Returns true if the source is known to be dynamically uniform. Otherwise
 * it returns false, which means it may or may not be dynamically uniform
 * but it can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function
    * is called with dynamically uniform arguments.
    */

   return false;
}
static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}
static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}
void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}
void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}
void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components, const char *name)
{
   def->name = name;
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, name);
}
void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
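/* Example (sketch): replacing every use of an ALU result with a new SSA
 * value, as an optimization pass would after emitting a replacement:
 *
 *    nir_ssa_def_rewrite_uses(&old_alu->dest.dest.ssa,
 *                             nir_src_for_ssa(new_def));
 *
 * nir_src_for_ssa() is the nir.h helper that wraps an SSA def in a nir_src.
 */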
static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}
/* Replaces all uses of the given SSA def with the given source, but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can
 * freely use the result in that fixup code, then call rewrite_uses_after
 * with the last fixup instruction as after_me, and it will replace all of
 * the uses you want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
static bool foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                            bool reverse, void *state);

static bool
foreach_if(nir_if *if_stmt, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_safe_reverse(nir_cf_node, node, node,
                                      &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_safe_reverse(nir_cf_node, node, node,
                                      &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}
static bool
foreach_loop(nir_loop *loop, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_safe_reverse(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}
static bool
foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                bool reverse, void *state)
{
   switch (node->type) {
   case nir_cf_node_block:
      return cb(nir_cf_node_as_block(node), state);
   case nir_cf_node_if:
      return foreach_if(nir_cf_node_as_if(node), cb, reverse, state);
   case nir_cf_node_loop:
      return foreach_loop(nir_cf_node_as_loop(node), cb, reverse, state);

   default:
      unreachable("Invalid CFG node type");
      break;
   }

   return false;
}
bool
nir_foreach_block_in_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                             void *state)
{
   return foreach_cf_node(node, cb, false, state);
}
bool
nir_foreach_block(nir_function_impl *impl, nir_foreach_block_cb cb, void *state)
{
   foreach_list_typed_safe(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, false, state))
         return false;
   }

   return cb(impl->end_block, state);
}
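/* Example (sketch): a pass-style walk with a callback; returning false from
 * the callback stops the iteration early.
 *
 *    static bool
 *    count_block(nir_block *block, void *state)
 *    {
 *       (*(unsigned *)state)++;
 *       return true;
 *    }
 *
 *    unsigned count = 0;
 *    nir_foreach_block(impl, count_block, &count);
 */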
bool
nir_foreach_block_reverse(nir_function_impl *impl, nir_foreach_block_cb cb,
                          void *state)
{
   if (!cb(impl->end_block, state))
      return false;

   foreach_list_typed_safe_reverse(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, true, state))
         return false;
   }

   return true;
}
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}
static bool
index_block(nir_block *block, void *state)
{
   unsigned *index = state;
   block->index = (*index)++;
   return true;
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(impl, index_block, &index);

   impl->num_blocks = index;
}
static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

static bool
index_ssa_block(nir_block *block, void *state)
{
   nir_foreach_instr(block, instr)
      nir_foreach_ssa_def(instr, index_ssa_def_cb, state);

   return true;
}

/**
 * The indices are applied top-to-bottom, which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_ssa_block, &index);
   impl->ssa_alloc = index;
}
static bool
index_instrs_block(nir_block *block, void *state)
{
   unsigned *index = state;
   nir_foreach_instr(block, instr)
      instr->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom, which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_instrs_block, &index);
}
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}
gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}