/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */
#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options)
{
   nir_shader *shader = ralloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;
   memset(&shader->info, 0, sizeof(shader->info));

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   shader->stage = stage;

   return shader;
}
static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_QUALIFIER_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}
nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->return_type = glsl_void_type();
   func->impl = NULL;

   return func;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}
static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);

   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   impl->num_params = function->num_params;
   impl->params = ralloc_array(function->shader,
                               nir_variable *, impl->num_params);

   return impl;
}
nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = ralloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, the state-tracker creates an
    * initial IR, clones that, runs appropriate lowering passes, passes
    * it to the driver which does common lowering/opt, and then stores a
    * ref which is later used to do state-specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}
static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}
nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = ralloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}
static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < 4; ++i)
      src->swizzle[i] = i;
}
nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   nir_alu_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;

   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_load_const_instr *instr = ralloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   nir_intrinsic_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}
static nir_deref_var *
copy_deref_var(void *mem_ctx, nir_deref_var *deref)
{
   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_array *
copy_deref_array(void *mem_ctx, nir_deref_array *deref)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_struct *
copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

nir_deref *
nir_copy_deref(void *mem_ctx, nir_deref *deref)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
   case nir_deref_type_array:
      return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
   case nir_deref_type_struct:
      return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}
/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_offset = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_offset = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type));

   matrix_offset *= load->def.num_components;
   for (unsigned i = 0; i < load->def.num_components; i++) {
      switch (glsl_get_base_type(tail->type)) {
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
         load->value.u[i] = constant->value.u[matrix_offset + i];
         break;

      case GLSL_TYPE_BOOL:
         load->value.u[i] = constant->value.b[matrix_offset + i] ?
                             NIR_TRUE : NIR_FALSE;
         break;

      default:
         unreachable("Invalid immediate type");
      }
   }

   return load;
}
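/* Illustrative usage sketch (an assumption, not part of the original file):
 * a lowering pass that finds a load of a constant-initialized variable can
 * materialize the initializer and rewrite the uses, roughly:
 *
 *    nir_load_const_instr *load =
 *       nir_deref_get_const_initializer_load(shader, intrin->variables[0]);
 *    nir_instr_insert_before(&intrin->instr, &load->instr);
 *    nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
 *                             nir_src_for_ssa(&load->def));
 *    nir_instr_remove(&intrin->instr);
 *
 * "shader" and "intrin" stand in for the pass's own state; nir_src_for_ssa()
 * and nir_instr_insert_before() are helpers declared in nir.h.
 */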
nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      assert(node->parent != NULL);
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}
/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      } else {
         /* Try to switch to after the previous block if there is one.
          * (This isn't likely, but it can happen.)
          */
         nir_cf_node *prev_node = nir_cf_node_prev(&cursor.block->cf_node);
         if (prev_node && prev_node->type == nir_cf_node_block) {
            cursor.block = nir_cf_node_as_block(prev_node);
            cursor.option = nir_cursor_after_block;
         }
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}
bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
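/* Illustrative sketch (an assumption, not from the original file): thanks to
 * the reduction above, cursors that denote the same insertion point compare
 * equal even when built differently.  For a non-empty block "block" whose
 * first instruction is "first" (and with no block node directly before it):
 *
 *    nir_cursors_equal(nir_before_block(block), nir_before_instr(first));
 *
 * evaluates to true, since both reduce to the same canonical form.
 * nir_before_block() and nir_before_instr() are cursor helpers from nir.h.
 */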
static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
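/* Illustrative sketch (an assumption, not part of the original file): a
 * typical caller builds an instruction and inserts it at a cursor from one
 * of the nir.h helpers, e.g. appending a move after an existing instruction:
 *
 *    nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
 *    mov->src[0].src = nir_src_for_ssa(some_def);
 *    nir_ssa_dest_init(&mov->instr, &mov->dest.dest,
 *                      some_def->num_components, NULL);
 *    nir_instr_insert(nir_after_instr(some_instr), &mov->instr);
 *
 * "shader", "some_def", and "some_instr" are placeholders for pass state.
 */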
static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}
void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}
struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array) {
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;
      }

      cur = cur->child;
   }

   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   if (instr->texture != NULL)
      if (!visit_deref_src(instr->texture, cb, state))
         return false;

   if (instr->sampler != NULL)
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++)
      if (!visit_src(&instr->src[i], cb, state))
         return false;

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++)
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   return true;
}

static bool
visit_load_const_src(nir_load_const_instr *instr, nir_foreach_src_cb cb,
                     void *state)
{
   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(instr, src) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}
typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      if (!visit_load_const_src(nir_instr_as_load_const(instr), cb, state))
         return false;
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
/**
 * Returns true if the source is known to be dynamically uniform. Otherwise it
 * returns false, which means it may or may not be dynamically uniform but it
 * can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}
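/* Illustrative sketch (an assumption, not part of the original file): a
 * backend can use this analysis to pick a cheaper code path, falling back
 * to the general per-invocation path whenever the result is inconclusive:
 *
 *    if (nir_src_is_dynamically_uniform(instr->src[0])) {
 *       // emit the scalar/uniform variant
 *    } else {
 *       // emit the per-invocation variant
 *    }
 */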
static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}
void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}
void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components, const char *name)
{
   def->name = name;
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
   }

   return false;
}
/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
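/* Illustrative sketch (an assumption, not part of the original file) of the
 * fixup pattern described above, e.g. saturating the result of an existing
 * ALU instruction "alu" without rewriting the fixup code's own use of it:
 *
 *    nir_alu_instr *sat = nir_alu_instr_create(shader, nir_op_fsat);
 *    sat->src[0].src = nir_src_for_ssa(&alu->dest.dest.ssa);
 *    nir_ssa_dest_init(&sat->instr, &sat->dest.dest,
 *                      alu->dest.dest.ssa.num_components, NULL);
 *    nir_instr_insert_after(&alu->instr, &sat->instr);
 *
 *    nir_ssa_def_rewrite_uses_after(&alu->dest.dest.ssa,
 *                                   nir_src_for_ssa(&sat->dest.dest.ssa),
 *                                   &sat->instr);
 *
 * Every use after "sat" now reads the saturated value, while "sat" itself
 * keeps reading the original def.
 */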
static bool foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                            bool reverse, void *state);

static inline bool
foreach_if(nir_if *if_stmt, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_reverse_safe(nir_cf_node, node, node,
                                      &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_reverse_safe(nir_cf_node, node, node,
                                      &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}

static inline bool
foreach_loop(nir_loop *loop, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_reverse_safe(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}

static bool
foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                bool reverse, void *state)
{
   switch (node->type) {
   case nir_cf_node_block:
      return cb(nir_cf_node_as_block(node), state);
   case nir_cf_node_if:
      return foreach_if(nir_cf_node_as_if(node), cb, reverse, state);
   case nir_cf_node_loop:
      return foreach_loop(nir_cf_node_as_loop(node), cb, reverse, state);

   default:
      unreachable("Invalid CFG node type");
      break;
   }

   return false;
}
bool
nir_foreach_block_in_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                             void *state)
{
   return foreach_cf_node(node, cb, false, state);
}

bool
nir_foreach_block(nir_function_impl *impl, nir_foreach_block_cb cb, void *state)
{
   foreach_list_typed_safe(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, false, state))
         return false;
   }

   return cb(impl->end_block, state);
}

bool
nir_foreach_block_reverse(nir_function_impl *impl, nir_foreach_block_cb cb,
                          void *state)
{
   if (!cb(impl->end_block, state))
      return false;

   foreach_list_typed_reverse_safe(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, true, state))
         return false;
   }

   return true;
}
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}
static bool
index_block(nir_block *block, void *state)
{
   unsigned *index = state;
   block->index = (*index)++;
   return true;
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(impl, index_block, &index);

   impl->num_blocks = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

static bool
index_ssa_block(nir_block *block, void *state)
{
   nir_foreach_instr(block, instr)
      nir_foreach_ssa_def(instr, index_ssa_def_cb, state);

   return true;
}
/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_ssa_block, &index);
   impl->ssa_alloc = index;
}
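/* Illustrative sketch (an assumption, not part of the original file): the
 * ordering property above yields a cheap dominance rejection test between
 * two defs "a" and "b" in the same function:
 *
 *    nir_index_ssa_defs(impl);
 *    ...
 *    if (a->index > b->index) {
 *       // a cannot dominate b
 *    }
 */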
static bool
index_instrs_block(nir_block *block, void *state)
{
   unsigned *index = state;
   nir_foreach_instr(block, instr)
      instr->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_instrs_block, &index);
}
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}