/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */
#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options)
{
   nir_shader *shader = ralloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;
   memset(&shader->info, 0, sizeof(shader->info));

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   shader->stage = stage;

   return shader;
}
static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_param:
      assert(!"nir_shader_add_variable cannot be used for function parameters");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_QUALIFIER_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}
nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->return_type = glsl_void_type();
   func->impl = NULL;

   return func;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}
void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}
static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);

   return impl;
}
nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   impl->num_params = function->num_params;
   impl->params = ralloc_array(function->shader,
                               nir_variable *, impl->num_params);

   for (unsigned i = 0; i < impl->num_params; i++) {
      impl->params[i] = rzalloc(function->shader, nir_variable);
      impl->params[i]->type = function->params[i].type;
      impl->params[i]->data.mode = nir_var_param;
      impl->params[i]->data.location = i;
   }

   if (!glsl_type_is_void(function->return_type)) {
      impl->return_var = rzalloc(function->shader, nir_variable);
      impl->return_var->type = function->return_type;
      impl->return_var->data.mode = nir_var_param;
      impl->return_var->data.location = -1;
   }

   return impl;
}
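
/* A minimal usage sketch (illustrative only, not part of this file's API):
 * the typical creation chain is shader -> function -> impl, using the
 * constructors above.  The "main" name and the NULL ralloc parent are just
 * assumptions for the example.
 *
 *    nir_shader *shader = nir_shader_create(NULL, MESA_SHADER_VERTEX, options);
 *    nir_function *func = nir_function_create(shader, "main");
 *    nir_function_impl *impl = nir_function_impl_create(func);
 *
 * At this point impl owns a start block and an end block, and the start
 * block's only successor is the end block.
 */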
nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = ralloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}
static void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}
nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}
nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = ralloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}
static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < 4; ++i)
      src->swizzle[i] = i;
}
nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   nir_alu_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_load_const_instr *instr = ralloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   nir_intrinsic_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}
nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}
nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader, unsigned num_components)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, NULL);

   return instr;
}
nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}
static nir_deref_var *
copy_deref_var(void *mem_ctx, nir_deref_var *deref)
{
   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_array *
copy_deref_array(void *mem_ctx, nir_deref_array *deref)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_struct *
copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

nir_deref *
nir_copy_deref(void *mem_ctx, nir_deref *deref)
{
   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
   case nir_deref_type_array:
      return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
   case nir_deref_type_struct:
      return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}
/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_offset = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_offset = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type));

   matrix_offset *= load->def.num_components;
   for (unsigned i = 0; i < load->def.num_components; i++) {
      switch (glsl_get_base_type(tail->type)) {
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
         load->value.u[i] = constant->value.u[matrix_offset + i];
         break;
      case GLSL_TYPE_BOOL:
         load->value.u[i] = constant->value.b[matrix_offset + i] ?
                             NIR_TRUE : NIR_FALSE;
         break;
      default:
         unreachable("Invalid immediate type");
      }
   }

   return load;
}
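
/* An illustrative sketch of how a caller might use the helper above (the
 * names "var", "use_instr" and the ralloc context are assumptions for the
 * example, not part of this API):
 *
 *    nir_deref_var *deref = nir_deref_var_create(mem_ctx, var);
 *    nir_load_const_instr *load =
 *       nir_deref_get_const_initializer_load(shader, deref);
 *    nir_instr_insert(nir_before_instr(use_instr), &load->instr);
 *
 * The helper only creates the load_const; inserting it into the IR is left
 * to the caller, as sketched with the cursor call.
 */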
nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}
/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      } else {
         /* Try to switch to after the previous block if there is one.
          * (This isn't likely, but it can happen.)
          */
         nir_cf_node *prev_node = nir_cf_node_prev(&cursor.block->cf_node);
         if (prev_node && prev_node->type == nir_cf_node_block) {
            cursor.block = nir_cf_node_as_block(prev_node);
            cursor.option = nir_cursor_after_block;
         }
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}
bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
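
/* As a consequence of the reduction above, cursors that name the same
 * insertion point through different constructors compare equal.  A small
 * sketch (cursor helpers from nir.h; illustrative only, and assuming "block"
 * is non-empty so it has a first instruction):
 *
 *    nir_cursor a = nir_before_block(block);
 *    nir_cursor b = nir_before_instr(nir_block_first_instr(block));
 *    assert(nir_cursors_equal(a, b));
 *
 * Both reduce to the same canonical cursor, so they are considered equal.
 */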
static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
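
/* A minimal sketch of how callers typically drive the cursor API (the cursor
 * helpers live in nir.h; "mov" and "some_instr" are hypothetical names used
 * only for the example):
 *
 *    nir_alu_instr *mov = nir_alu_instr_create(shader, nir_op_imov);
 *    ... set up mov->src[0] and mov->dest ...
 *    nir_instr_insert(nir_after_instr(some_instr), &mov->instr);
 *
 * This places the new instruction immediately after some_instr and wires up
 * its uses and defs via add_defs_uses().
 */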
static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}
void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}
struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array) {
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;
      }

      cur = cur->child;
   }

   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   if (instr->texture != NULL) {
      if (!visit_deref_src(instr->texture, cb, state))
         return false;
   }

   if (instr->sampler != NULL) {
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   return true;
}

static bool
visit_load_const_src(nir_load_const_instr *instr, nir_foreach_src_cb cb,
                     void *state)
{
   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(instr, src) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(instr, entry) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      if (!visit_load_const_src(nir_instr_as_load_const(instr), cb, state))
         return false;
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
/**
 * Returns true if the source is known to be dynamically uniform. Otherwise it
 * returns false which means it may or may not be dynamically uniform but it
 * can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */

   return false;
}
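
/* Backends can use this as a conservative check, for example (illustrative
 * sketch only; the fix-up emitted in the divergent case is backend-specific):
 *
 *    if (!nir_src_is_dynamically_uniform(instr->src[0])) {
 *       ... emit a per-lane loop or other fix-up for a possibly
 *           divergent index ...
 *    }
 *
 * A false return only means "unknown", never "definitely divergent".
 */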
static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}
void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}
void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components, const char *name)
{
   def->name = name;
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, name);
}
void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
   }

   return false;
}
/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(def, use_src) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(def, use_src)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
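
/* A sketch of the fix-up pattern described above (illustrative only; "fixup"
 * is a hypothetical instruction, emitted after some_instr, that reads def and
 * produces the corrected value in an SSA destination):
 *
 *    nir_ssa_def *def = &some_instr->dest.ssa;
 *    nir_instr_insert(nir_after_instr(&some_instr->instr), &fixup->instr);
 *    nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(&fixup->dest.ssa),
 *                                   &fixup->instr);
 *
 * Every use after the fix-up now reads the fixed value, while the fix-up code
 * itself keeps reading the original def.
 */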
static bool foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                            bool reverse, void *state);

static inline bool
foreach_if(nir_if *if_stmt, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_reverse_safe(nir_cf_node, node, node,
                                      &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_reverse_safe(nir_cf_node, node, node,
                                      &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->then_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }

      foreach_list_typed_safe(nir_cf_node, node, node, &if_stmt->else_list) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}

static inline bool
foreach_loop(nir_loop *loop, nir_foreach_block_cb cb, bool reverse, void *state)
{
   if (reverse) {
      foreach_list_typed_reverse_safe(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   } else {
      foreach_list_typed_safe(nir_cf_node, node, node, &loop->body) {
         if (!foreach_cf_node(node, cb, reverse, state))
            return false;
      }
   }

   return true;
}

static bool
foreach_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                bool reverse, void *state)
{
   switch (node->type) {
   case nir_cf_node_block:
      return cb(nir_cf_node_as_block(node), state);
   case nir_cf_node_if:
      return foreach_if(nir_cf_node_as_if(node), cb, reverse, state);
   case nir_cf_node_loop:
      return foreach_loop(nir_cf_node_as_loop(node), cb, reverse, state);

   default:
      unreachable("Invalid CFG node type");
      break;
   }

   return false;
}

bool
nir_foreach_block_in_cf_node(nir_cf_node *node, nir_foreach_block_cb cb,
                             void *state)
{
   return foreach_cf_node(node, cb, false, state);
}

bool
nir_foreach_block(nir_function_impl *impl, nir_foreach_block_cb cb, void *state)
{
   foreach_list_typed_safe(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, false, state))
         return false;
   }

   return cb(impl->end_block, state);
}

bool
nir_foreach_block_reverse(nir_function_impl *impl, nir_foreach_block_cb cb,
                          void *state)
{
   if (!cb(impl->end_block, state))
      return false;

   foreach_list_typed_reverse_safe(nir_cf_node, node, node, &impl->body) {
      if (!foreach_cf_node(node, cb, true, state))
         return false;
   }

   return true;
}
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}
static bool
index_block(nir_block *block, void *state)
{
   unsigned *index = state;
   block->index = (*index)++;
   return true;
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(impl, index_block, &index);

   impl->num_blocks = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

static bool
index_ssa_block(nir_block *block, void *state)
{
   nir_foreach_instr(block, instr)
      nir_foreach_ssa_def(instr, index_ssa_def_cb, state);

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_ssa_block, &index);
   impl->ssa_alloc = index;
}
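
/* Because of that property, a pass that has just called nir_index_ssa_defs()
 * can use the indices as a cheap necessary condition for dominance, e.g.
 * (illustrative sketch):
 *
 *    if (a->index > b->index)
 *       return false;   // a definitely does not dominate b
 *
 * The converse still requires a real dominance query.
 */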
static bool
index_instrs_block(nir_block *block, void *state)
{
   unsigned *index = state;
   nir_foreach_instr(block, instr)
      instr->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;
   nir_foreach_block(impl, index_instrs_block, &index);
   return index;
}
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}
gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}