/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>

nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}
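
/* A note on memory ownership (not from the original comments): everything
 * below is allocated with ralloc, so an object created with the shader (or
 * another NIR object) as its memory context is freed automatically when
 * that context is freed.
 */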

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}

nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}

void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_param:
      assert(!"nir_shader_add_variable cannot be used for function parameters");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->info.stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
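
/* Usage sketch for nir_variable_create() below (glsl_vec4_type() is the
 * helper from nir_types.h; the variable name here is hypothetical):
 *
 *    nir_variable *pos =
 *       nir_variable_create(shader, nir_var_shader_in,
 *                           glsl_vec4_type(), "in_position");
 */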

nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->return_type = glsl_void_type();
   func->impl = NULL;

   return func;
}
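
/* A typical creation sequence pairs nir_function_create() with
 * nir_function_impl_create() (defined further down):
 *
 *    nir_function *entry = nir_function_create(shader, "main");
 *    nir_function_impl *impl = nir_function_impl_create(entry);
 */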

void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}

void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}

static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
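
/* nir_function_impl_create_bare() below sets up the minimal CFG invariant:
 * a start block that falls through to the end block. Note that the end
 * block is deliberately not pushed onto impl->body; it exists only as a
 * successor so that every impl has a unique exit point.
 */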

nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   impl->num_params = function->num_params;
   impl->params = ralloc_array(function->shader,
                               nir_variable *, impl->num_params);

   for (unsigned i = 0; i < impl->num_params; i++) {
      impl->params[i] = rzalloc(function->shader, nir_variable);
      impl->params[i]->type = function->params[i].type;
      impl->params[i]->data.mode = nir_var_param;
      impl->params[i]->data.location = i;
   }

   if (!glsl_type_is_void(function->return_type)) {
      impl->return_var = rzalloc(function->shader, nir_variable);
      impl->return_var->type = function->return_type;
      impl->return_var->data.mode = nir_var_param;
      impl->return_var->data.location = -1;
   } else {
      impl->return_var = NULL;
   }

   return impl;
}

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}

static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}

static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < 4; ++i)
      src->swizzle[i] = i;
}
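
/* A sketch of building an ALU instruction by hand with the helpers below
 * (in practice most passes use nir_builder instead; 'a' and 'b' here are
 * hypothetical SSA defs):
 *
 *    nir_alu_instr *add = nir_alu_instr_create(shader, nir_op_fadd);
 *    add->src[0].src = nir_src_for_ssa(a);
 *    add->src[1].src = nir_src_for_ssa(b);
 *    nir_ssa_dest_init(&add->instr, &add->dest.dest,
 *                      a->num_components, a->bit_size, NULL);
 */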

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}

nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}
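
/* nir_tex_instr_add_src() below grows the source array by allocating a new
 * array and moving each existing source over with nir_instr_move_src(), so
 * that the use/def lists keep pointing at valid nir_src storage rather than
 * at the freed old array.
 */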

void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}

nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}

nir_deref_var *
nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx)
{
   if (deref == NULL)
      return NULL;

   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

static nir_deref_array *
deref_array_clone(const nir_deref_array *deref, void *mem_ctx)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

static nir_deref_struct *
deref_struct_clone(const nir_deref_struct *deref, void *mem_ctx)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_deref_clone(deref->deref.child, ret);
   return ret;
}

nir_deref *
nir_deref_clone(const nir_deref *deref, void *mem_ctx)
{
   if (deref == NULL)
      return NULL;

   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &nir_deref_var_clone(nir_deref_as_var(deref), mem_ctx)->deref;
   case nir_deref_type_array:
      return &deref_array_clone(nir_deref_as_array(deref), mem_ctx)->deref;
   case nir_deref_type_struct:
      return &deref_struct_clone(nir_deref_as_struct(deref), mem_ctx)->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}

/* This is the second step in the recursion.  We've found the tail and made a
 * copy.  Now we need to iterate over all possible leaves and call the
 * callback on each one.
 */
static bool
deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
                               nir_deref_foreach_leaf_cb cb, void *state)
{
   unsigned length;
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } tmp;

   assert(tail->child == NULL);
   switch (glsl_get_base_type(tail->type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_vector_or_scalar(tail->type))
         return cb(deref, state);
      /* Fall Through */

   case GLSL_TYPE_ARRAY:
      tmp.arr.deref.deref_type = nir_deref_type_array;
      tmp.arr.deref.type = glsl_get_array_element(tail->type);
      tmp.arr.deref_array_type = nir_deref_array_type_direct;
      tmp.arr.indirect = NIR_SRC_INIT;
      tail->child = &tmp.arr.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.arr.base_offset = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   case GLSL_TYPE_STRUCT:
      tmp.str.deref.deref_type = nir_deref_type_struct;
      tail->child = &tmp.str.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
         tmp.str.index = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   default:
      unreachable("Invalid type for dereference");
   }
}

/* This is the first step of the foreach_leaf recursion.  In this step we are
 * walking to the end of the deref chain and making a copy in the stack as we
 * go.  This is because we don't want to mutate the deref chain that was
 * passed in by the caller.  The downside is that this deref chain is on the
 * stack and, if the caller wants to do anything with it, they will have to
 * make their own copy because this one will go away.
 */
static bool
deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
                              nir_deref_foreach_leaf_cb cb, void *state)
{
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } c;

   if (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array:
         c.arr = *nir_deref_as_array(tail->child);
         tail->child = &c.arr.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);

      case nir_deref_type_struct:
         c.str = *nir_deref_as_struct(tail->child);
         tail->child = &c.str.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);

      case nir_deref_type_var:
      default:
         unreachable("Invalid deref type for a child");
      }
   } else {
      /* We've gotten to the end of the original deref.  Time to start
       * building our own derefs.
       */
      return deref_foreach_leaf_build_recur(deref, tail, cb, state);
   }
}

/**
 * This function iterates over all of the possible derefs that can be created
 * with the given deref as the head.  It then calls the provided callback with
 * a full deref for each one.
 *
 * The deref passed to the callback will be allocated on the stack.  You will
 * need to make a copy if you want it to hang around.
 */
bool
nir_deref_foreach_leaf(nir_deref_var *deref,
                       nir_deref_foreach_leaf_cb cb, void *state)
{
   nir_deref_var copy = *deref;
   return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
}
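
/* A sketch of a nir_deref_foreach_leaf() callback that just counts leaves
 * (the signature comes from nir_deref_foreach_leaf_cb; the helper itself is
 * hypothetical):
 *
 *    static bool
 *    count_leaf(nir_deref_var *leaf, void *state)
 *    {
 *       (*(unsigned *)state)++;
 *       return true;
 *    }
 */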

/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_col = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_col = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   unsigned bit_size = glsl_get_bit_size(tail->type);
   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
                                  bit_size);

   switch (glsl_get_base_type(tail->type)) {
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_BOOL:
      load->value = constant->values[matrix_col];
      break;
   default:
      unreachable("Invalid immediate type");
   }

   return load;
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}

/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}

static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
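
/* Cursors for nir_instr_insert() below are normally built with the static
 * inline helpers from nir.h, e.g. (sketch):
 *
 *    nir_instr_insert(nir_after_instr(some_instr), &new_instr->instr);
 *    nir_instr_insert(nir_before_block(block), &other_instr->instr);
 */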

void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}

static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}

static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}

struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}

static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array) {
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;
      }

      cur = cur->child;
   }

   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   if (instr->texture != NULL) {
      if (!visit_deref_src(instr->texture, cb, state))
         return false;
   }

   if (instr->sampler != NULL) {
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}

typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      /* Call instructions have no regular sources */
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
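
/* Usage sketch for nir_src_as_const_value() (the surrounding pass logic is
 * hypothetical, and assumes the f32 view of nir_const_value): fold away a
 * multiply by 1.0:
 *
 *    nir_const_value *cv = nir_src_as_const_value(alu->src[1].src);
 *    if (cv && cv->f32[0] == 1.0f)
 *       ... replace the fmul with its other operand ...
 */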

/**
 * Returns true if the source is known to be dynamically uniform. Otherwise it
 * returns false which means it may or may not be dynamically uniform but it
 * can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}

static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}

void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

void
nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
                        nir_deref_var *new_deref)
{
   if (*deref)
      visit_deref_src(*deref, remove_use_cb, NULL);

   *deref = new_deref;

   if (*deref)
      visit_deref_src(*deref, add_use_cb, instr);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}

void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
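
/* nir_ssa_def_rewrite_uses() above is the usual way an optimization pass
 * replaces an instruction's result: build the replacement value, call
 * nir_ssa_def_rewrite_uses(&old->dest.ssa, nir_src_for_ssa(new)), then
 * nir_instr_remove() the now-unused old instruction.
 */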

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
      assert(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

uint8_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   uint8_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);

         for (unsigned c = 0; c < 4; c++) {
            if (!nir_alu_instr_channel_used(alu, src_idx, c))
               continue;

            read_mask |= (1 << alu_src->swizzle[c]);
         }
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   return read_mask;
}
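
/* In the mask returned above, bit i is set when some ALU use reads
 * component i of the def through its swizzle; any non-ALU use conservatively
 * counts as reading every component.
 */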

nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_first_then_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_first_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_if_last_else_block(if_stmt);
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_loop_last_block(loop);
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_block_cf_tree_next(nir_cf_node_as_block(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}

nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}

void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   impl->num_blocks = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}

nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}