/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */
#include "nir.h"
#include "nir_control_flow_private.h"
#include <assert.h>
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options)
{
   nir_shader *shader = ralloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;
   memset(&shader->info, 0, sizeof(shader->info));

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   shader->stage = stage;

   return shader;
}
static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}
nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_local:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_param:
      assert(!"nir_shader_add_variable cannot be used for function parameters");
      break;

   case nir_var_global:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_shader_storage:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_shared:
      assert(shader->stage == MESA_SHADER_COMPUTE);
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;

   if ((mode == nir_var_shader_in && shader->stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out && shader->stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}
nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_local;

   nir_function_impl_add_variable(impl, var);

   return var;
}
nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->return_type = glsl_void_type();
   func->impl = NULL;

   return func;
}
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}
void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < 4; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}
static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->num_params = 0;
   impl->params = NULL;
   impl->return_var = NULL;
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);
   return impl;
}
nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   impl->num_params = function->num_params;
   impl->params = ralloc_array(function->shader,
                               nir_variable *, impl->num_params);

   for (unsigned i = 0; i < impl->num_params; i++) {
      impl->params[i] = rzalloc(function->shader, nir_variable);
      impl->params[i]->type = function->params[i].type;
      impl->params[i]->data.mode = nir_var_param;
      impl->params[i]->data.location = i;
   }

   if (!glsl_type_is_void(function->return_type)) {
      impl->return_var = rzalloc(function->shader, nir_variable);
      impl->return_var->type = function->return_type;
      impl->return_var->data.mode = nir_var_param;
      impl->return_var->data.location = -1;
   } else {
      impl->return_var = NULL;
   }

   return impl;
}
nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = ralloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_set_create(block, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   exec_list_make_empty(&block->instr_list);

   return block;
}
static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}
nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = ralloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}
static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}
static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < 4; ++i)
      src->swizzle[i] = i;
}
nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   nir_alu_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = ralloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}
nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   nir_intrinsic_instr *instr =
      ralloc_size(shader,
                  sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}
nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   nir_call_instr *instr = ralloc(shader, nir_call_instr);
   instr_init(&instr->instr, nir_instr_type_call);

   instr->callee = callee;
   instr->num_params = callee->num_params;
   instr->params = ralloc_array(instr, nir_deref_var *, instr->num_params);
   instr->return_deref = NULL;

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->texture = NULL;
   instr->sampler_index = 0;
   instr->sampler = NULL;

   return instr;
}
void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}
nir_deref_var *
nir_deref_var_create(void *mem_ctx, nir_variable *var)
{
   nir_deref_var *deref = ralloc(mem_ctx, nir_deref_var);
   deref->deref.deref_type = nir_deref_type_var;
   deref->deref.child = NULL;
   deref->deref.type = var->type;
   deref->var = var;
   return deref;
}

nir_deref_array *
nir_deref_array_create(void *mem_ctx)
{
   nir_deref_array *deref = ralloc(mem_ctx, nir_deref_array);
   deref->deref.deref_type = nir_deref_type_array;
   deref->deref.child = NULL;
   deref->deref_array_type = nir_deref_array_type_direct;
   src_init(&deref->indirect);
   deref->base_offset = 0;
   return deref;
}

nir_deref_struct *
nir_deref_struct_create(void *mem_ctx, unsigned field_index)
{
   nir_deref_struct *deref = ralloc(mem_ctx, nir_deref_struct);
   deref->deref.deref_type = nir_deref_type_struct;
   deref->deref.child = NULL;
   deref->index = field_index;
   return deref;
}
static nir_deref_var *
copy_deref_var(void *mem_ctx, nir_deref_var *deref)
{
   nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_array *
copy_deref_array(void *mem_ctx, nir_deref_array *deref)
{
   nir_deref_array *ret = nir_deref_array_create(mem_ctx);
   ret->base_offset = deref->base_offset;
   ret->deref_array_type = deref->deref_array_type;
   if (deref->deref_array_type == nir_deref_array_type_indirect) {
      nir_src_copy(&ret->indirect, &deref->indirect, mem_ctx);
   }
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

static nir_deref_struct *
copy_deref_struct(void *mem_ctx, nir_deref_struct *deref)
{
   nir_deref_struct *ret = nir_deref_struct_create(mem_ctx, deref->index);
   ret->deref.type = deref->deref.type;
   if (deref->deref.child)
      ret->deref.child = nir_copy_deref(ret, deref->deref.child);
   return ret;
}

nir_deref *
nir_copy_deref(void *mem_ctx, nir_deref *deref)
{
   if (deref == NULL)
      return NULL;

   switch (deref->deref_type) {
   case nir_deref_type_var:
      return &copy_deref_var(mem_ctx, nir_deref_as_var(deref))->deref;
   case nir_deref_type_array:
      return &copy_deref_array(mem_ctx, nir_deref_as_array(deref))->deref;
   case nir_deref_type_struct:
      return &copy_deref_struct(mem_ctx, nir_deref_as_struct(deref))->deref;
   default:
      unreachable("Invalid dereference type");
   }

   return NULL;
}
/* This is the second step in the recursion.  We've found the tail and made a
 * copy.  Now we need to iterate over all possible leaves and call the
 * callback on each one.
 */
static bool
deref_foreach_leaf_build_recur(nir_deref_var *deref, nir_deref *tail,
                               nir_deref_foreach_leaf_cb cb, void *state)
{
   unsigned length;
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } tmp;

   assert(tail->child == NULL);
   switch (glsl_get_base_type(tail->type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_BOOL:
      if (glsl_type_is_vector_or_scalar(tail->type))
         return cb(deref, state);
      /* Fall Through */

   case GLSL_TYPE_ARRAY:
      tmp.arr.deref.deref_type = nir_deref_type_array;
      tmp.arr.deref.type = glsl_get_array_element(tail->type);
      tmp.arr.deref_array_type = nir_deref_array_type_direct;
      tmp.arr.indirect = NIR_SRC_INIT;
      tail->child = &tmp.arr.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.arr.base_offset = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   case GLSL_TYPE_STRUCT:
      tmp.str.deref.deref_type = nir_deref_type_struct;
      tail->child = &tmp.str.deref;

      length = glsl_get_length(tail->type);
      for (unsigned i = 0; i < length; i++) {
         tmp.arr.deref.child = NULL;
         tmp.str.deref.type = glsl_get_struct_field(tail->type, i);
         tmp.str.index = i;
         if (!deref_foreach_leaf_build_recur(deref, &tmp.arr.deref, cb, state))
            return false;
      }
      return true;

   default:
      unreachable("Invalid type for dereference");
   }
}
/* This is the first step of the foreach_leaf recursion.  In this step we are
 * walking to the end of the deref chain and making a copy in the stack as we
 * go.  This is because we don't want to mutate the deref chain that was
 * passed in by the caller.  The downside is that this deref chain is on the
 * stack and, if the caller wants to do anything with it, they will have to
 * make their own copy because this one will go away.
 */
static bool
deref_foreach_leaf_copy_recur(nir_deref_var *deref, nir_deref *tail,
                              nir_deref_foreach_leaf_cb cb, void *state)
{
   union {
      nir_deref_array arr;
      nir_deref_struct str;
   } c;

   if (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array:
         c.arr = *nir_deref_as_array(tail->child);
         tail->child = &c.arr.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.arr.deref, cb, state);

      case nir_deref_type_struct:
         c.str = *nir_deref_as_struct(tail->child);
         tail->child = &c.str.deref;
         return deref_foreach_leaf_copy_recur(deref, &c.str.deref, cb, state);

      case nir_deref_type_var:
      default:
         unreachable("Invalid deref type for a child");
      }
   } else {
      /* We've gotten to the end of the original deref.  Time to start
       * building our own derefs.
       */
      return deref_foreach_leaf_build_recur(deref, tail, cb, state);
   }
}
/**
 * This function iterates over all of the possible derefs that can be created
 * with the given deref as the head.  It then calls the provided callback with
 * a full deref for each one.
 *
 * The deref passed to the callback will be allocated on the stack.  You will
 * need to make a copy if you want it to hang around.
 */
bool
nir_deref_foreach_leaf(nir_deref_var *deref,
                       nir_deref_foreach_leaf_cb cb, void *state)
{
   nir_deref_var copy = *deref;
   return deref_foreach_leaf_copy_recur(&copy, &copy.deref, cb, state);
}
/* Returns a load_const instruction that represents the constant
 * initializer for the given deref chain.  The caller is responsible for
 * ensuring that there actually is a constant initializer.
 */
nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref)
{
   nir_constant *constant = deref->var->constant_initializer;
   assert(constant);

   const nir_deref *tail = &deref->deref;
   unsigned matrix_offset = 0;
   while (tail->child) {
      switch (tail->child->deref_type) {
      case nir_deref_type_array: {
         nir_deref_array *arr = nir_deref_as_array(tail->child);
         assert(arr->deref_array_type == nir_deref_array_type_direct);
         if (glsl_type_is_matrix(tail->type)) {
            assert(arr->deref.child == NULL);
            matrix_offset = arr->base_offset;
         } else {
            constant = constant->elements[arr->base_offset];
         }
         break;
      }

      case nir_deref_type_struct: {
         constant = constant->elements[nir_deref_as_struct(tail->child)->index];
         break;
      }

      default:
         unreachable("Invalid deref child type");
      }

      tail = tail->child;
   }

   unsigned bit_size = glsl_get_bit_size(tail->type);
   nir_load_const_instr *load =
      nir_load_const_instr_create(shader, glsl_get_vector_elements(tail->type),
                                  bit_size);

   matrix_offset *= load->def.num_components;
   for (unsigned i = 0; i < load->def.num_components; i++) {
      switch (glsl_get_base_type(tail->type)) {
      case GLSL_TYPE_FLOAT:
      case GLSL_TYPE_INT:
      case GLSL_TYPE_UINT:
         load->value.u32[i] = constant->value.u[matrix_offset + i];
         break;
      case GLSL_TYPE_DOUBLE:
         load->value.f64[i] = constant->value.d[matrix_offset + i];
         break;
      case GLSL_TYPE_BOOL:
         load->value.u32[i] = constant->value.b[matrix_offset + i] ?
                              NIR_TRUE : NIR_FALSE;
         break;
      default:
         unreachable("Invalid immediate type");
      }
   }

   return load;
}
nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}
/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}
bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}
static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}
static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}
void nir_instr_remove(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}
void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}
bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   return true;
}
struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}
bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_deref_array_src(nir_deref_array *deref, nir_foreach_src_cb cb,
                      void *state)
{
   if (deref->deref_array_type == nir_deref_array_type_indirect)
      return visit_src(&deref->indirect, cb, state);
   return true;
}

static bool
visit_deref_src(nir_deref_var *deref, nir_foreach_src_cb cb, void *state)
{
   nir_deref *cur = &deref->deref;
   while (cur != NULL) {
      if (cur->deref_type == nir_deref_type_array) {
         if (!visit_deref_array_src(nir_deref_as_array(cur), cb, state))
            return false;
      }

      cur = cur->child;
   }

   return true;
}
static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   if (instr->texture != NULL) {
      if (!visit_deref_src(instr->texture, cb, state))
         return false;
   }

   if (instr->sampler != NULL) {
      if (!visit_deref_src(instr->sampler, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   unsigned num_vars =
      nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      if (!visit_deref_src(instr->variables[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}
typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}
bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      /* Call instructions have no regular sources */
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
      break;
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
/**
 * Returns true if the source is known to be dynamically uniform. Otherwise it
 * returns false which means it may or may not be dynamically uniform but it
 * can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */
   return false;
}
static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}
void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}
void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}
/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}
void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
   }

   return false;
}
/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
uint8_t
nir_ssa_def_components_read(nir_ssa_def *def)
{
   uint8_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);

         for (unsigned c = 0; c < 4; c++) {
            if (!nir_alu_instr_channel_used(alu, src_idx, c))
               continue;

            read_mask |= (1 << alu_src->swizzle[c]);
         }
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   return read_mask;
}
nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (&block->cf_node == nir_if_last_then_node(if_stmt))
         return nir_cf_node_as_block(nir_if_first_else_node(if_stmt));

      assert(&block->cf_node == nir_if_last_else_node(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (&block->cf_node == nir_if_first_else_node(if_stmt))
         return nir_cf_node_as_block(nir_if_last_then_node(if_stmt));

      assert(&block->cf_node == nir_if_first_then_node(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_start_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_cf_node_as_block(nir_if_first_then_node(if_stmt));
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_cf_node_as_block(nir_loop_first_cf_node(loop));
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node)
{
   switch (node->type) {
   case nir_cf_node_function: {
      nir_function_impl *impl = nir_cf_node_as_function(node);
      return nir_impl_last_block(impl);
   }

   case nir_cf_node_if: {
      nir_if *if_stmt = nir_cf_node_as_if(node);
      return nir_cf_node_as_block(nir_if_last_else_node(if_stmt));
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);
      return nir_cf_node_as_block(nir_loop_last_cf_node(loop));
   }

   case nir_cf_node_block: {
      return nir_cf_node_as_block(node);
   }

   default:
      unreachable("unknown node type");
   }
}

nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_cf_node_cf_tree_first(nir_cf_node_next(node));
   else if (node->type == nir_cf_node_function)
      return NULL;
   else
      return nir_cf_node_as_block(nir_cf_node_next(node));
}
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}
void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   impl->num_blocks = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}

gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}