/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 */

#include "nir.h"
#include "nir_control_flow_private.h"
#include "util/half_float.h"
#include <limits.h>
#include <assert.h>
#include <math.h>
#include "util/u_math.h"

#include "main/menums.h" /* BITFIELD64_MASK */
nir_shader *
nir_shader_create(void *mem_ctx,
                  gl_shader_stage stage,
                  const nir_shader_compiler_options *options,
                  shader_info *si)
{
   nir_shader *shader = rzalloc(mem_ctx, nir_shader);

   exec_list_make_empty(&shader->uniforms);
   exec_list_make_empty(&shader->inputs);
   exec_list_make_empty(&shader->outputs);
   exec_list_make_empty(&shader->shared);

   shader->options = options;

   if (si) {
      assert(si->stage == stage);
      shader->info = *si;
   } else {
      shader->info.stage = stage;
   }

   exec_list_make_empty(&shader->functions);
   exec_list_make_empty(&shader->registers);
   exec_list_make_empty(&shader->globals);
   exec_list_make_empty(&shader->system_values);
   shader->reg_alloc = 0;

   shader->num_inputs = 0;
   shader->num_outputs = 0;
   shader->num_uniforms = 0;
   shader->num_shared = 0;

   return shader;
}

static nir_register *
reg_create(void *mem_ctx, struct exec_list *list)
{
   nir_register *reg = ralloc(mem_ctx, nir_register);

   list_inithead(&reg->uses);
   list_inithead(&reg->defs);
   list_inithead(&reg->if_uses);

   reg->num_components = 0;
   reg->bit_size = 32;
   reg->num_array_elems = 0;
   reg->is_packed = false;
   reg->name = NULL;

   exec_list_push_tail(list, &reg->node);

   return reg;
}
nir_register *
nir_global_reg_create(nir_shader *shader)
{
   nir_register *reg = reg_create(shader, &shader->registers);
   reg->index = shader->reg_alloc++;
   reg->is_global = true;

   return reg;
}

nir_register *
nir_local_reg_create(nir_function_impl *impl)
{
   nir_register *reg = reg_create(ralloc_parent(impl), &impl->registers);
   reg->index = impl->reg_alloc++;
   reg->is_global = false;

   return reg;
}

void
nir_reg_remove(nir_register *reg)
{
   exec_node_remove(&reg->node);
}
void
nir_shader_add_variable(nir_shader *shader, nir_variable *var)
{
   switch (var->data.mode) {
   case nir_var_all:
      assert(!"invalid mode");
      break;

   case nir_var_function_temp:
      assert(!"nir_shader_add_variable cannot be used for local variables");
      break;

   case nir_var_shader_temp:
      exec_list_push_tail(&shader->globals, &var->node);
      break;

   case nir_var_shader_in:
      exec_list_push_tail(&shader->inputs, &var->node);
      break;

   case nir_var_shader_out:
      exec_list_push_tail(&shader->outputs, &var->node);
      break;

   case nir_var_uniform:
   case nir_var_mem_ubo:
   case nir_var_mem_ssbo:
      exec_list_push_tail(&shader->uniforms, &var->node);
      break;

   case nir_var_mem_shared:
      assert(gl_shader_stage_is_compute(shader->info.stage));
      exec_list_push_tail(&shader->shared, &var->node);
      break;

   case nir_var_mem_global:
      assert(!"nir_shader_add_variable cannot be used for global memory");
      break;

   case nir_var_system_value:
      exec_list_push_tail(&shader->system_values, &var->node);
      break;
   }
}
nir_variable *
nir_variable_create(nir_shader *shader, nir_variable_mode mode,
                    const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = mode;
   var->data.how_declared = nir_var_declared_normally;

   if ((mode == nir_var_shader_in &&
        shader->info.stage != MESA_SHADER_VERTEX) ||
       (mode == nir_var_shader_out &&
        shader->info.stage != MESA_SHADER_FRAGMENT))
      var->data.interpolation = INTERP_MODE_SMOOTH;

   if (mode == nir_var_shader_in || mode == nir_var_uniform)
      var->data.read_only = true;

   nir_shader_add_variable(shader, var);

   return var;
}

nir_variable *
nir_local_variable_create(nir_function_impl *impl,
                          const struct glsl_type *type, const char *name)
{
   nir_variable *var = rzalloc(impl->function->shader, nir_variable);
   var->name = ralloc_strdup(var, name);
   var->type = type;
   var->data.mode = nir_var_function_temp;

   nir_function_impl_add_variable(impl, var);

   return var;
}

nir_function *
nir_function_create(nir_shader *shader, const char *name)
{
   nir_function *func = ralloc(shader, nir_function);

   exec_list_push_tail(&shader->functions, &func->node);

   func->name = ralloc_strdup(func, name);
   func->shader = shader;
   func->num_params = 0;
   func->params = NULL;
   func->impl = NULL;
   func->is_entrypoint = false;

   return func;
}
/* NOTE: if the instruction you are copying a src to is already added
 * to the IR, use nir_instr_rewrite_src() instead.
 */
void nir_src_copy(nir_src *dest, const nir_src *src, void *mem_ctx)
{
   dest->is_ssa = src->is_ssa;
   if (src->is_ssa) {
      dest->ssa = src->ssa;
   } else {
      dest->reg.base_offset = src->reg.base_offset;
      dest->reg.reg = src->reg.reg;
      if (src->reg.indirect) {
         dest->reg.indirect = ralloc(mem_ctx, nir_src);
         nir_src_copy(dest->reg.indirect, src->reg.indirect, mem_ctx);
      } else {
         dest->reg.indirect = NULL;
      }
   }
}
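
/* A minimal usage sketch of the distinction drawn in the NOTE above; the
 * function and parameter names here are hypothetical.
 */
#if 0
static void
copy_src_sketch(nir_alu_instr *new_alu, nir_alu_instr *old_alu,
                nir_instr *live_instr, nir_src *live_src)
{
   /* new_alu has not been inserted into the IR yet, so a raw copy that
    * doesn't touch use lists is fine:
    */
   nir_src_copy(&new_alu->src[0].src, &old_alu->src[0].src, new_alu);

   /* live_instr is already in the IR; its use lists must stay consistent,
    * which nir_instr_rewrite_src() takes care of:
    */
   nir_instr_rewrite_src(live_instr, live_src, old_alu->src[0].src);
}
#endif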
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr)
{
   /* Copying an SSA definition makes no sense whatsoever. */
   assert(!src->is_ssa);

   dest->is_ssa = false;

   dest->reg.base_offset = src->reg.base_offset;
   dest->reg.reg = src->reg.reg;
   if (src->reg.indirect) {
      dest->reg.indirect = ralloc(instr, nir_src);
      nir_src_copy(dest->reg.indirect, src->reg.indirect, instr);
   } else {
      dest->reg.indirect = NULL;
   }
}

void
nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                 nir_alu_instr *instr)
{
   nir_src_copy(&dest->src, &src->src, &instr->instr);
   dest->abs = src->abs;
   dest->negate = src->negate;
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++)
      dest->swizzle[i] = src->swizzle[i];
}

void
nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                  nir_alu_instr *instr)
{
   nir_dest_copy(&dest->dest, &src->dest, &instr->instr);
   dest->write_mask = src->write_mask;
   dest->saturate = src->saturate;
}

static void
cf_init(nir_cf_node *node, nir_cf_node_type type)
{
   exec_node_init(&node->node);
   node->parent = NULL;
   node->type = type;
}
nir_function_impl *
nir_function_impl_create_bare(nir_shader *shader)
{
   nir_function_impl *impl = ralloc(shader, nir_function_impl);

   impl->function = NULL;

   cf_init(&impl->cf_node, nir_cf_node_function);

   exec_list_make_empty(&impl->body);
   exec_list_make_empty(&impl->registers);
   exec_list_make_empty(&impl->locals);
   impl->reg_alloc = 0;
   impl->ssa_alloc = 0;
   impl->valid_metadata = nir_metadata_none;

   /* create start & end blocks */
   nir_block *start_block = nir_block_create(shader);
   nir_block *end_block = nir_block_create(shader);
   start_block->cf_node.parent = &impl->cf_node;
   end_block->cf_node.parent = &impl->cf_node;
   impl->end_block = end_block;

   exec_list_push_tail(&impl->body, &start_block->cf_node.node);

   start_block->successors[0] = end_block;
   _mesa_set_add(end_block->predecessors, start_block);

   return impl;
}

nir_function_impl *
nir_function_impl_create(nir_function *function)
{
   assert(function->impl == NULL);

   nir_function_impl *impl = nir_function_impl_create_bare(function->shader);

   function->impl = impl;
   impl->function = function;

   return impl;
}

nir_block *
nir_block_create(nir_shader *shader)
{
   nir_block *block = rzalloc(shader, nir_block);

   cf_init(&block->cf_node, nir_cf_node_block);

   block->successors[0] = block->successors[1] = NULL;
   block->predecessors = _mesa_pointer_set_create(block);
   block->imm_dom = NULL;
   /* XXX maybe it would be worth it to defer allocation?  This
    * way it doesn't get allocated for shader refs that never run
    * nir_calc_dominance?  For example, state-tracker creates an
    * initial IR, clones that, runs appropriate lowering pass, passes
    * to driver which does common lowering/opt, and then stores ref
    * which is later used to do state specific lowering and further
    * opt.  Do any of the references not need dominance metadata?
    */
   block->dom_frontier = _mesa_pointer_set_create(block);

   exec_list_make_empty(&block->instr_list);

   return block;
}
static inline void
src_init(nir_src *src)
{
   src->is_ssa = false;
   src->reg.reg = NULL;
   src->reg.indirect = NULL;
   src->reg.base_offset = 0;
}

nir_if *
nir_if_create(nir_shader *shader)
{
   nir_if *if_stmt = ralloc(shader, nir_if);

   cf_init(&if_stmt->cf_node, nir_cf_node_if);
   src_init(&if_stmt->condition);

   nir_block *then = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->then_list);
   exec_list_push_tail(&if_stmt->then_list, &then->cf_node.node);
   then->cf_node.parent = &if_stmt->cf_node;

   nir_block *else_stmt = nir_block_create(shader);
   exec_list_make_empty(&if_stmt->else_list);
   exec_list_push_tail(&if_stmt->else_list, &else_stmt->cf_node.node);
   else_stmt->cf_node.parent = &if_stmt->cf_node;

   return if_stmt;
}

nir_loop *
nir_loop_create(nir_shader *shader)
{
   nir_loop *loop = rzalloc(shader, nir_loop);

   cf_init(&loop->cf_node, nir_cf_node_loop);

   nir_block *body = nir_block_create(shader);
   exec_list_make_empty(&loop->body);
   exec_list_push_tail(&loop->body, &body->cf_node.node);
   body->cf_node.parent = &loop->cf_node;

   body->successors[0] = body;
   _mesa_set_add(body->predecessors, body);

   return loop;
}
static void
instr_init(nir_instr *instr, nir_instr_type type)
{
   instr->type = type;
   instr->block = NULL;
   exec_node_init(&instr->node);
}

static void
dest_init(nir_dest *dest)
{
   dest->is_ssa = false;
   dest->reg.reg = NULL;
   dest->reg.indirect = NULL;
   dest->reg.base_offset = 0;
}

static void
alu_dest_init(nir_alu_dest *dest)
{
   dest_init(&dest->dest);
   dest->saturate = false;
   dest->write_mask = 0xf;
}

static void
alu_src_init(nir_alu_src *src)
{
   src_init(&src->src);
   src->abs = src->negate = false;
   for (int i = 0; i < NIR_MAX_VEC_COMPONENTS; ++i)
      src->swizzle[i] = i;
}

nir_alu_instr *
nir_alu_instr_create(nir_shader *shader, nir_op op)
{
   unsigned num_srcs = nir_op_infos[op].num_inputs;
   /* TODO: don't use rzalloc */
   nir_alu_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_alu_instr) + num_srcs * sizeof(nir_alu_src));

   instr_init(&instr->instr, nir_instr_type_alu);
   instr->op = op;
   alu_dest_init(&instr->dest);
   for (unsigned i = 0; i < num_srcs; i++)
      alu_src_init(&instr->src[i]);

   return instr;
}
nir_deref_instr *
nir_deref_instr_create(nir_shader *shader, nir_deref_type deref_type)
{
   nir_deref_instr *instr =
      rzalloc_size(shader, sizeof(nir_deref_instr));

   instr_init(&instr->instr, nir_instr_type_deref);

   instr->deref_type = deref_type;
   if (deref_type != nir_deref_type_var)
      src_init(&instr->parent);

   if (deref_type == nir_deref_type_array ||
       deref_type == nir_deref_type_ptr_as_array)
      src_init(&instr->arr.index);

   dest_init(&instr->dest);

   return instr;
}

nir_jump_instr *
nir_jump_instr_create(nir_shader *shader, nir_jump_type type)
{
   nir_jump_instr *instr = ralloc(shader, nir_jump_instr);
   instr_init(&instr->instr, nir_instr_type_jump);
   instr->type = type;
   return instr;
}

nir_load_const_instr *
nir_load_const_instr_create(nir_shader *shader, unsigned num_components,
                            unsigned bit_size)
{
   nir_load_const_instr *instr = rzalloc(shader, nir_load_const_instr);
   instr_init(&instr->instr, nir_instr_type_load_const);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

nir_intrinsic_instr *
nir_intrinsic_instr_create(nir_shader *shader, nir_intrinsic_op op)
{
   unsigned num_srcs = nir_intrinsic_infos[op].num_srcs;
   /* TODO: don't use rzalloc */
   nir_intrinsic_instr *instr =
      rzalloc_size(shader,
                   sizeof(nir_intrinsic_instr) + num_srcs * sizeof(nir_src));

   instr_init(&instr->instr, nir_instr_type_intrinsic);
   instr->intrinsic = op;

   if (nir_intrinsic_infos[op].has_dest)
      dest_init(&instr->dest);

   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i]);

   return instr;
}
nir_call_instr *
nir_call_instr_create(nir_shader *shader, nir_function *callee)
{
   const unsigned num_params = callee->num_params;
   nir_call_instr *instr =
      rzalloc_size(shader, sizeof(*instr) +
                   num_params * sizeof(instr->params[0]));

   instr_init(&instr->instr, nir_instr_type_call);
   instr->callee = callee;
   instr->num_params = num_params;
   for (unsigned i = 0; i < num_params; i++)
      src_init(&instr->params[i]);

   return instr;
}

nir_tex_instr *
nir_tex_instr_create(nir_shader *shader, unsigned num_srcs)
{
   nir_tex_instr *instr = rzalloc(shader, nir_tex_instr);
   instr_init(&instr->instr, nir_instr_type_tex);

   dest_init(&instr->dest);

   instr->num_srcs = num_srcs;
   instr->src = ralloc_array(instr, nir_tex_src, num_srcs);
   for (unsigned i = 0; i < num_srcs; i++)
      src_init(&instr->src[i].src);

   instr->texture_index = 0;
   instr->texture_array_size = 0;
   instr->sampler_index = 0;

   return instr;
}
void
nir_tex_instr_add_src(nir_tex_instr *tex,
                      nir_tex_src_type src_type,
                      nir_src src)
{
   nir_tex_src *new_srcs = rzalloc_array(tex, nir_tex_src,
                                         tex->num_srcs + 1);

   for (unsigned i = 0; i < tex->num_srcs; i++) {
      new_srcs[i].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &new_srcs[i].src,
                         &tex->src[i].src);
   }

   ralloc_free(tex->src);
   tex->src = new_srcs;

   tex->src[tex->num_srcs].src_type = src_type;
   nir_instr_rewrite_src(&tex->instr, &tex->src[tex->num_srcs].src, src);
   tex->num_srcs++;
}

void
nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx)
{
   assert(src_idx < tex->num_srcs);

   /* First rewrite the source to NIR_SRC_INIT */
   nir_instr_rewrite_src(&tex->instr, &tex->src[src_idx].src, NIR_SRC_INIT);

   /* Now, move all of the other sources down */
   for (unsigned i = src_idx + 1; i < tex->num_srcs; i++) {
      tex->src[i-1].src_type = tex->src[i].src_type;
      nir_instr_move_src(&tex->instr, &tex->src[i-1].src, &tex->src[i].src);
   }
   tex->num_srcs--;
}
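
/* Usage sketch: swapping out a texture coordinate source by combining the
 * two helpers above. `tex` and `new_coord` are hypothetical; the index
 * lookup uses nir_tex_instr_src_index() from nir.h.
 */
#if 0
static void
replace_coord_src_sketch(nir_tex_instr *tex, nir_ssa_def *new_coord)
{
   int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
   if (idx >= 0)
      nir_tex_instr_remove_src(tex, idx);
   nir_tex_instr_add_src(tex, nir_tex_src_coord, nir_src_for_ssa(new_coord));
}
#endif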
nir_phi_instr *
nir_phi_instr_create(nir_shader *shader)
{
   nir_phi_instr *instr = ralloc(shader, nir_phi_instr);
   instr_init(&instr->instr, nir_instr_type_phi);

   dest_init(&instr->dest);
   exec_list_make_empty(&instr->srcs);
   return instr;
}

nir_parallel_copy_instr *
nir_parallel_copy_instr_create(nir_shader *shader)
{
   nir_parallel_copy_instr *instr = ralloc(shader, nir_parallel_copy_instr);
   instr_init(&instr->instr, nir_instr_type_parallel_copy);

   exec_list_make_empty(&instr->entries);

   return instr;
}

nir_ssa_undef_instr *
nir_ssa_undef_instr_create(nir_shader *shader,
                           unsigned num_components,
                           unsigned bit_size)
{
   nir_ssa_undef_instr *instr = ralloc(shader, nir_ssa_undef_instr);
   instr_init(&instr->instr, nir_instr_type_ssa_undef);

   nir_ssa_def_init(&instr->instr, &instr->def, num_components, bit_size, NULL);

   return instr;
}

static nir_const_value
const_value_float(double d, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 16: v.u16[0] = _mesa_float_to_half(d);  break;
   case 32: v.f32[0] = d;                       break;
   case 64: v.f64[0] = d;                       break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}

static nir_const_value
const_value_int(int64_t i, unsigned bit_size)
{
   nir_const_value v;
   switch (bit_size) {
   case 1:  v.b[0]   = i & 1;  break;
   case 8:  v.i8[0]  = i;      break;
   case 16: v.i16[0] = i;      break;
   case 32: v.i32[0] = i;      break;
   case 64: v.i64[0] = i;      break;
   default:
      unreachable("Invalid bit size");
   }
   return v;
}
nir_const_value
nir_alu_binop_identity(nir_op binop, unsigned bit_size)
{
   const int64_t max_int = (1ull << (bit_size - 1)) - 1;
   const int64_t min_int = -max_int - 1;
   switch (binop) {
   case nir_op_iadd:
      return const_value_int(0, bit_size);
   case nir_op_fadd:
      return const_value_float(0, bit_size);
   case nir_op_imul:
      return const_value_int(1, bit_size);
   case nir_op_fmul:
      return const_value_float(1, bit_size);
   case nir_op_imin:
      return const_value_int(max_int, bit_size);
   case nir_op_umin:
      return const_value_int(~0ull, bit_size);
   case nir_op_fmin:
      return const_value_float(INFINITY, bit_size);
   case nir_op_imax:
      return const_value_int(min_int, bit_size);
   case nir_op_umax:
      return const_value_int(0, bit_size);
   case nir_op_fmax:
      return const_value_float(-INFINITY, bit_size);
   case nir_op_iand:
      return const_value_int(~0ull, bit_size);
   case nir_op_ior:
      return const_value_int(0, bit_size);
   case nir_op_ixor:
      return const_value_int(0, bit_size);
   default:
      unreachable("Invalid reduction operation");
   }
}

nir_function_impl *
nir_cf_node_get_function(nir_cf_node *node)
{
   while (node->type != nir_cf_node_function) {
      node = node->parent;
   }

   return nir_cf_node_as_function(node);
}
/* Reduces a cursor by trying to convert everything to after and trying to
 * go up to block granularity when possible.
 */
static nir_cursor
reduce_cursor(nir_cursor cursor)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
      if (exec_list_is_empty(&cursor.block->instr_list)) {
         /* Empty block.  After is as good as before. */
         cursor.option = nir_cursor_after_block;
      }
      return cursor;

   case nir_cursor_after_block:
      return cursor;

   case nir_cursor_before_instr: {
      nir_instr *prev_instr = nir_instr_prev(cursor.instr);
      if (prev_instr) {
         /* Before this instruction is after the previous */
         cursor.instr = prev_instr;
         cursor.option = nir_cursor_after_instr;
      } else {
         /* No previous instruction.  Switch to before block */
         cursor.block = cursor.instr->block;
         cursor.option = nir_cursor_before_block;
      }
      return reduce_cursor(cursor);
   }

   case nir_cursor_after_instr:
      if (nir_instr_next(cursor.instr) == NULL) {
         /* This is the last instruction, switch to after block */
         cursor.option = nir_cursor_after_block;
         cursor.block = cursor.instr->block;
      }
      return cursor;

   default:
      unreachable("Invalid cursor option");
   }
}

bool
nir_cursors_equal(nir_cursor a, nir_cursor b)
{
   /* Reduced cursors should be unique */
   a = reduce_cursor(a);
   b = reduce_cursor(b);

   return a.block == b.block && a.option == b.option;
}
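
/* Sketch of what reduction buys us: two cursors built differently but
 * denoting the same insertion point compare equal. Assumes `block` is
 * non-empty so nir_block_last_instr() returns an instruction.
 */
#if 0
static bool
cursor_equality_sketch(nir_block *block)
{
   nir_cursor a = nir_after_block(block);
   nir_cursor b = nir_after_instr(nir_block_last_instr(block));
   return nir_cursors_equal(a, b); /* true: b reduces to after-block */
}
#endif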
static bool
add_use_cb(nir_src *src, void *state)
{
   nir_instr *instr = state;

   src->parent_instr = instr;
   list_addtail(&src->use_link,
                src->is_ssa ? &src->ssa->uses : &src->reg.reg->uses);

   return true;
}

static bool
add_ssa_def_cb(nir_ssa_def *def, void *state)
{
   nir_instr *instr = state;

   if (instr->block && def->index == UINT_MAX) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   }

   return true;
}

static bool
add_reg_def_cb(nir_dest *dest, void *state)
{
   nir_instr *instr = state;

   if (!dest->is_ssa) {
      dest->reg.parent_instr = instr;
      list_addtail(&dest->reg.def_link, &dest->reg.reg->defs);
   }

   return true;
}

static void
add_defs_uses(nir_instr *instr)
{
   nir_foreach_src(instr, add_use_cb, instr);
   nir_foreach_dest(instr, add_reg_def_cb, instr);
   nir_foreach_ssa_def(instr, add_ssa_def_cb, instr);
}
void
nir_instr_insert(nir_cursor cursor, nir_instr *instr)
{
   switch (cursor.option) {
   case nir_cursor_before_block:
      /* Only allow inserting jumps into empty blocks. */
      if (instr->type == nir_instr_type_jump)
         assert(exec_list_is_empty(&cursor.block->instr_list));

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_head(&cursor.block->instr_list, &instr->node);
      break;
   case nir_cursor_after_block: {
      /* Inserting instructions after a jump is illegal. */
      nir_instr *last = nir_block_last_instr(cursor.block);
      assert(last == NULL || last->type != nir_instr_type_jump);
      (void) last;

      instr->block = cursor.block;
      add_defs_uses(instr);
      exec_list_push_tail(&cursor.block->instr_list, &instr->node);
      break;
   }
   case nir_cursor_before_instr:
      assert(instr->type != nir_instr_type_jump);
      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_node_before(&cursor.instr->node, &instr->node);
      break;
   case nir_cursor_after_instr:
      /* Inserting instructions after a jump is illegal. */
      assert(cursor.instr->type != nir_instr_type_jump);

      /* Only allow inserting jumps at the end of the block. */
      if (instr->type == nir_instr_type_jump)
         assert(cursor.instr == nir_block_last_instr(cursor.instr->block));

      instr->block = cursor.instr->block;
      add_defs_uses(instr);
      exec_node_insert_after(&cursor.instr->node, &instr->node);
      break;
   }

   if (instr->type == nir_instr_type_jump)
      nir_handle_add_jump(instr->block);
}
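
/* Usage sketch: building a scalar 32-bit constant and inserting it before an
 * existing instruction via a cursor. Names are hypothetical; the cursor
 * constructors live in nir.h.
 */
#if 0
static nir_ssa_def *
insert_imm_sketch(nir_shader *shader, nir_instr *before)
{
   nir_load_const_instr *imm = nir_load_const_instr_create(shader, 1, 32);
   imm->value.u32[0] = 42;
   nir_instr_insert(nir_before_instr(before), &imm->instr);
   return &imm->def;
}
#endif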
static bool
src_is_valid(const nir_src *src)
{
   return src->is_ssa ? (src->ssa != NULL) : (src->reg.reg != NULL);
}

static bool
remove_use_cb(nir_src *src, void *state)
{
   (void) state;

   if (src_is_valid(src))
      list_del(&src->use_link);

   return true;
}

static bool
remove_def_cb(nir_dest *dest, void *state)
{
   (void) state;

   if (!dest->is_ssa)
      list_del(&dest->reg.def_link);

   return true;
}

static void
remove_defs_uses(nir_instr *instr)
{
   nir_foreach_dest(instr, remove_def_cb, instr);
   nir_foreach_src(instr, remove_use_cb, instr);
}

void nir_instr_remove_v(nir_instr *instr)
{
   remove_defs_uses(instr);
   exec_node_remove(&instr->node);

   if (instr->type == nir_instr_type_jump) {
      nir_jump_instr *jump_instr = nir_instr_as_jump(instr);
      nir_handle_remove_jump(instr->block, jump_instr->type);
   }
}

void
nir_index_local_regs(nir_function_impl *impl)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      reg->index = index++;
   }
   impl->reg_alloc = index;
}

void
nir_index_global_regs(nir_shader *shader)
{
   unsigned index = 0;
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      reg->index = index++;
   }
   shader->reg_alloc = index;
}
static bool
visit_alu_dest(nir_alu_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest.dest, state);
}

static bool
visit_deref_dest(nir_deref_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_intrinsic_dest(nir_intrinsic_instr *instr, nir_foreach_dest_cb cb,
                     void *state)
{
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      return cb(&instr->dest, state);

   return true;
}

static bool
visit_texture_dest(nir_tex_instr *instr, nir_foreach_dest_cb cb,
                   void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_phi_dest(nir_phi_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   return cb(&instr->dest, state);
}

static bool
visit_parallel_copy_dest(nir_parallel_copy_instr *instr,
                         nir_foreach_dest_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!cb(&entry->dest, state))
         return false;
   }

   return true;
}

bool
nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return visit_alu_dest(nir_instr_as_alu(instr), cb, state);
   case nir_instr_type_deref:
      return visit_deref_dest(nir_instr_as_deref(instr), cb, state);
   case nir_instr_type_intrinsic:
      return visit_intrinsic_dest(nir_instr_as_intrinsic(instr), cb, state);
   case nir_instr_type_tex:
      return visit_texture_dest(nir_instr_as_tex(instr), cb, state);
   case nir_instr_type_phi:
      return visit_phi_dest(nir_instr_as_phi(instr), cb, state);
   case nir_instr_type_parallel_copy:
      return visit_parallel_copy_dest(nir_instr_as_parallel_copy(instr),
                                      cb, state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
   case nir_instr_type_call:
   case nir_instr_type_jump:
      break;

   default:
      unreachable("Invalid instruction type");
   }

   return true;
}
struct foreach_ssa_def_state {
   nir_foreach_ssa_def_cb cb;
   void *client_state;
};

static inline bool
nir_ssa_def_visitor(nir_dest *dest, void *void_state)
{
   struct foreach_ssa_def_state *state = void_state;

   if (dest->is_ssa)
      return state->cb(&dest->ssa, state->client_state);
   else
      return true;
}

bool
nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_tex:
   case nir_instr_type_intrinsic:
   case nir_instr_type_phi:
   case nir_instr_type_parallel_copy: {
      struct foreach_ssa_def_state foreach_state = {cb, state};
      return nir_foreach_dest(instr, nir_ssa_def_visitor, &foreach_state);
   }

   case nir_instr_type_load_const:
      return cb(&nir_instr_as_load_const(instr)->def, state);
   case nir_instr_type_ssa_undef:
      return cb(&nir_instr_as_ssa_undef(instr)->def, state);
   case nir_instr_type_call:
   case nir_instr_type_jump:
      return true;
   default:
      unreachable("Invalid instruction type");
   }
}
static bool
visit_src(nir_src *src, nir_foreach_src_cb cb, void *state)
{
   if (!cb(src, state))
      return false;
   if (!src->is_ssa && src->reg.indirect)
      return cb(src->reg.indirect, state);
   return true;
}

static bool
visit_alu_src(nir_alu_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;

   return true;
}

static bool
visit_deref_instr_src(nir_deref_instr *instr,
                      nir_foreach_src_cb cb, void *state)
{
   if (instr->deref_type != nir_deref_type_var) {
      if (!visit_src(&instr->parent, cb, state))
         return false;
   }

   if (instr->deref_type == nir_deref_type_array ||
       instr->deref_type == nir_deref_type_ptr_as_array) {
      if (!visit_src(&instr->arr.index, cb, state))
         return false;
   }

   return true;
}

static bool
visit_tex_src(nir_tex_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      if (!visit_src(&instr->src[i].src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_intrinsic_src(nir_intrinsic_instr *instr, nir_foreach_src_cb cb,
                    void *state)
{
   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      if (!visit_src(&instr->src[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_call_src(nir_call_instr *instr, nir_foreach_src_cb cb, void *state)
{
   for (unsigned i = 0; i < instr->num_params; i++) {
      if (!visit_src(&instr->params[i], cb, state))
         return false;
   }

   return true;
}

static bool
visit_phi_src(nir_phi_instr *instr, nir_foreach_src_cb cb, void *state)
{
   nir_foreach_phi_src(src, instr) {
      if (!visit_src(&src->src, cb, state))
         return false;
   }

   return true;
}

static bool
visit_parallel_copy_src(nir_parallel_copy_instr *instr,
                        nir_foreach_src_cb cb, void *state)
{
   nir_foreach_parallel_copy_entry(entry, instr) {
      if (!visit_src(&entry->src, cb, state))
         return false;
   }

   return true;
}
typedef struct {
   void *state;
   nir_foreach_src_cb cb;
} visit_dest_indirect_state;

static bool
visit_dest_indirect(nir_dest *dest, void *_state)
{
   visit_dest_indirect_state *state = (visit_dest_indirect_state *) _state;

   if (!dest->is_ssa && dest->reg.indirect)
      return state->cb(dest->reg.indirect, state->state);

   return true;
}

bool
nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      if (!visit_alu_src(nir_instr_as_alu(instr), cb, state))
         return false;
      break;
   case nir_instr_type_deref:
      if (!visit_deref_instr_src(nir_instr_as_deref(instr), cb, state))
         return false;
      break;
   case nir_instr_type_intrinsic:
      if (!visit_intrinsic_src(nir_instr_as_intrinsic(instr), cb, state))
         return false;
      break;
   case nir_instr_type_tex:
      if (!visit_tex_src(nir_instr_as_tex(instr), cb, state))
         return false;
      break;
   case nir_instr_type_call:
      if (!visit_call_src(nir_instr_as_call(instr), cb, state))
         return false;
      break;
   case nir_instr_type_load_const:
      /* Constant load instructions have no regular sources */
      break;
   case nir_instr_type_phi:
      if (!visit_phi_src(nir_instr_as_phi(instr), cb, state))
         return false;
      break;
   case nir_instr_type_parallel_copy:
      if (!visit_parallel_copy_src(nir_instr_as_parallel_copy(instr),
                                   cb, state))
         return false;
      break;
   case nir_instr_type_jump:
   case nir_instr_type_ssa_undef:
      return true;

   default:
      unreachable("Invalid instruction type");
   }

   visit_dest_indirect_state dest_state;
   dest_state.state = state;
   dest_state.cb = cb;
   return nir_foreach_dest(instr, visit_dest_indirect, &dest_state);
}
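
/* Usage sketch of the callback-based iteration: count how many sources of an
 * instruction are register reads rather than SSA values. The callback must
 * return true to keep the walk going.
 */
#if 0
static bool
count_reg_srcs_cb(nir_src *src, void *state)
{
   if (!src->is_ssa)
      (*(unsigned *)state)++;
   return true;
}

static unsigned
count_reg_srcs_sketch(nir_instr *instr)
{
   unsigned count = 0;
   nir_foreach_src(instr, count_reg_srcs_cb, &count);
   return count;
}
#endif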
int64_t
nir_src_comp_as_int(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   /* int1_t uses 0/-1 convention */
   case 1:  return -(int)load->value.b[comp];
   case 8:  return load->value.i8[comp];
   case 16: return load->value.i16[comp];
   case 32: return load->value.i32[comp];
   case 64: return load->value.i64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

uint64_t
nir_src_comp_as_uint(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 1:  return load->value.b[comp];
   case 8:  return load->value.u8[comp];
   case 16: return load->value.u16[comp];
   case 32: return load->value.u32[comp];
   case 64: return load->value.u64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

bool
nir_src_comp_as_bool(nir_src src, unsigned comp)
{
   int64_t i = nir_src_comp_as_int(src, comp);

   /* Booleans of any size use 0/-1 convention */
   assert(i == 0 || i == -1);

   return i;
}
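
/* Worked example of the 0/-1 convention: a 1-bit constant holding "true" has
 * b[comp] == 1, so nir_src_comp_as_int() returns -(int)1 == -1, which the
 * assert above accepts and which converts back to true here.
 */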
double
nir_src_comp_as_float(nir_src src, unsigned comp)
{
   assert(nir_src_is_const(src));
   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   assert(comp < load->def.num_components);
   switch (load->def.bit_size) {
   case 16: return _mesa_half_to_float(load->value.u16[comp]);
   case 32: return load->value.f32[comp];
   case 64: return load->value.f64[comp];
   default:
      unreachable("Invalid bit size");
   }
}

int64_t
nir_src_as_int(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_int(src, 0);
}

uint64_t
nir_src_as_uint(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_uint(src, 0);
}

bool
nir_src_as_bool(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_bool(src, 0);
}

double
nir_src_as_float(nir_src src)
{
   assert(nir_src_num_components(src) == 1);
   return nir_src_comp_as_float(src, 0);
}

nir_const_value *
nir_src_as_const_value(nir_src src)
{
   if (!src.is_ssa)
      return NULL;

   if (src.ssa->parent_instr->type != nir_instr_type_load_const)
      return NULL;

   nir_load_const_instr *load = nir_instr_as_load_const(src.ssa->parent_instr);

   return &load->value;
}
/**
 * Returns true if the source is known to be dynamically uniform. Otherwise it
 * returns false, which means it may or may not be dynamically uniform but it
 * can't be determined.
 */
bool
nir_src_is_dynamically_uniform(nir_src src)
{
   if (!src.is_ssa)
      return false;

   /* Constants are trivially dynamically uniform */
   if (src.ssa->parent_instr->type == nir_instr_type_load_const)
      return true;

   /* As are uniform variables */
   if (src.ssa->parent_instr->type == nir_instr_type_intrinsic) {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(src.ssa->parent_instr);

      if (intr->intrinsic == nir_intrinsic_load_uniform)
         return true;
   }

   /* XXX: this could have many more tests, such as when a sampler function is
    * called with dynamically uniform arguments.
    */

   return false;
}
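
/* Usage sketch: a backend might consult this when deciding whether an
 * indirect resource index needs a slow lowering loop. `handle` is
 * hypothetical.
 */
#if 0
static bool
needs_waterfall_sketch(nir_src handle)
{
   /* Constants and load_uniform results never need the slow path. */
   return !nir_src_is_dynamically_uniform(handle);
}
#endif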
static void
src_remove_all_uses(nir_src *src)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      list_del(&src->use_link);
   }
}

static void
src_add_all_uses(nir_src *src, nir_instr *parent_instr, nir_if *parent_if)
{
   for (; src; src = src->is_ssa ? NULL : src->reg.indirect) {
      if (!src_is_valid(src))
         continue;

      if (parent_instr) {
         src->parent_instr = parent_instr;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->uses);
      } else {
         assert(parent_if);
         src->parent_if = parent_if;
         if (src->is_ssa)
            list_addtail(&src->use_link, &src->ssa->if_uses);
         else
            list_addtail(&src->use_link, &src->reg.reg->if_uses);
      }
   }
}

void
nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src)
{
   assert(!src_is_valid(src) || src->parent_instr == instr);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, instr, NULL);
}

void
nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src)
{
   assert(!src_is_valid(dest) || dest->parent_instr == dest_instr);

   src_remove_all_uses(dest);
   src_remove_all_uses(src);
   *dest = *src;
   *src = NIR_SRC_INIT;
   src_add_all_uses(dest, dest_instr, NULL);
}

void
nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src)
{
   nir_src *src = &if_stmt->condition;
   assert(!src_is_valid(src) || src->parent_if == if_stmt);

   src_remove_all_uses(src);
   *src = new_src;
   src_add_all_uses(src, NULL, if_stmt);
}
void
nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest, nir_dest new_dest)
{
   if (dest->is_ssa) {
      /* We can only overwrite an SSA destination if it has no uses. */
      assert(list_empty(&dest->ssa.uses) && list_empty(&dest->ssa.if_uses));
   } else {
      list_del(&dest->reg.def_link);
      if (dest->reg.indirect)
         src_remove_all_uses(dest->reg.indirect);
   }

   /* We can't re-write with an SSA def */
   assert(!new_dest.is_ssa);

   nir_dest_copy(dest, &new_dest, instr);

   dest->reg.parent_instr = instr;
   list_addtail(&dest->reg.def_link, &new_dest.reg.reg->defs);

   if (dest->reg.indirect)
      src_add_all_uses(dest->reg.indirect, instr, NULL);
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                 unsigned num_components,
                 unsigned bit_size, const char *name)
{
   def->name = ralloc_strdup(instr, name);
   def->parent_instr = instr;
   list_inithead(&def->uses);
   list_inithead(&def->if_uses);
   def->num_components = num_components;
   def->bit_size = bit_size;

   if (instr->block) {
      nir_function_impl *impl =
         nir_cf_node_get_function(&instr->block->cf_node);

      def->index = impl->ssa_alloc++;
   } else {
      def->index = UINT_MAX;
   }
}

/* note: does *not* take ownership of 'name' */
void
nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                  unsigned num_components, unsigned bit_size,
                  const char *name)
{
   dest->is_ssa = true;
   nir_ssa_def_init(instr, &dest->ssa, num_components, bit_size, name);
}
void
nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src)
{
   assert(!new_src.is_ssa || def != new_src.ssa);

   nir_foreach_use_safe(use_src, def)
      nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}

static bool
is_instr_between(nir_instr *start, nir_instr *end, nir_instr *between)
{
   assert(start->block == end->block);

   if (between->block != start->block)
      return false;

   /* Search backwards looking for "between" */
   while (start != end) {
      if (between == end)
         return true;

      end = nir_instr_prev(end);
   }

   return false;
}

/* Replaces all uses of the given SSA def with the given source but only if
 * the use comes after the after_me instruction.  This can be useful if you
 * are emitting code to fix up the result of some instruction: you can freely
 * use the result in that code and then call rewrite_uses_after and pass the
 * last fixup instruction as after_me and it will replace all of the uses you
 * want without touching the fixup code.
 *
 * This function assumes that after_me is in the same block as
 * def->parent_instr and that after_me comes after def->parent_instr.
 */
void
nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                               nir_instr *after_me)
{
   if (new_src.is_ssa && def == new_src.ssa)
      return;

   nir_foreach_use_safe(use_src, def) {
      assert(use_src->parent_instr != def->parent_instr);
      /* Since def already dominates all of its uses, the only way a use can
       * not be dominated by after_me is if it is between def and after_me in
       * the instruction list.
       */
      if (!is_instr_between(def->parent_instr, after_me, use_src->parent_instr))
         nir_instr_rewrite_src(use_src->parent_instr, use_src, new_src);
   }

   nir_foreach_if_use_safe(use_src, def)
      nir_if_rewrite_condition(use_src->parent_if, new_src);
}
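
/* Sketch of the fixup pattern described above. `emit_fixup_code` is a
 * hypothetical helper that emits instructions reading `def` right after it
 * and returns the resulting value; the rewrite then retargets only the uses
 * that come after the fixup.
 */
#if 0
static void
fixup_result_sketch(nir_ssa_def *def)
{
   nir_ssa_def *fixed = emit_fixup_code(def);
   nir_ssa_def_rewrite_uses_after(def, nir_src_for_ssa(fixed),
                                  fixed->parent_instr);
}
#endif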
nir_component_mask_t
nir_ssa_def_components_read(const nir_ssa_def *def)
{
   nir_component_mask_t read_mask = 0;
   nir_foreach_use(use, def) {
      if (use->parent_instr->type == nir_instr_type_alu) {
         nir_alu_instr *alu = nir_instr_as_alu(use->parent_instr);
         nir_alu_src *alu_src = exec_node_data(nir_alu_src, use, src);
         int src_idx = alu_src - &alu->src[0];
         assert(src_idx >= 0 && src_idx < nir_op_infos[alu->op].num_inputs);
         read_mask |= nir_alu_instr_src_read_mask(alu, src_idx);
      } else {
         return (1 << def->num_components) - 1;
      }
   }

   if (!list_empty(&def->if_uses))
      read_mask |= 1;

   return read_mask;
}
nir_block *
nir_block_cf_tree_next(nir_block *block)
{
   if (block == NULL) {
      /* nir_foreach_block_safe() will call this function on a NULL block
       * after the last iteration, but it won't use the result so just return
       * NULL here.
       */
      return NULL;
   }

   nir_cf_node *cf_next = nir_cf_node_next(&block->cf_node);
   if (cf_next)
      return nir_cf_node_cf_tree_first(cf_next);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the end of the if? Go to the beginning of the else */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_last_then_block(if_stmt))
         return nir_if_first_else_block(if_stmt);

      assert(block == nir_if_last_else_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_next(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}

nir_block *
nir_block_cf_tree_prev(nir_block *block)
{
   if (block == NULL) {
      /* do this for consistency with nir_block_cf_tree_next() */
      return NULL;
   }

   nir_cf_node *cf_prev = nir_cf_node_prev(&block->cf_node);
   if (cf_prev)
      return nir_cf_node_cf_tree_last(cf_prev);

   nir_cf_node *parent = block->cf_node.parent;

   switch (parent->type) {
   case nir_cf_node_if: {
      /* Are we at the beginning of the else? Go to the end of the if */
      nir_if *if_stmt = nir_cf_node_as_if(parent);
      if (block == nir_if_first_else_block(if_stmt))
         return nir_if_last_then_block(if_stmt);

      assert(block == nir_if_first_then_block(if_stmt));
      /* fall through */
   }

   case nir_cf_node_loop:
      return nir_cf_node_as_block(nir_cf_node_prev(parent));

   case nir_cf_node_function:
      return NULL;

   default:
      unreachable("unknown cf node type");
   }
}
1628 nir_block
*nir_cf_node_cf_tree_first(nir_cf_node
*node
)
1630 switch (node
->type
) {
1631 case nir_cf_node_function
: {
1632 nir_function_impl
*impl
= nir_cf_node_as_function(node
);
1633 return nir_start_block(impl
);
1636 case nir_cf_node_if
: {
1637 nir_if
*if_stmt
= nir_cf_node_as_if(node
);
1638 return nir_if_first_then_block(if_stmt
);
1641 case nir_cf_node_loop
: {
1642 nir_loop
*loop
= nir_cf_node_as_loop(node
);
1643 return nir_loop_first_block(loop
);
1646 case nir_cf_node_block
: {
1647 return nir_cf_node_as_block(node
);
1651 unreachable("unknown node type");
1655 nir_block
*nir_cf_node_cf_tree_last(nir_cf_node
*node
)
1657 switch (node
->type
) {
1658 case nir_cf_node_function
: {
1659 nir_function_impl
*impl
= nir_cf_node_as_function(node
);
1660 return nir_impl_last_block(impl
);
1663 case nir_cf_node_if
: {
1664 nir_if
*if_stmt
= nir_cf_node_as_if(node
);
1665 return nir_if_last_else_block(if_stmt
);
1668 case nir_cf_node_loop
: {
1669 nir_loop
*loop
= nir_cf_node_as_loop(node
);
1670 return nir_loop_last_block(loop
);
1673 case nir_cf_node_block
: {
1674 return nir_cf_node_as_block(node
);
1678 unreachable("unknown node type");
1682 nir_block
*nir_cf_node_cf_tree_next(nir_cf_node
*node
)
1684 if (node
->type
== nir_cf_node_block
)
1685 return nir_block_cf_tree_next(nir_cf_node_as_block(node
));
1686 else if (node
->type
== nir_cf_node_function
)
1689 return nir_cf_node_as_block(nir_cf_node_next(node
));
nir_if *
nir_block_get_following_if(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_if)
      return NULL;

   return nir_cf_node_as_if(next_node);
}

nir_loop *
nir_block_get_following_loop(nir_block *block)
{
   if (exec_node_is_tail_sentinel(&block->cf_node.node))
      return NULL;

   if (nir_cf_node_is_last(&block->cf_node))
      return NULL;

   nir_cf_node *next_node = nir_cf_node_next(&block->cf_node);

   if (next_node->type != nir_cf_node_loop)
      return NULL;

   return nir_cf_node_as_loop(next_node);
}
void
nir_index_blocks(nir_function_impl *impl)
{
   unsigned index = 0;

   if (impl->valid_metadata & nir_metadata_block_index)
      return;

   nir_foreach_block(block, impl) {
      block->index = index++;
   }

   /* The end_block isn't really part of the program, which is why its index
    * is >= num_blocks.
    */
   impl->num_blocks = impl->end_block->index = index;
}

static bool
index_ssa_def_cb(nir_ssa_def *def, void *state)
{
   unsigned *index = (unsigned *) state;
   def->index = (*index)++;

   return true;
}

/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
void
nir_index_ssa_defs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, index_ssa_def_cb, &index);
   }

   impl->ssa_alloc = index;
}
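
/* Sketch of how the dominance property is typically used: after
 * nir_index_ssa_defs(), comparing indices is a cheap way to order two defs
 * by program position, e.g. to pick the one that was defined first.
 */
#if 0
static nir_ssa_def *
earlier_def_sketch(nir_ssa_def *a, nir_ssa_def *b)
{
   return a->index < b->index ? a : b;
}
#endif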
/**
 * The indices are applied top-to-bottom which has the very nice property
 * that, if A dominates B, then A->index <= B->index.
 */
unsigned
nir_index_instrs(nir_function_impl *impl)
{
   unsigned index = 0;

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         instr->index = index++;
   }

   return index;
}
nir_intrinsic_op
nir_intrinsic_from_system_value(gl_system_value val)
{
   switch (val) {
   case SYSTEM_VALUE_VERTEX_ID:
      return nir_intrinsic_load_vertex_id;
   case SYSTEM_VALUE_INSTANCE_ID:
      return nir_intrinsic_load_instance_id;
   case SYSTEM_VALUE_DRAW_ID:
      return nir_intrinsic_load_draw_id;
   case SYSTEM_VALUE_BASE_INSTANCE:
      return nir_intrinsic_load_base_instance;
   case SYSTEM_VALUE_VERTEX_ID_ZERO_BASE:
      return nir_intrinsic_load_vertex_id_zero_base;
   case SYSTEM_VALUE_IS_INDEXED_DRAW:
      return nir_intrinsic_load_is_indexed_draw;
   case SYSTEM_VALUE_FIRST_VERTEX:
      return nir_intrinsic_load_first_vertex;
   case SYSTEM_VALUE_BASE_VERTEX:
      return nir_intrinsic_load_base_vertex;
   case SYSTEM_VALUE_INVOCATION_ID:
      return nir_intrinsic_load_invocation_id;
   case SYSTEM_VALUE_FRAG_COORD:
      return nir_intrinsic_load_frag_coord;
   case SYSTEM_VALUE_FRONT_FACE:
      return nir_intrinsic_load_front_face;
   case SYSTEM_VALUE_SAMPLE_ID:
      return nir_intrinsic_load_sample_id;
   case SYSTEM_VALUE_SAMPLE_POS:
      return nir_intrinsic_load_sample_pos;
   case SYSTEM_VALUE_SAMPLE_MASK_IN:
      return nir_intrinsic_load_sample_mask_in;
   case SYSTEM_VALUE_LOCAL_INVOCATION_ID:
      return nir_intrinsic_load_local_invocation_id;
   case SYSTEM_VALUE_LOCAL_INVOCATION_INDEX:
      return nir_intrinsic_load_local_invocation_index;
   case SYSTEM_VALUE_WORK_GROUP_ID:
      return nir_intrinsic_load_work_group_id;
   case SYSTEM_VALUE_NUM_WORK_GROUPS:
      return nir_intrinsic_load_num_work_groups;
   case SYSTEM_VALUE_PRIMITIVE_ID:
      return nir_intrinsic_load_primitive_id;
   case SYSTEM_VALUE_TESS_COORD:
      return nir_intrinsic_load_tess_coord;
   case SYSTEM_VALUE_TESS_LEVEL_OUTER:
      return nir_intrinsic_load_tess_level_outer;
   case SYSTEM_VALUE_TESS_LEVEL_INNER:
      return nir_intrinsic_load_tess_level_inner;
   case SYSTEM_VALUE_VERTICES_IN:
      return nir_intrinsic_load_patch_vertices_in;
   case SYSTEM_VALUE_HELPER_INVOCATION:
      return nir_intrinsic_load_helper_invocation;
   case SYSTEM_VALUE_VIEW_INDEX:
      return nir_intrinsic_load_view_index;
   case SYSTEM_VALUE_SUBGROUP_SIZE:
      return nir_intrinsic_load_subgroup_size;
   case SYSTEM_VALUE_SUBGROUP_INVOCATION:
      return nir_intrinsic_load_subgroup_invocation;
   case SYSTEM_VALUE_SUBGROUP_EQ_MASK:
      return nir_intrinsic_load_subgroup_eq_mask;
   case SYSTEM_VALUE_SUBGROUP_GE_MASK:
      return nir_intrinsic_load_subgroup_ge_mask;
   case SYSTEM_VALUE_SUBGROUP_GT_MASK:
      return nir_intrinsic_load_subgroup_gt_mask;
   case SYSTEM_VALUE_SUBGROUP_LE_MASK:
      return nir_intrinsic_load_subgroup_le_mask;
   case SYSTEM_VALUE_SUBGROUP_LT_MASK:
      return nir_intrinsic_load_subgroup_lt_mask;
   case SYSTEM_VALUE_NUM_SUBGROUPS:
      return nir_intrinsic_load_num_subgroups;
   case SYSTEM_VALUE_SUBGROUP_ID:
      return nir_intrinsic_load_subgroup_id;
   case SYSTEM_VALUE_LOCAL_GROUP_SIZE:
      return nir_intrinsic_load_local_group_size;
   case SYSTEM_VALUE_GLOBAL_INVOCATION_ID:
      return nir_intrinsic_load_global_invocation_id;
   case SYSTEM_VALUE_WORK_DIM:
      return nir_intrinsic_load_work_dim;
   default:
      unreachable("system value does not directly correspond to intrinsic");
   }
}
gl_system_value
nir_system_value_from_intrinsic(nir_intrinsic_op intrin)
{
   switch (intrin) {
   case nir_intrinsic_load_vertex_id:
      return SYSTEM_VALUE_VERTEX_ID;
   case nir_intrinsic_load_instance_id:
      return SYSTEM_VALUE_INSTANCE_ID;
   case nir_intrinsic_load_draw_id:
      return SYSTEM_VALUE_DRAW_ID;
   case nir_intrinsic_load_base_instance:
      return SYSTEM_VALUE_BASE_INSTANCE;
   case nir_intrinsic_load_vertex_id_zero_base:
      return SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
   case nir_intrinsic_load_first_vertex:
      return SYSTEM_VALUE_FIRST_VERTEX;
   case nir_intrinsic_load_is_indexed_draw:
      return SYSTEM_VALUE_IS_INDEXED_DRAW;
   case nir_intrinsic_load_base_vertex:
      return SYSTEM_VALUE_BASE_VERTEX;
   case nir_intrinsic_load_invocation_id:
      return SYSTEM_VALUE_INVOCATION_ID;
   case nir_intrinsic_load_frag_coord:
      return SYSTEM_VALUE_FRAG_COORD;
   case nir_intrinsic_load_front_face:
      return SYSTEM_VALUE_FRONT_FACE;
   case nir_intrinsic_load_sample_id:
      return SYSTEM_VALUE_SAMPLE_ID;
   case nir_intrinsic_load_sample_pos:
      return SYSTEM_VALUE_SAMPLE_POS;
   case nir_intrinsic_load_sample_mask_in:
      return SYSTEM_VALUE_SAMPLE_MASK_IN;
   case nir_intrinsic_load_local_invocation_id:
      return SYSTEM_VALUE_LOCAL_INVOCATION_ID;
   case nir_intrinsic_load_local_invocation_index:
      return SYSTEM_VALUE_LOCAL_INVOCATION_INDEX;
   case nir_intrinsic_load_num_work_groups:
      return SYSTEM_VALUE_NUM_WORK_GROUPS;
   case nir_intrinsic_load_work_group_id:
      return SYSTEM_VALUE_WORK_GROUP_ID;
   case nir_intrinsic_load_primitive_id:
      return SYSTEM_VALUE_PRIMITIVE_ID;
   case nir_intrinsic_load_tess_coord:
      return SYSTEM_VALUE_TESS_COORD;
   case nir_intrinsic_load_tess_level_outer:
      return SYSTEM_VALUE_TESS_LEVEL_OUTER;
   case nir_intrinsic_load_tess_level_inner:
      return SYSTEM_VALUE_TESS_LEVEL_INNER;
   case nir_intrinsic_load_patch_vertices_in:
      return SYSTEM_VALUE_VERTICES_IN;
   case nir_intrinsic_load_helper_invocation:
      return SYSTEM_VALUE_HELPER_INVOCATION;
   case nir_intrinsic_load_view_index:
      return SYSTEM_VALUE_VIEW_INDEX;
   case nir_intrinsic_load_subgroup_size:
      return SYSTEM_VALUE_SUBGROUP_SIZE;
   case nir_intrinsic_load_subgroup_invocation:
      return SYSTEM_VALUE_SUBGROUP_INVOCATION;
   case nir_intrinsic_load_subgroup_eq_mask:
      return SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   case nir_intrinsic_load_subgroup_ge_mask:
      return SYSTEM_VALUE_SUBGROUP_GE_MASK;
   case nir_intrinsic_load_subgroup_gt_mask:
      return SYSTEM_VALUE_SUBGROUP_GT_MASK;
   case nir_intrinsic_load_subgroup_le_mask:
      return SYSTEM_VALUE_SUBGROUP_LE_MASK;
   case nir_intrinsic_load_subgroup_lt_mask:
      return SYSTEM_VALUE_SUBGROUP_LT_MASK;
   case nir_intrinsic_load_num_subgroups:
      return SYSTEM_VALUE_NUM_SUBGROUPS;
   case nir_intrinsic_load_subgroup_id:
      return SYSTEM_VALUE_SUBGROUP_ID;
   case nir_intrinsic_load_local_group_size:
      return SYSTEM_VALUE_LOCAL_GROUP_SIZE;
   case nir_intrinsic_load_global_invocation_id:
      return SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
   default:
      unreachable("intrinsic doesn't produce a system value");
   }
}
/* OpenGL utility method that remaps the location attributes if they are
 * doubles. Not needed for vulkan due to the differences on the input location
 * count for doubles on vulkan vs OpenGL
 *
 * The bitfield returned in dual_slot is one bit for each double input slot in
 * the original OpenGL single-slot input numbering.  The mapping from old
 * locations to new locations is as follows:
 *
 *    new_loc = loc + util_bitcount(dual_slot & BITFIELD64_MASK(loc))
 */
void
nir_remap_dual_slot_attributes(nir_shader *shader, uint64_t *dual_slot)
{
   assert(shader->info.stage == MESA_SHADER_VERTEX);

   *dual_slot = 0;
   nir_foreach_variable(var, &shader->inputs) {
      if (glsl_type_is_dual_slot(glsl_without_array(var->type))) {
         unsigned slots = glsl_count_attribute_slots(var->type, true);
         *dual_slot |= BITFIELD64_MASK(slots) << var->data.location;
      }
   }

   nir_foreach_variable(var, &shader->inputs) {
      var->data.location +=
         util_bitcount64(*dual_slot & BITFIELD64_MASK(var->data.location));
   }
}
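
/* Worked example of the mapping above: if the input at old location 0 is a
 * double taking two slots (dual_slot = 0x1), then the variable at old
 * location 1 moves to 1 + util_bitcount(0x1 & BITFIELD64_MASK(1)) = 2 and
 * the one at old location 2 moves to 3.
 */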
/* Returns an attribute mask that has been re-compacted using the given
 * dual_slot mask.
 */
uint64_t
nir_get_single_slot_attribs_mask(uint64_t attribs, uint64_t dual_slot)
{
   while (dual_slot) {
      unsigned loc = u_bit_scan64(&dual_slot);
      /* mask of all bits up to and including loc */
      uint64_t mask = BITFIELD64_MASK(loc + 1);
      attribs = (attribs & mask) | ((attribs & ~mask) >> 1);
   }
   return attribs;
}