/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "spirv_info.h"
#include "nir/nir_vla.h"
#include "util/debug.h"

static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}

static unsigned
glsl_type_count_function_params(const struct glsl_type *type)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      return 1;
   } else if (glsl_type_is_array_or_matrix(type)) {
      return glsl_get_length(type) *
             glsl_type_count_function_params(glsl_get_array_element(type));
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned count = 0;
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         count += glsl_type_count_function_params(elem_type);
      }
      return count;
   }
}
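
/* For example, under the counting above a struct containing a vec4 and a
 * float[3] flattens to 1 + 3 = 4 parameters: every scalar/vector leaf of an
 * aggregate becomes its own NIR parameter.
 */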

static void
glsl_type_add_to_function_params(const struct glsl_type *type,
                                 nir_function *func,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type),
         .bit_size = glsl_get_bit_size(type),
      };
   } else if (glsl_type_is_array_or_matrix(type)) {
      unsigned elems = glsl_get_length(type);
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      for (unsigned i = 0; i < elems; i++)
         glsl_type_add_to_function_params(elem_type, func, param_idx);
   } else {
      assert(glsl_type_is_struct_or_ifc(type));
      unsigned elems = glsl_get_length(type);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
         glsl_type_add_to_function_params(elem_type, func, param_idx);
      }
   }
}

static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          call, param_idx);
      }
   }
}

static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  unsigned *param_idx)
{
   if (glsl_type_is_vector_or_scalar(value->type)) {
      value->def = nir_load_param(&b->nb, (*param_idx)++);
   } else {
      unsigned elems = glsl_get_length(value->type);
      for (unsigned i = 0; i < elems; i++)
         vtn_ssa_value_load_function_param(b, value->elems[i], param_idx);
   }
}
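
/* The three helpers above must all walk aggregate types in the same
 * depth-first, field-order fashion so that the indices produced by
 * glsl_type_add_to_function_params line up exactly with the sources added by
 * vtn_ssa_value_add_to_call_params on the caller side and with the
 * nir_load_param indices consumed by vtn_ssa_value_load_function_param on the
 * callee side.
 */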

void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, w[4 + i]),
                                       call, &param_idx);
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
   }
}
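
/* Calling convention note: when the callee returns a value, parameter 0 of
 * the nir_function is a pointer to a caller-allocated temporary (the
 * "return_tmp" variable above).  The callee writes its result through that
 * pointer (see vtn_emit_ret_store below) and the caller loads the temporary
 * back after the call to obtain the return value.
 */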

static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      b->func->node.type = vtn_cf_node_type_function;
      b->func->node.parent = NULL;
      list_inithead(&b->func->body);
      b->func->control = w[3];

      UNUSED const struct glsl_type *result_type = vtn_get_type(b, w[1])->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      b->func->type = vtn_get_type(b, w[4]);
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += glsl_type_count_function_params(func_type->params[i]->type);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         glsl_type_add_to_function_params(func_type->params[i]->type, func, &idx);
      assert(idx == num_params);

      b->func->impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, func->impl);
      b->nb.cursor = nir_before_cf_list(&b->func->impl->body);
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      vtn_assert(b->func_param_idx < b->func->impl->function->num_params);
      struct vtn_type *type = vtn_get_type(b, w[1]);
      struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
      vtn_ssa_value_load_function_param(b, value, &b->func_param_idx);
      vtn_push_ssa_value(b, w[2], value);
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->node.link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      break;
   }

   return true;
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->node.link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->node.link, &cse->fallthrough->node.link);
   } else {
      list_add(&cse->node.link, &swtch->cases);
   }
}
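
/* Example: with cases A, B, and C where A falls through to B, the DFS above
 * places A immediately before B in the ordered list, so the structured
 * emitter below can rely on fall-through neighbors being adjacent.
 */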

static void
vtn_switch_order_cases(struct vtn_switch *swtch)
{
   struct list_head cases;
   list_replace(&swtch->cases, &cases);
   list_inithead(&swtch->cases);
   while (!list_is_empty(&cases)) {
      struct vtn_case *cse =
         list_first_entry(&cases, struct vtn_case, node.link);
      vtn_order_case(swtch, cse);
   }
}

static void
vtn_block_set_merge_cf_node(struct vtn_builder *b, struct vtn_block *block,
                            struct vtn_cf_node *cf_node)
{
   vtn_fail_if(block->merge_cf_node != NULL,
               "The merge block declared by a header block cannot be a "
               "merge block declared by any other header block.");

   block->merge_cf_node = cf_node;
}

#define VTN_DECL_CF_NODE_FIND(_type)                      \
static inline struct vtn_##_type *                        \
vtn_cf_node_find_##_type(struct vtn_cf_node *node)        \
{                                                         \
   while (node && node->type != vtn_cf_node_type_##_type) \
      node = node->parent;                                \
   return (struct vtn_##_type *)node;                     \
}

VTN_DECL_CF_NODE_FIND(if)
VTN_DECL_CF_NODE_FIND(loop)
VTN_DECL_CF_NODE_FIND(case)
VTN_DECL_CF_NODE_FIND(switch)
VTN_DECL_CF_NODE_FIND(function)
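
/* Each instantiation above expands to a helper such as
 * vtn_cf_node_find_loop(node), which walks node->parent links and returns the
 * innermost enclosing CF node of the requested type, or NULL if there is
 * none.
 */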

static enum vtn_branch_type
vtn_handle_branch(struct vtn_builder *b,
                  struct vtn_cf_node *cf_parent,
                  struct vtn_block *target_block)
{
   struct vtn_loop *loop = vtn_cf_node_find_loop(cf_parent);

   /* Detect a loop back-edge first.  That way none of the code below
    * accidentally operates on a loop back-edge.
    */
   if (loop && target_block == loop->header_block)
      return vtn_branch_type_loop_back_edge;

   /* Try to detect fall-through */
   if (target_block->switch_case) {
      /* When it comes to handling switch cases, we can break calls to
       * vtn_handle_branch into two cases: calls from within a case construct
       * and calls for the jump to each case construct.  In the second case,
       * cf_parent is the vtn_switch itself and vtn_cf_node_find_case() will
       * return the outer switch case in which this switch is contained.  It's
       * fine if the target block is a switch case from an outer switch as
       * long as it is also the switch break for this switch.
       */
      struct vtn_case *switch_case = vtn_cf_node_find_case(cf_parent);

      /* This doesn't get called for the OpSwitch */
      vtn_fail_if(switch_case == NULL,
                  "A switch case can only be entered through an OpSwitch or "
                  "falling through from another switch case.");

      /* Because block->switch_case is only set on the entry block for a given
       * switch case, we only ever get here if we're jumping to the start of a
       * switch case.  It's possible, however, that a switch case could jump
       * to itself via a back-edge.  That *should* get caught by the loop
       * handling case above but if we have a back edge without a loop merge,
       * we could end up here.
       */
      vtn_fail_if(target_block->switch_case == switch_case,
                  "A switch cannot fall-through to itself.  Likely, there is "
                  "a back-edge which is not to a loop header.");

      vtn_fail_if(target_block->switch_case->node.parent !=
                     switch_case->node.parent,
                  "A switch case fall-through must come from the same "
                  "OpSwitch construct");

      vtn_fail_if(switch_case->fallthrough != NULL &&
                  switch_case->fallthrough != target_block->switch_case,
                  "Each case construct can have at most one branch to "
                  "another case construct");

      switch_case->fallthrough = target_block->switch_case;

      /* We don't immediately return vtn_branch_type_switch_fallthrough
       * because it may also be a loop or switch break for an inner loop or
       * switch and that takes precedence.
       */
   }

   if (loop && target_block == loop->cont_block)
      return vtn_branch_type_loop_continue;

   /* We walk blocks as a breadth-first search on the control-flow construct
    * tree where, when we find a construct, we add the vtn_cf_node for that
    * construct and continue iterating at the merge target block (if any).
    * Therefore, we want merges whose parent == cf_parent to be treated as
    * regular branches.  We only want to consider merges if they break out
    * of the current CF construct.
    */
   if (target_block->merge_cf_node != NULL &&
       target_block->merge_cf_node->parent != cf_parent) {
      switch (target_block->merge_cf_node->type) {
      case vtn_cf_node_type_if:
         for (struct vtn_cf_node *node = cf_parent;
              node != target_block->merge_cf_node; node = node->parent) {
            vtn_fail_if(node == NULL || node->type != vtn_cf_node_type_if,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of a "
                        "selection construct");

            struct vtn_if *if_stmt = vtn_cf_node_as_if(node);

            /* This should be guaranteed by our iteration */
            assert(if_stmt->merge_block != target_block);

            vtn_fail_if(if_stmt->merge_block != NULL,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of the "
                        "inner most nested selection level");
         }
         return vtn_branch_type_if_merge;

      case vtn_cf_node_type_loop:
         vtn_fail_if(target_block->merge_cf_node != &loop->node,
                     "Loop breaks can only break out of the inner most "
                     "nested loop level");
         return vtn_branch_type_loop_break;

      case vtn_cf_node_type_switch: {
         struct vtn_switch *swtch = vtn_cf_node_find_switch(cf_parent);
         vtn_fail_if(target_block->merge_cf_node != &swtch->node,
                     "Switch breaks can only break out of the inner most "
                     "nested switch level");
         return vtn_branch_type_switch_break;
      }

      default:
         unreachable("Invalid CF node type for a merge");
      }
   }

   if (target_block->switch_case)
      return vtn_branch_type_switch_fallthrough;

   return vtn_branch_type_none;
}
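
/* To summarize the classification order above: loop back-edges win over
 * everything, then loop continues, then breaks implied by a merge block
 * (if/loop/switch), then switch fall-throughs, and only a branch that is
 * none of these is reported as vtn_branch_type_none, i.e. a plain forward
 * edge.
 */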

struct vtn_cfg_work_item {
   struct list_head link;

   struct vtn_cf_node *cf_parent;
   struct list_head *cf_list;
   struct vtn_block *start_block;
};

static void
vtn_add_cfg_work_item(struct vtn_builder *b,
                      struct list_head *work_list,
                      struct vtn_cf_node *cf_parent,
                      struct list_head *cf_list,
                      struct vtn_block *start_block)
{
   struct vtn_cfg_work_item *work = ralloc(b, struct vtn_cfg_work_item);
   work->cf_parent = cf_parent;
   work->cf_list = cf_list;
   work->start_block = start_block;
   list_addtail(&work->link, work_list);
}

/* Parses the case list of an OpSwitch and adds a vtn_case (including the
 * default case) to case_list for each distinct target block.
 */
static void
vtn_parse_switch(struct vtn_builder *b,
                 struct vtn_switch *swtch,
                 const uint32_t *branch,
                 struct list_head *case_list)
{
   const uint32_t *branch_end = branch + (branch[0] >> SpvWordCountShift);

   struct vtn_value *sel_val = vtn_untyped_value(b, branch[1]);
   vtn_fail_if(!sel_val->type ||
               sel_val->type->base_type != vtn_base_type_scalar,
               "Selector of OpSwitch must have a type of OpTypeInt");

   nir_alu_type sel_type =
      nir_get_nir_type_for_glsl_type(sel_val->type->type);
   vtn_fail_if(nir_alu_type_get_base_type(sel_type) != nir_type_int &&
               nir_alu_type_get_base_type(sel_type) != nir_type_uint,
               "Selector of OpSwitch must have a type of OpTypeInt");

   struct hash_table *block_to_case = _mesa_pointer_hash_table_create(b);

   bool is_default = true;
   const unsigned bitsize = nir_alu_type_get_type_size(sel_type);
   for (const uint32_t *w = branch + 2; w < branch_end;) {
      uint64_t literal = 0;
      if (!is_default) {
         if (bitsize <= 32) {
            literal = *(w++);
         } else {
            assert(bitsize == 64);
            literal = vtn_u64_literal(w);
            w += 2;
         }
      }
      struct vtn_block *case_block = vtn_block(b, *(w++));

      struct hash_entry *case_entry =
         _mesa_hash_table_search(block_to_case, case_block);

      struct vtn_case *cse;
      if (case_entry) {
         cse = case_entry->data;
      } else {
         cse = rzalloc(b, struct vtn_case);

         cse->node.type = vtn_cf_node_type_case;
         cse->node.parent = swtch ? &swtch->node : NULL;
         cse->block = case_block;
         list_inithead(&cse->body);
         util_dynarray_init(&cse->values, b);

         list_addtail(&cse->node.link, case_list);
         _mesa_hash_table_insert(block_to_case, case_block, cse);
      }

      if (is_default) {
         cse->is_default = true;
      } else {
         util_dynarray_append(&cse->values, uint64_t, literal);
      }

      is_default = false;
   }

   _mesa_hash_table_destroy(block_to_case, NULL);
}
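
/* Because case blocks are deduplicated through block_to_case, an OpSwitch
 * such as
 *
 *    OpSwitch %sel %default 0 %A 1 %A 2 %B
 *
 * produces one vtn_case for %A with values {0, 1} and one for %B with value
 * {2}, plus the default case, rather than one case per literal.
 */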

/* Processes a block and returns the next block to process or NULL if we've
 * reached the end of the construct.
 */
static struct vtn_block *
vtn_process_block(struct vtn_builder *b,
                  struct list_head *work_list,
                  struct vtn_cf_node *cf_parent,
                  struct list_head *cf_list,
                  struct vtn_block *block)
{
   if (!list_is_empty(cf_list)) {
      /* vtn_process_block() acts like an iterator: it processes the given
       * block and then returns the next block to process.  For a given
       * control-flow construct, vtn_build_cfg() calls vtn_process_block()
       * repeatedly until it finally returns NULL.  Therefore, we know that
       * the only blocks on which vtn_process_block() can be called are either
       * the first block in a construct or a block that vtn_process_block()
       * returned for the current construct.  If cf_list is empty then we know
       * that we're processing the first block in the construct and we have to
       * add it to the list.
       *
       * If cf_list is not empty, then it must be the block returned by the
       * previous call to vtn_process_block().  We know a priori that
       * vtn_process_block only returns either normal branches
       * (vtn_branch_type_none) or merge target blocks.
       */
      switch (vtn_handle_branch(b, cf_parent, block)) {
      case vtn_branch_type_none:
         /* For normal branches, we want to process them and add them to the
          * current construct.  Merge target blocks also look like normal
          * branches from the perspective of this construct.  See also
          * vtn_handle_branch().
          */
         break;

      case vtn_branch_type_loop_continue:
      case vtn_branch_type_switch_fallthrough:
         /* The two cases where we can get early exits from a construct that
          * are not to that construct's merge target are loop continues and
          * switch fall-throughs.  In these cases, we need to break out of the
          * current construct by returning NULL.
          */
         return NULL;

      default:
         /* The only way we can get here is if something was used as two kinds
          * of merges at the same time and that's illegal.
          */
         vtn_fail("A block was used as a merge target from two or more "
                  "structured control-flow constructs");
      }
   }

   /* Once a block has been processed, it is placed into a CF list and its
    * list link will point to something non-null.  If we see a node we've
    * already processed here, it either exists in multiple functions or it's
    * an invalid CFG.
    */
   if (block->node.parent != NULL) {
      vtn_fail_if(vtn_cf_node_find_function(&block->node) !=
                  vtn_cf_node_find_function(cf_parent),
                  "A block cannot exist in two functions at the "
                  "same time");

      vtn_fail("Invalid back or cross-edge in the CFG");
   }

   if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
       block->loop == NULL) {
      vtn_fail_if((*block->branch & SpvOpCodeMask) != SpvOpBranch &&
                  (*block->branch & SpvOpCodeMask) != SpvOpBranchConditional,
                  "An OpLoopMerge instruction must immediately precede "
                  "either an OpBranch or OpBranchConditional instruction.");

      struct vtn_loop *loop = rzalloc(b, struct vtn_loop);

      loop->node.type = vtn_cf_node_type_loop;
      loop->node.parent = cf_parent;
      list_inithead(&loop->body);
      list_inithead(&loop->cont_body);
      loop->header_block = block;
      loop->break_block = vtn_block(b, block->merge[1]);
      loop->cont_block = vtn_block(b, block->merge[2]);
      loop->control = block->merge[3];

      list_addtail(&loop->node.link, cf_list);
      block->loop = loop;

      /* Note: The work item for the main loop body will start with the
       * current block as its start block.  If we weren't careful, we would
       * get here again and end up in an infinite loop.  This is why we set
       * block->loop above and check for it before creating one.  This way,
       * we only create the loop once and the second iteration that tries to
       * handle this loop goes to the cases below and gets handled as a
       * regular block.
       */
      vtn_add_cfg_work_item(b, work_list, &loop->node,
                            &loop->body, loop->header_block);

      /* For continue targets, SPIR-V guarantees the following:
       *
       *  - the Continue Target must dominate the back-edge block
       *  - the back-edge block must post dominate the Continue Target
       *
       * If the header block is the same as the continue target, this
       * condition is trivially satisfied and there is no real continue
       * section.
       */
      if (loop->cont_block != loop->header_block) {
         vtn_add_cfg_work_item(b, work_list, &loop->node,
                               &loop->cont_body, loop->cont_block);
      }

      vtn_block_set_merge_cf_node(b, loop->break_block, &loop->node);

      return loop->break_block;
   }

   /* Add the block to the CF list */
   block->node.parent = cf_parent;
   list_addtail(&block->node.link, cf_list);

   switch (*block->branch & SpvOpCodeMask) {
   case SpvOpBranch: {
      struct vtn_block *branch_block = vtn_block(b, block->branch[1]);

      block->branch_type = vtn_handle_branch(b, cf_parent, branch_block);

      if (block->branch_type == vtn_branch_type_none)
         return branch_block;
      else
         return NULL;
   }

   case SpvOpReturn:
   case SpvOpReturnValue:
      block->branch_type = vtn_branch_type_return;
      return NULL;

   case SpvOpKill:
      block->branch_type = vtn_branch_type_discard;
      return NULL;

   case SpvOpBranchConditional: {
      struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
      vtn_fail_if(!cond_val->type ||
                  cond_val->type->base_type != vtn_base_type_scalar ||
                  cond_val->type->type != glsl_bool_type(),
                  "Condition must be a Boolean type scalar");

      struct vtn_block *then_block = vtn_block(b, block->branch[2]);
      struct vtn_block *else_block = vtn_block(b, block->branch[3]);

      if (then_block == else_block) {
         /* This is uncommon but it can happen.  We treat this the same way as
          * an unconditional branch.
          */
         block->branch_type = vtn_handle_branch(b, cf_parent, then_block);

         if (block->branch_type == vtn_branch_type_none)
            return then_block;
         else
            return NULL;
      }

      struct vtn_if *if_stmt = rzalloc(b, struct vtn_if);

      if_stmt->node.type = vtn_cf_node_type_if;
      if_stmt->node.parent = cf_parent;
      if_stmt->condition = block->branch[1];
      list_inithead(&if_stmt->then_body);
      list_inithead(&if_stmt->else_body);

      list_addtail(&if_stmt->node.link, cf_list);

      if (block->merge &&
          (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
         /* We may not always have a merge block and that merge doesn't
          * technically have to be an OpSelectionMerge.  We could have a block
          * with an OpLoopMerge which ends in an OpBranchConditional.
          */
         if_stmt->merge_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, if_stmt->merge_block, &if_stmt->node);

         if_stmt->control = block->merge[2];
      }

      if_stmt->then_type = vtn_handle_branch(b, &if_stmt->node, then_block);
      if (if_stmt->then_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->then_body, then_block);
      }

      if_stmt->else_type = vtn_handle_branch(b, &if_stmt->node, else_block);
      if (if_stmt->else_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->else_body, else_block);
      }

      return if_stmt->merge_block;
   }

   case SpvOpSwitch: {
      struct vtn_switch *swtch = rzalloc(b, struct vtn_switch);

      swtch->node.type = vtn_cf_node_type_switch;
      swtch->node.parent = cf_parent;
      swtch->selector = block->branch[1];
      list_inithead(&swtch->cases);

      list_addtail(&swtch->node.link, cf_list);

      /* We may not always have a merge block */
      if (block->merge) {
         vtn_fail_if((*block->merge & SpvOpCodeMask) != SpvOpSelectionMerge,
                     "A merge instruction immediately preceding an OpSwitch "
                     "must be an OpSelectionMerge instruction.");
         swtch->break_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, swtch->break_block, &swtch->node);
      }

      /* First, we go through and record all of the cases. */
      vtn_parse_switch(b, swtch, block->branch, &swtch->cases);

      /* Gather the branch types for the switch */
      vtn_foreach_cf_node(case_node, &swtch->cases) {
         struct vtn_case *cse = vtn_cf_node_as_case(case_node);

         cse->type = vtn_handle_branch(b, &swtch->node, cse->block);
         switch (cse->type) {
         case vtn_branch_type_none:
            /* This is a "real" case which has stuff in it */
            vtn_fail_if(cse->block->switch_case != NULL,
                        "OpSwitch has a case which is also in another "
                        "OpSwitch construct");
            cse->block->switch_case = cse;
            vtn_add_cfg_work_item(b, work_list, &cse->node,
                                  &cse->body, cse->block);
            break;

         case vtn_branch_type_switch_break:
         case vtn_branch_type_loop_break:
         case vtn_branch_type_loop_continue:
            /* Switch breaks as well as loop breaks and continues can be
             * used to break out of a switch construct or as direct targets
             * of the OpSwitch.
             */
            break;

         default:
            vtn_fail("Target of OpSwitch is not a valid structured exit "
                     "from the switch construct.");
         }
      }

      return swtch->break_block;
   }

   case SpvOpUnreachable:
      return NULL;

   default:
      vtn_fail("Block did not end with a valid branch instruction");
   }
}

void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   if (b->shader->info.stage == MESA_SHADER_KERNEL)
      return;

   vtn_foreach_cf_node(func_node, &b->functions) {
      struct vtn_function *func = vtn_cf_node_as_function(func_node);

      /* We build the CFG for each function by doing a breadth-first search on
       * the control-flow graph.  We keep track of our state using a worklist.
       * Doing a BFS ensures that we visit each structured control-flow
       * construct and its merge node before we visit the stuff inside the
       * construct.
       */
      struct list_head work_list;
      list_inithead(&work_list);
      vtn_add_cfg_work_item(b, &work_list, &func->node, &func->body,
                            func->start_block);

      while (!list_is_empty(&work_list)) {
         struct vtn_cfg_work_item *work =
            list_first_entry(&work_list, struct vtn_cfg_work_item, link);
         list_del(&work->link);

         for (struct vtn_block *block = work->start_block; block; ) {
            block = vtn_process_block(b, &work_list, work->cf_parent,
                                      work->cf_list, block);
         }
      }
   }
}

static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_get_type(b, w[1]);
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa_value(b, w[2],
                      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}
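
/* Sketch of the two-pass phi lowering, for a phi with two predecessors:
 *
 *    %x = OpPhi %float %a %blockA %b %blockB
 *
 * The first pass replaces the phi with "x = load_var(phi)".  The second pass
 * (vtn_handle_phi_second_pass below) appends "store_var(phi, a)" at the end
 * of blockA and "store_var(phi, b)" at the end of blockB.  Running
 * lower_vars_to_ssa afterwards turns the variable back into proper SSA phis.
 */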

static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);

   /* It's possible that this phi is in an unreachable block in which case it
    * may never have been emitted and therefore may not be in the hash table.
    * In this case, there's no var for it and it's safe to just bail.
    */
   if (phi_entry == NULL)
      return true;

   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the predecessor block has no end_nop, it is an unreachable block
       * and there is no need to handle the phi source for it.
       */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}

static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_if_merge:
      break; /* Nothing to do */
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_loop_back_edge:
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      vtn_fail("Invalid branch type");
   }
}

static nir_ssa_def *
vtn_switch_case_condition(struct vtn_builder *b, struct vtn_switch *swtch,
                          nir_ssa_def *sel, struct vtn_case *cse)
{
   if (cse->is_default) {
      nir_ssa_def *any = nir_imm_false(&b->nb);
      vtn_foreach_cf_node(other_node, &swtch->cases) {
         struct vtn_case *other = vtn_cf_node_as_case(other_node);
         if (other->is_default)
            continue;

         any = nir_ior(&b->nb, any,
                       vtn_switch_case_condition(b, swtch, sel, other));
      }
      return nir_inot(&b->nb, any);
   } else {
      nir_ssa_def *cond = nir_imm_false(&b->nb);
      util_dynarray_foreach(&cse->values, uint64_t, val) {
         nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
         cond = nir_ior(&b->nb, cond, nir_ieq(&b->nb, sel, imm));
      }
      return cond;
   }
}
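
/* For a value case with values {0, 1} this builds the condition
 * (sel == 0) || (sel == 1); for the default case it builds the negation of
 * the OR of every non-default case's condition, i.e. "none of the explicit
 * cases matched".
 */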

static nir_loop_control
vtn_loop_control(struct vtn_builder *b, struct vtn_loop *vtn_loop)
{
   if (vtn_loop->control == SpvLoopControlMaskNone)
      return nir_loop_control_none;
   else if (vtn_loop->control & SpvLoopControlDontUnrollMask)
      return nir_loop_control_dont_unroll;
   else if (vtn_loop->control & SpvLoopControlUnrollMask)
      return nir_loop_control_unroll;
   else if (vtn_loop->control & SpvLoopControlDependencyInfiniteMask ||
            vtn_loop->control & SpvLoopControlDependencyLengthMask ||
            vtn_loop->control & SpvLoopControlMinIterationsMask ||
            vtn_loop->control & SpvLoopControlMaxIterationsMask ||
            vtn_loop->control & SpvLoopControlIterationMultipleMask ||
            vtn_loop->control & SpvLoopControlPeelCountMask ||
            vtn_loop->control & SpvLoopControlPartialCountMask) {
      /* We do not do anything special with these yet. */
      return nir_loop_control_none;
   } else {
      vtn_fail("Invalid loop control");
   }
}

static nir_selection_control
vtn_selection_control(struct vtn_builder *b, struct vtn_if *vtn_if)
{
   if (vtn_if->control == SpvSelectionControlMaskNone)
      return nir_selection_control_none;
   else if (vtn_if->control & SpvSelectionControlDontFlattenMask)
      return nir_selection_control_dont_flatten;
   else if (vtn_if->control & SpvSelectionControlFlattenMask)
      return nir_selection_control_flatten;
   else
      vtn_fail("Invalid selection control");
}

static void
vtn_emit_ret_store(struct vtn_builder *b, struct vtn_block *block)
{
   if ((*block->branch & SpvOpCodeMask) != SpvOpReturnValue)
      return;

   vtn_fail_if(b->func->type->return_type->base_type == vtn_base_type_void,
               "Return with a value from a function returning void");
   struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
   const struct glsl_type *ret_type =
      glsl_get_bare_type(b->func->type->return_type->type);
   nir_deref_instr *ret_deref =
      nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                           nir_var_function_temp, ret_type, 0);
   vtn_local_store(b, src, ret_deref, 0);
}

static void
vtn_emit_cf_list_structured(struct vtn_builder *b, struct list_head *cf_list,
                            nir_variable *switch_fall_var,
                            bool *has_switch_break,
                            vtn_instruction_handler handler)
{
   vtn_foreach_cf_node(node, cf_list) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = vtn_cf_node_as_block(node);

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         vtn_emit_ret_store(b, block);

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
            return;
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = vtn_cf_node_as_if(node);
         bool sw_break = false;

         nir_if *nif =
            nir_push_if(&b->nb, vtn_get_nir_ssa(b, vtn_if->condition));

         nif->control = vtn_selection_control(b, vtn_if);

         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->then_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list_structured(b, &vtn_if->else_body,
                                        switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = vtn_cf_node_as_loop(node);

         nir_loop *loop = nir_push_loop(&b->nb);
         loop->control = vtn_loop_control(b, vtn_loop);

         vtn_emit_cf_list_structured(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list_structured(b, &vtn_loop->cont_body, NULL, NULL,
                                        handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);

            b->has_loop_continue = true;
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = vtn_cf_node_as_switch(node);

         /* Before we can emit anything, we need to sort the list of cases in
          * fall-through order.
          */
         vtn_switch_order_cases(vtn_switch);

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, vtn_switch->selector);

         /* Now we can walk the list of cases and actually emit code */
         vtn_foreach_cf_node(case_node, &vtn_switch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);

            /* Figure out the condition */
            nir_ssa_def *cond =
               vtn_switch_case_condition(b, vtn_switch, sel, cse);
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
            vtn_emit_cf_list_structured(b, &cse->body, fall_var, &has_break,
                                        handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}
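
/* The switch lowering above turns each case into an independent nir_if whose
 * condition is "this case matches || fall_var".  A two-case switch where A
 * falls through to B is therefore emitted roughly as:
 *
 *    fall = false
 *    if (cond_A || fall) { fall = true; ...body A... }
 *    if (cond_B || fall) { fall = true; ...body B... }
 *
 * with a switch break simply storing fall = false (see vtn_emit_branch).
 */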

static struct nir_block *
vtn_new_unstructured_block(struct vtn_builder *b, struct vtn_function *func)
{
   struct nir_block *n = nir_block_create(b->shader);
   exec_list_push_tail(&func->impl->body, &n->cf_node.node);
   n->cf_node.parent = &func->impl->cf_node;
   return n;
}

static void
vtn_add_unstructured_block(struct vtn_builder *b,
                           struct vtn_function *func,
                           struct list_head *work_list,
                           struct vtn_block *block)
{
   if (!block->block) {
      block->block = vtn_new_unstructured_block(b, func);
      list_addtail(&block->node.link, work_list);
   }
}

static void
vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
                              vtn_instruction_handler handler)
{
   struct list_head work_list;
   list_inithead(&work_list);

   func->start_block->block = nir_start_block(func->impl);
   list_addtail(&func->start_block->node.link, &work_list);
   while (!list_is_empty(&work_list)) {
      struct vtn_block *block =
         list_first_entry(&work_list, struct vtn_block, node.link);
      list_del(&block->node.link);

      vtn_assert(block->block);

      const uint32_t *block_start = block->label;
      const uint32_t *block_end = block->branch;

      b->nb.cursor = nir_after_block(block->block);
      block_start = vtn_foreach_instruction(b, block_start, block_end,
                                            vtn_handle_phis_first_pass);
      vtn_foreach_instruction(b, block_start, block_end, handler);
      block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                  nir_intrinsic_nop);
      nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

      SpvOp op = *block_end & SpvOpCodeMask;
      switch (op) {
      case SpvOpBranch: {
         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
         vtn_add_unstructured_block(b, func, &work_list, branch_block);
         nir_goto(&b->nb, branch_block->block);
         break;
      }

      case SpvOpBranchConditional: {
         nir_ssa_def *cond = vtn_ssa_value(b, block->branch[1])->def;
         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
         struct vtn_block *else_block = vtn_block(b, block->branch[3]);

         vtn_add_unstructured_block(b, func, &work_list, then_block);
         if (then_block == else_block) {
            nir_goto(&b->nb, then_block->block);
         } else {
            vtn_add_unstructured_block(b, func, &work_list, else_block);
            nir_goto_if(&b->nb, then_block->block, nir_src_for_ssa(cond),
                        else_block->block);
         }

         break;
      }

      case SpvOpSwitch: {
         struct list_head cases;
         list_inithead(&cases);
         vtn_parse_switch(b, NULL, block->branch, &cases);

         nir_ssa_def *sel = vtn_get_nir_ssa(b, block->branch[1]);

         struct vtn_case *def = NULL;
         vtn_foreach_cf_node(case_node, &cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);
            if (cse->is_default) {
               assert(def == NULL);
               def = cse;
               continue;
            }

            nir_ssa_def *cond = nir_imm_false(&b->nb);
            util_dynarray_foreach(&cse->values, uint64_t, val) {
               nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
               cond = nir_ior(&b->nb, cond, nir_ieq(&b->nb, sel, imm));
            }

            /* block for the next check */
            nir_block *e = vtn_new_unstructured_block(b, func);
            vtn_add_unstructured_block(b, func, &work_list, cse->block);

            /* add branching */
            nir_goto_if(&b->nb, cse->block->block, nir_src_for_ssa(cond), e);
            b->nb.cursor = nir_after_block(e);
         }

         vtn_assert(def != NULL);
         vtn_add_unstructured_block(b, func, &work_list, def->block);

         /* now that all cases are handled, branch into the default block */
         nir_goto(&b->nb, def->block->block);
         break;
      }

      case SpvOpKill: {
         nir_intrinsic_instr *discard =
            nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
         nir_builder_instr_insert(&b->nb, &discard->instr);
         nir_goto(&b->nb, b->func->impl->end_block);
         break;
      }

      case SpvOpUnreachable:
      case SpvOpReturn:
      case SpvOpReturnValue: {
         vtn_emit_ret_store(b, block);
         nir_goto(&b->nb, b->func->impl->end_block);
         break;
      }

      default:
         vtn_fail("Unhandled opcode %s", spirv_op_to_string(op));
      }
   }
}
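
/* In the unstructured path, OpSwitch becomes a chain of conditional gotos:
 * each non-default case tests its values and jumps either to its case block
 * or to a fresh block holding the next check, and once every explicit case
 * has been tested the final goto falls back to the default block.
 */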

void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   static int force_unstructured = -1;
   if (force_unstructured < 0) {
      force_unstructured =
         env_var_as_boolean("MESA_SPIRV_FORCE_UNSTRUCTURED", false);
   }

   nir_builder_init(&b->nb, func->impl);
   b->func = func;
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->nb.exact = b->exact;
   b->has_loop_continue = false;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   if (b->shader->info.stage == MESA_SHADER_KERNEL || force_unstructured) {
      b->func->impl->structured = false;
      vtn_emit_cf_func_unstructured(b, func, instruction_handler);
   } else {
      vtn_emit_cf_list_structured(b, &func->body, NULL, NULL,
                                  instruction_handler);
   }

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   nir_rematerialize_derefs_in_use_blocks_impl(func->impl);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);

   func->emitted = true;
}
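
/* Setting MESA_SPIRV_FORCE_UNSTRUCTURED=true in the environment forces the
 * unstructured (goto-based) emission path above even for stages other than
 * MESA_SHADER_KERNEL, which allows exercising the unstructured CFG support
 * on structured shaders.
 */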