/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
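
/* First pass over a function's SPIR-V: records functions, parameters,
 * labels, merges, and branches without emitting any NIR.  The
 * vtn_function/vtn_block structures built here are what
 * vtn_cfg_walk_blocks later turns into a structured control-flow tree.
 */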
static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      list_inithead(&b->func->body);
      b->func->control = w[3];

      const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      const struct glsl_type *func_type =
         vtn_value(b, w[4], vtn_value_type_type)->type->type;

      assert(glsl_get_function_return_type(func_type) == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      func->num_params = glsl_get_length(func_type);
      func->params = ralloc_array(b->shader, nir_parameter, func->num_params);
      for (unsigned i = 0; i < func->num_params; i++) {
         const struct glsl_function_param *param =
            glsl_get_function_param(func_type, i);
         func->params[i].type = param->type;
         if (param->in) {
            if (param->out) {
               func->params[i].param_type = nir_parameter_inout;
            } else {
               func->params[i].param_type = nir_parameter_in;
            }
         } else {
            if (param->out) {
               func->params[i].param_type = nir_parameter_out;
            } else {
               assert(!"Parameter is neither in nor out");
            }
         }
      }

      func->return_type = glsl_get_function_return_type(func_type);

      b->func->impl = nir_function_impl_create(func);
      if (!glsl_type_is_void(func->return_type)) {
         b->func->impl->return_var =
            nir_local_variable_create(b->func->impl, func->return_type, "ret");
      }

      b->func_param_idx = 0;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;
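
   /* Each OpFunctionParameter becomes a NIR local variable wrapped in a
    * vtn_variable, so later instructions can address it through the usual
    * access-chain machinery.
    */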
   case SpvOpFunctionParameter: {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);

      assert(b->func_param_idx < b->func->impl->num_params);
      unsigned idx = b->func_param_idx++;

      nir_variable *param =
         nir_local_variable_create(b->func->impl,
                                   b->func->impl->function->params[idx].type,
                                   val->name);

      b->func->impl->params[idx] = param;

      struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
      vtn_var->mode = vtn_variable_mode_param;
      vtn_var->type = vtn_value(b, w[1], vtn_value_type_type)->type;
      vtn_var->var = param;
      vtn_var->chain.var = vtn_var;
      vtn_var->chain.length = 0;

      val->access_chain = &vtn_var->chain;
      break;
   }
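
   /* OpLabel opens a new block.  Its merge and branch words are filled in
    * by the cases below as those instructions are encountered.
    */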
   case SpvOpLabel: {
      assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;
   }
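
   /* Merge and branch instructions are only recorded on the current block
    * here; vtn_cfg_walk_blocks classifies them later.
    */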
   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
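
/* Records one switch case target.  A single block may be named by several
 * case values (and by the default), so all of them funnel into one
 * vtn_case per target block.
 */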
static void
vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
             struct vtn_block *break_block,
             uint32_t block_id, uint32_t val, bool is_default)
{
   struct vtn_block *case_block =
      vtn_value(b, block_id, vtn_value_type_block)->block;

   /* Don't create dummy cases that just break */
   if (case_block == break_block)
      return;

   if (case_block->switch_case == NULL) {
      struct vtn_case *c = ralloc(b, struct vtn_case);

      list_inithead(&c->body);
      c->start_block = case_block;
      c->fallthrough = NULL;
      nir_array_init(&c->values, b);
      c->is_default = false;
      c->visited = false;

      list_addtail(&c->link, &swtch->cases);

      case_block->switch_case = c;
   }

   if (is_default) {
      case_block->switch_case->is_default = true;
   } else {
      nir_array_add(&case_block->switch_case->values, uint32_t, val);
   }
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->link, &cse->fallthrough->link);
   } else {
      list_add(&cse->link, &swtch->cases);
   }
}
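
/* Classifies a branch target: a fallthrough into another case, a switch
 * break, a loop break, a loop continue, or a plain branch.  As a side
 * effect, fallthroughs are recorded on the current case.
 */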
static enum vtn_branch_type
vtn_get_branch_type(struct vtn_block *block,
                    struct vtn_case *swcase, struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont)
{
   if (block->switch_case) {
      /* This branch is actually a fallthrough */
      assert(swcase->fallthrough == NULL ||
             swcase->fallthrough == block->switch_case);
      swcase->fallthrough = block->switch_case;
      return vtn_branch_type_switch_fallthrough;
   } else if (block == switch_break) {
      return vtn_branch_type_switch_break;
   } else if (block == loop_break) {
      return vtn_branch_type_loop_break;
   } else if (block == loop_cont) {
      return vtn_branch_type_loop_continue;
   } else {
      return vtn_branch_type_none;
   }
}
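
/* Walks the basic blocks from start to end and builds the structured
 * control-flow tree (blocks, ifs, loops, and switches) into cf_list.
 */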
static void
vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
                    struct vtn_block *start, struct vtn_case *switch_case,
                    struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont,
                    struct vtn_block *end)
{
   struct vtn_block *block = start;
   while (block != end) {
      if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
          !block->loop) {
         struct vtn_loop *loop = ralloc(b, struct vtn_loop);

         loop->node.type = vtn_cf_node_type_loop;
         list_inithead(&loop->body);
         list_inithead(&loop->cont_body);
         loop->control = block->merge[3];

         list_addtail(&loop->node.link, cf_list);
         block->loop = loop;

         struct vtn_block *new_loop_break =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;
         struct vtn_block *new_loop_cont =
            vtn_value(b, block->merge[2], vtn_value_type_block)->block;

         /* Note: This recursive call will start with the current block as
          * its start block.  If we weren't careful, we would get here
          * again and end up in infinite recursion.  This is why we set
          * block->loop above and check for it before creating one.  This
          * way, we only create the loop once and the second call that
          * tries to handle this loop goes to the cases below and gets
          * handled as a regular block.
          *
          * Note: When we make the recursive walk calls, we pass NULL for
          * the switch break since you have to break out of the loop first.
          * We do, however, still pass the current switch case because it's
          * possible that the merge block for the loop is the start of
          * another case.
          */
         vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
                             new_loop_break, new_loop_cont, NULL);
         vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
                             new_loop_break, NULL, block);

         block = new_loop_break;
         continue;
      }

      assert(block->node.link.next == NULL);
      list_addtail(&block->node.link, cf_list);

      switch (*block->branch & SpvOpCodeMask) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, block->branch[1], vtn_value_type_block)->block;

         block->branch_type = vtn_get_branch_type(branch_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (block->branch_type != vtn_branch_type_none)
            return;

         block = branch_block;
         continue;
      }

      case SpvOpReturn:
      case SpvOpReturnValue:
         block->branch_type = vtn_branch_type_return;
         return;

      case SpvOpKill:
         block->branch_type = vtn_branch_type_discard;
         return;
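
      /* A conditional branch becomes a vtn_if node.  A side whose target
       * is really a break, continue, or fallthrough is short-circuited
       * (see the cases below) instead of being walked as a body.
       */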
      case SpvOpBranchConditional: {
         struct vtn_block *then_block =
            vtn_value(b, block->branch[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, block->branch[3], vtn_value_type_block)->block;

         struct vtn_if *if_stmt = ralloc(b, struct vtn_if);

         if_stmt->node.type = vtn_cf_node_type_if;
         if_stmt->condition = block->branch[1];
         list_inithead(&if_stmt->then_body);
         list_inithead(&if_stmt->else_body);

         list_addtail(&if_stmt->node.link, cf_list);

         if (block->merge &&
             (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
            if_stmt->control = block->merge[2];
         }

         if_stmt->then_type = vtn_get_branch_type(then_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);
         if_stmt->else_type = vtn_get_branch_type(else_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (if_stmt->then_type == vtn_branch_type_none &&
             if_stmt->else_type == vtn_branch_type_none) {
            /* Neither side of the if is something we can short-circuit. */
            assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge[1], vtn_value_type_block)->block;

            vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);
            vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);

            enum vtn_branch_type merge_type =
               vtn_get_branch_type(merge_block, switch_case, switch_break,
                                   loop_break, loop_cont);
            if (merge_type == vtn_branch_type_none) {
               block = merge_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type != vtn_branch_type_none &&
                    if_stmt->else_type != vtn_branch_type_none) {
            /* Both sides were short-circuited.  We're done here. */
            return;
         } else {
            /* Exactly one side of the branch could be short-circuited.
             * We set the branch up as a predicated break/continue and we
             * continue on with the other side as if it were what comes
             * after the if.
             */
            if (if_stmt->then_type == vtn_branch_type_none) {
               block = then_block;
            } else {
               block = else_block;
            }
            continue;
         }
         unreachable("Should have returned or continued");
      }
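
      /* OpSwitch builds a vtn_switch with one vtn_case per target block;
       * the cases are walked and then sorted into fall-through order.
       */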
      case SpvOpSwitch: {
         assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
         struct vtn_block *break_block =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;

         struct vtn_switch *swtch = ralloc(b, struct vtn_switch);

         swtch->node.type = vtn_cf_node_type_switch;
         swtch->selector = block->branch[1];
         list_inithead(&swtch->cases);

         list_addtail(&swtch->node.link, cf_list);

         /* First, we go through and record all of the cases. */
         const uint32_t *branch_end =
            block->branch + (block->branch[0] >> SpvWordCountShift);

         vtn_add_case(b, swtch, break_block, block->branch[2], 0, true);
         for (const uint32_t *w = block->branch + 3; w < branch_end; w += 2)
            vtn_add_case(b, swtch, break_block, w[1], w[0], false);

         /* Now, we go through and walk the blocks.  While we walk through
          * the blocks, we also gather the much-needed fall-through
          * information.
          */
         list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) {
            assert(cse->start_block != break_block);
            vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
                                break_block, NULL, loop_cont, NULL);
         }

         /* Finally, we walk over all of the cases one more time and put
          * them in fall-through order.
          */
         for (const uint32_t *w = block->branch + 2; w < branch_end; w += 2) {
            struct vtn_block *case_block =
               vtn_value(b, *w, vtn_value_type_block)->block;

            if (case_block == break_block)
               continue;

            assert(case_block->switch_case);

            vtn_order_case(swtch, case_block->switch_case);
         }

         block = break_block;
         continue;
      }

      case SpvOpUnreachable:
         return;

      default:
         unreachable("Unhandled opcode");
      }
   }
}
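
/* Builds the CF trees for every function in the module: first the prepass
 * records blocks and branches, then each implemented function's blocks are
 * walked into a structured tree.
 */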
void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      vtn_cfg_walk_blocks(b, &func->body, func->start_block,
                          NULL, NULL, NULL, NULL, NULL);
   }
}
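
/* Phi handling is done in two passes.  The first pass (below) creates a
 * variable and an up-front load for each OpPhi; the second pass adds the
 * matching stores in each predecessor block.
 */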
static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   val->ssa = vtn_local_load(b, nir_deref_var_create(b, phi_var));

   return true;
}
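
/* Second pass: for each OpPhi, store each source value to the phi variable
 * at the end of the corresponding predecessor block, before its jump.
 */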
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
   assert(phi_entry);
   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
      struct vtn_block *pred =
         vtn_value(b, w[i + 1], vtn_value_type_block)->block;

      b->nb.cursor = nir_after_block_before_jump(pred->end_block);

      vtn_local_store(b, src, nir_deref_var_create(b, phi_var));
   }

   return true;
}
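
/* Emits NIR for one short-circuited branch type: a switch break clears the
 * fall-through variable, loop breaks/continues and returns become NIR
 * jumps, and a discard becomes the discard intrinsic.
 */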
static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      unreachable("Invalid branch type");
   }
}
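
/* Walks a list of CF nodes and emits NIR: blocks run their instructions
 * through the handler, ifs and loops map to nir_if/nir_loop, and switches
 * are lowered to a chain of ifs predicated on the selector and an explicit
 * fall-through variable.
 */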
static void
vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
                 nir_variable *switch_fall_var, bool *has_switch_break,
                 vtn_instruction_handler handler)
{
   list_for_each_entry(struct vtn_cf_node, node, cf_list, link) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = (struct vtn_block *)node;

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_block = nir_cursor_current_block(b->nb.cursor);

         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
            vtn_local_store(b, src,
                            nir_deref_var_create(b, b->impl->return_var));
         }

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = (struct vtn_if *)node;

         nir_if *if_stmt = nir_if_create(b->shader);
         if_stmt->condition =
            nir_src_for_ssa(vtn_ssa_value(b, vtn_if->condition)->def);
         nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);

         bool sw_break = false;

         b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->then_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->else_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;

            nir_if *switch_if = nir_if_create(b->shader);
            switch_if->condition =
               nir_src_for_ssa(nir_load_var(&b->nb, switch_fall_var));
            nir_cf_node_insert(b->nb.cursor, &switch_if->cf_node);

            b->nb.cursor = nir_after_cf_list(&switch_if->then_list);
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = (struct vtn_loop *)node;

         nir_loop *loop = nir_loop_create(b->shader);
         nir_cf_node_insert(b->nb.cursor, &loop->cf_node);

         b->nb.cursor = nir_after_cf_list(&loop->body);
         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);
            nir_if *cont_if = nir_if_create(b->shader);
            cont_if->condition = nir_src_for_ssa(nir_load_var(&b->nb, do_cont));
            nir_cf_node_insert(b->nb.cursor, &cont_if->cf_node);

            b->nb.cursor = nir_after_cf_list(&cont_if->then_list);
            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);

            b->nb.cursor = nir_after_cf_node(&cont_if->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);

            b->has_loop_continue = true;
         }

         b->nb.cursor = nir_after_cf_node(&loop->cf_node);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = (struct vtn_switch *)node;

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);

         /* Next, we gather up all of the conditions.  We have to do this
          * up-front because we also need to build an "any" condition so
          * that we can use !any for default.
          */
         const int num_cases = list_length(&vtn_switch->cases);
         NIR_VLA(nir_ssa_def *, conditions, num_cases);

         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;
         /* An accumulation of all conditions.  Used for the default */
         nir_ssa_def *any = NULL;

         int i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            if (cse->is_default) {
               conditions[i++] = NULL;
               continue;
            }

            nir_ssa_def *cond = NULL;
            nir_array_foreach(&cse->values, uint32_t, val) {
               nir_ssa_def *is_val =
                  nir_ieq(&b->nb, sel, nir_imm_int(&b->nb, *val));

               cond = cond ? nir_ior(&b->nb, cond, is_val) : is_val;
            }

            any = any ? nir_ior(&b->nb, any, cond) : cond;
            conditions[i++] = cond;
         }
         assert(i == num_cases);

         /* Now we can walk the list of cases and actually emit code */
         i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            /* Figure out the condition */
            nir_ssa_def *cond = conditions[i++];
            if (cse->is_default) {
               assert(cond == NULL);
               cond = nir_inot(&b->nb, any);
            }
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_if_create(b->nb.shader);
            case_if->condition = nir_src_for_ssa(cond);
            nir_cf_node_insert(b->nb.cursor, &case_if->cf_node);

            bool has_break = false;
            b->nb.cursor = nir_after_cf_list(&case_if->then_list);
            nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
            (void)has_break; /* We don't care */

            b->nb.cursor = nir_after_cf_node(&case_if->cf_node);
         }
         assert(i == num_cases);

         break;
      }

      default:
         unreachable("Invalid CF node type");
      }
   }
}
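
/* Emits a whole function: runs the structured emit pass over its CF tree,
 * fills in the phi stores, and repairs SSA if a loop continue body was
 * moved to the top of its loop.
 */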
void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   nir_builder_init(&b->nb, func->impl);
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->has_loop_continue = false;
   b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);
}