/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"

static struct vtn_pointer *
vtn_pointer_for_image_or_sampler_variable(struct vtn_builder *b,
                                          struct vtn_variable *var)
{
   assert(var->type->base_type == vtn_base_type_image ||
          var->type->base_type == vtn_base_type_sampler);

   struct vtn_type *ptr_type = rzalloc(b, struct vtn_type);
   ptr_type->base_type = vtn_base_type_pointer;
   ptr_type->storage_class = SpvStorageClassUniformConstant;
   ptr_type->deref = var->type;

   return vtn_pointer_for_variable(b, var, ptr_type);
}
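
/* First CFG pass.  This records functions, parameters, labels, and each
 * block's raw merge and branch words without interpreting any control flow;
 * vtn_cfg_walk_blocks structures the CFG in a second pass.
 */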
static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      list_inithead(&b->func->body);
      b->func->control = w[3];

      MAYBE_UNUSED const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      b->func->type = vtn_value(b, w[4], vtn_value_type_type)->type;
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      func->num_params = func_type->length;
      func->params = ralloc_array(b->shader, nir_parameter, func->num_params);
      unsigned np = 0;
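      /* Build the NIR parameter array.  np is the NIR parameter index and
       * can run ahead of the SPIR-V index i because a sampled image expands
       * into two NIR parameters: the image followed by the sampler.
       */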
      for (unsigned i = 0; i < func_type->length; i++) {
         if (func_type->params[i]->base_type == vtn_base_type_pointer &&
             func_type->params[i]->type == NULL) {
            func->params[np].type = func_type->params[i]->deref->type;
            func->params[np].param_type = nir_parameter_inout;
            np++;
         } else if (func_type->params[i]->base_type ==
                    vtn_base_type_sampled_image) {
            /* Sampled images are actually two parameters */
            func->params = reralloc(b->shader, func->params,
                                    nir_parameter, ++func->num_params);
            func->params[np].type = func_type->params[i]->type;
            func->params[np].param_type = nir_parameter_in;
            np++;
            func->params[np].type = glsl_bare_sampler_type();
            func->params[np].param_type = nir_parameter_in;
            np++;
         } else {
            func->params[np].type = func_type->params[i]->type;
            func->params[np].param_type = nir_parameter_in;
            np++;
         }
      }
      assert(np == func->num_params);

      func->return_type = func_type->return_type->type;

      b->func->impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, func->impl);
      b->nb.cursor = nir_before_cf_list(&b->func->impl->body);

      b->func_param_idx = 0;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      vtn_assert(b->func_param_idx < b->func->impl->num_params);
      nir_variable *param = b->func->impl->params[b->func_param_idx++];

      if (type->base_type == vtn_base_type_pointer && type->type == NULL) {
         struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
         vtn_var->type = type->deref;
         vtn_var->var = param;

         vtn_assert(vtn_var->type->type == param->type);

         struct vtn_type *without_array = vtn_var->type;
         while (glsl_type_is_array(without_array->type))
            without_array = without_array->array_element;

         if (glsl_type_is_image(without_array->type)) {
            vtn_var->mode = vtn_variable_mode_uniform;
            param->interface_type = without_array->type;
         } else if (glsl_type_is_sampler(without_array->type)) {
            vtn_var->mode = vtn_variable_mode_uniform;
            param->interface_type = without_array->type;
         } else {
            vtn_var->mode = vtn_variable_mode_param;
         }

         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_pointer);

         /* Name the parameter so it shows up nicely in NIR */
         param->name = ralloc_strdup(param, val->name);

         val->pointer = vtn_pointer_for_variable(b, vtn_var, type);
      } else if (type->base_type == vtn_base_type_image ||
                 type->base_type == vtn_base_type_sampler ||
                 type->base_type == vtn_base_type_sampled_image) {
         struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
         vtn_var->type = type;
         vtn_var->var = param;
         param->interface_type = param->type;

         if (type->base_type == vtn_base_type_sampled_image) {
            /* Sampled images are actually two parameters.  The first is the
             * image and the second is the sampler.
             */
            struct vtn_value *val =
               vtn_push_value(b, w[2], vtn_value_type_sampled_image);

            /* Name the parameter so it shows up nicely in NIR */
            param->name = ralloc_strdup(param, val->name);

            /* Adjust the type of the image variable to the image type */
            vtn_var->type = type->image;

            /* Now get the sampler parameter and set up its variable */
            param = b->func->impl->params[b->func_param_idx++];
            struct vtn_variable *sampler_var = rzalloc(b, struct vtn_variable);
            sampler_var->type = rzalloc(b, struct vtn_type);
            sampler_var->type->base_type = vtn_base_type_sampler;
            sampler_var->type->type = glsl_bare_sampler_type();
            sampler_var->var = param;
            param->interface_type = param->type;
            param->name = ralloc_strdup(param, val->name);

            val->sampled_image = ralloc(b, struct vtn_sampled_image);
            val->sampled_image->type = type;
            val->sampled_image->image =
               vtn_pointer_for_image_or_sampler_variable(b, vtn_var);
            val->sampled_image->sampler =
               vtn_pointer_for_image_or_sampler_variable(b, sampler_var);
         } else {
            struct vtn_value *val =
               vtn_push_value(b, w[2], vtn_value_type_pointer);

            /* Name the parameter so it shows up nicely in NIR */
            param->name = ralloc_strdup(param, val->name);

            val->pointer =
               vtn_pointer_for_image_or_sampler_variable(b, vtn_var);
         }
      } else {
         /* We're a regular SSA value. */
         struct vtn_ssa_value *param_ssa =
            vtn_local_load(b, nir_build_deref_var(&b->nb, param));
         struct vtn_value *val = vtn_push_ssa(b, w[2], type, param_ssa);

         /* Name the parameter so it shows up nicely in NIR */
         param->name = ralloc_strdup(param, val->name);
      }
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
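
/* Records one OpSwitch target block.  A single block may be the target of
 * several literals as well as the default, so literal values accumulate in
 * the case's dynarray and the default is tracked as a flag on the case.
 */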
static void
vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
             struct vtn_block *break_block,
             uint32_t block_id, uint64_t val, bool is_default)
{
   struct vtn_block *case_block =
      vtn_value(b, block_id, vtn_value_type_block)->block;

   /* Don't create dummy cases that just break */
   if (case_block == break_block)
      return;

   if (case_block->switch_case == NULL) {
      struct vtn_case *c = ralloc(b, struct vtn_case);

      list_inithead(&c->body);
      c->start_block = case_block;
      c->fallthrough = NULL;
      util_dynarray_init(&c->values, b);
      c->is_default = false;
      c->visited = false;

      list_addtail(&c->link, &swtch->cases);

      case_block->switch_case = c;
   }

   if (is_default)
      case_block->switch_case->is_default = true;
   else
      util_dynarray_append(&case_block->switch_case->values, uint64_t, val);
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->link, &cse->fallthrough->link);
   } else {
      list_add(&cse->link, &swtch->cases);
   }
}
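
/* Classifies a branch destination relative to the enclosing switch and
 * loop.  Anything other than vtn_branch_type_none ends the current run of
 * blocks in vtn_cfg_walk_blocks.  As a side effect, branching to another
 * case's start block records the fall-through edge on the current case.
 */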
static enum vtn_branch_type
vtn_get_branch_type(struct vtn_builder *b,
                    struct vtn_block *block,
                    struct vtn_case *swcase, struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont)
{
   if (block->switch_case) {
      /* This branch is actually a fallthrough */
      vtn_assert(swcase->fallthrough == NULL ||
                 swcase->fallthrough == block->switch_case);
      swcase->fallthrough = block->switch_case;
      return vtn_branch_type_switch_fallthrough;
   } else if (block == loop_break) {
      return vtn_branch_type_loop_break;
   } else if (block == loop_cont) {
      return vtn_branch_type_loop_continue;
   } else if (block == switch_break) {
      return vtn_branch_type_switch_break;
   } else {
      return vtn_branch_type_none;
   }
}
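
/* Second CFG pass.  Walks blocks from start to end following branch and
 * merge information and builds the structured tree of vtn_cf_nodes
 * (blocks, ifs, loops, and switches) that vtn_emit_cf_list later turns
 * into NIR control flow.
 */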
static void
vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
                    struct vtn_block *start, struct vtn_case *switch_case,
                    struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont,
                    struct vtn_block *end)
{
   struct vtn_block *block = start;
   while (block != end) {
      if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
          !block->loop) {
         struct vtn_loop *loop = ralloc(b, struct vtn_loop);

         loop->node.type = vtn_cf_node_type_loop;
         list_inithead(&loop->body);
         list_inithead(&loop->cont_body);
         loop->control = block->merge[3];

         list_addtail(&loop->node.link, cf_list);
         block->loop = loop;

         struct vtn_block *new_loop_break =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;
         struct vtn_block *new_loop_cont =
            vtn_value(b, block->merge[2], vtn_value_type_block)->block;

         /* Note: This recursive call will start with the current block as
          * its start block.  If we weren't careful, we would get here
          * again and end up in infinite recursion.  This is why we set
          * block->loop above and check for it before creating one.  This
          * way, we only create the loop once and the second call that
          * tries to handle this loop goes to the cases below and gets
          * handled as a regular block.
          *
          * Note: When we make the recursive walk calls, we pass NULL for
          * the switch break since you have to break out of the loop first.
          * We do, however, still pass the current switch case because it's
          * possible that the merge block for the loop is the start of
          * another case.
          */
         vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
                             new_loop_break, new_loop_cont, NULL);
         vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
                             new_loop_break, NULL, block);

         enum vtn_branch_type branch_type =
            vtn_get_branch_type(b, new_loop_break, switch_case, switch_break,
                                loop_break, loop_cont);

         if (branch_type != vtn_branch_type_none) {
            /* Stop walking through the CFG when this inner loop's break block
             * ends up as the same block as the outer loop's continue block
             * because we are already going to visit it.
             */
            vtn_assert(branch_type == vtn_branch_type_loop_continue);
            return;
         }

         block = new_loop_break;
         continue;
      }

      vtn_assert(block->node.link.next == NULL);
      list_addtail(&block->node.link, cf_list);

      switch (*block->branch & SpvOpCodeMask) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, block->branch[1], vtn_value_type_block)->block;

         block->branch_type = vtn_get_branch_type(b, branch_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (block->branch_type != vtn_branch_type_none)
            return;

         block = branch_block;
         continue;
      }

      case SpvOpReturn:
      case SpvOpReturnValue:
         block->branch_type = vtn_branch_type_return;
         return;

      case SpvOpKill:
         block->branch_type = vtn_branch_type_discard;
         return;

      case SpvOpBranchConditional: {
         struct vtn_block *then_block =
            vtn_value(b, block->branch[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, block->branch[3], vtn_value_type_block)->block;

         struct vtn_if *if_stmt = ralloc(b, struct vtn_if);

         if_stmt->node.type = vtn_cf_node_type_if;
         if_stmt->condition = block->branch[1];
         list_inithead(&if_stmt->then_body);
         list_inithead(&if_stmt->else_body);

         list_addtail(&if_stmt->node.link, cf_list);

         if (block->merge &&
             (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
            if_stmt->control = block->merge[2];
         }

         if_stmt->then_type = vtn_get_branch_type(b, then_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);
         if_stmt->else_type = vtn_get_branch_type(b, else_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (then_block == else_block) {
            block->branch_type = if_stmt->then_type;
            if (block->branch_type == vtn_branch_type_none) {
               block = then_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type == vtn_branch_type_none &&
                    if_stmt->else_type == vtn_branch_type_none) {
            /* Neither side of the if is something we can short-circuit. */
            vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge[1], vtn_value_type_block)->block;

            vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);
            vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);

            enum vtn_branch_type merge_type =
               vtn_get_branch_type(b, merge_block, switch_case, switch_break,
                                   loop_break, loop_cont);
            if (merge_type == vtn_branch_type_none) {
               block = merge_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type != vtn_branch_type_none &&
                    if_stmt->else_type != vtn_branch_type_none) {
            /* Both sides were short-circuited.  We're done here. */
            return;
         } else {
            /* Exactly one side of the branch could be short-circuited.
             * We set the branch up as a predicated break/continue and we
             * continue on with the other side as if it were what comes
             * after the if.
             */
            if (if_stmt->then_type == vtn_branch_type_none) {
               block = then_block;
            } else {
               block = else_block;
            }
            continue;
         }
         vtn_fail("Should have returned or continued");
      }

      case SpvOpSwitch: {
         vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
         struct vtn_block *break_block =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;

         struct vtn_switch *swtch = ralloc(b, struct vtn_switch);

         swtch->node.type = vtn_cf_node_type_switch;
         swtch->selector = block->branch[1];
         list_inithead(&swtch->cases);

         list_addtail(&swtch->node.link, cf_list);
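
         /* The OpSwitch operands are laid out as
          *
          *    OpSwitch <selector> <default-label> [<literal> <label>]...
          *
          * The default label at branch[2] carries no literal, which is why
          * the first iteration of the loop below is treated specially via
          * is_default.
          */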
         /* First, we go through and record all of the cases. */
         const uint32_t *branch_end =
            block->branch + (block->branch[0] >> SpvWordCountShift);

         struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
         vtn_fail_if(!cond_val->type ||
                     cond_val->type->base_type != vtn_base_type_scalar,
                     "Selector of OpSwitch must have a type of OpTypeInt");

         nir_alu_type cond_type =
            nir_get_nir_type_for_glsl_type(cond_val->type->type);
         vtn_fail_if(nir_alu_type_get_base_type(cond_type) != nir_type_int &&
                     nir_alu_type_get_base_type(cond_type) != nir_type_uint,
                     "Selector of OpSwitch must have a type of OpTypeInt");

         bool is_default = true;
         const unsigned bitsize = nir_alu_type_get_type_size(cond_type);
         for (const uint32_t *w = block->branch + 2; w < branch_end;) {
            uint64_t literal = 0;
            if (!is_default) {
               if (bitsize <= 32) {
                  literal = *(w++);
               } else {
                  assert(bitsize == 64);
                  literal = vtn_u64_literal(w);
                  w += 2;
               }
            }

            uint32_t block_id = *(w++);

            vtn_add_case(b, swtch, break_block, block_id, literal, is_default);
            is_default = false;
         }

         /* Now, we go through and walk the blocks.  While we walk through
          * the blocks, we also gather the much-needed fall-through
          * information.
          */
         list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) {
            vtn_assert(cse->start_block != break_block);
            vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
                                break_block, loop_break, loop_cont, NULL);
         }

         /* Finally, we walk over all of the cases one more time and put
          * them in fall-through order.
          */
         for (const uint32_t *w = block->branch + 2; w < branch_end;) {
            struct vtn_block *case_block =
               vtn_value(b, *w, vtn_value_type_block)->block;

            if (bitsize <= 32) {
               w += 2;
            } else {
               assert(bitsize == 64);
               w += 3;
            }

            if (case_block == break_block)
               continue;

            vtn_assert(case_block->switch_case);

            vtn_order_case(swtch, case_block->switch_case);
         }

         enum vtn_branch_type branch_type =
            vtn_get_branch_type(b, break_block, switch_case, NULL,
                                loop_break, loop_cont);

         if (branch_type != vtn_branch_type_none) {
            /* It is possible that the break is actually the continue block
             * for the containing loop.  In this case, we need to bail and let
             * the loop parsing code handle the continue properly.
             */
            vtn_assert(branch_type == vtn_branch_type_loop_continue);
            return;
         }

         block = break_block;
         continue;
      }

      case SpvOpUnreachable:
         return;

      default:
         vtn_fail("Unhandled opcode");
      }
   }
}
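
/* Entry point for CFG construction: runs the prepass over the entire
 * instruction stream, then structures the CFG of every function that has a
 * start block.
 */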
void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      vtn_cfg_walk_blocks(b, &func->body, func->start_block,
                          NULL, NULL, NULL, NULL, NULL);
   }
}

static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa(b, w[2], type,
                vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var)));

   return true;
}
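
/* Second phi pass: for every OpPhi, emit a store of the corresponding
 * source into the phi's temporary variable at the end of each predecessor
 * block.  The cursor is placed right after the predecessor's end_nop
 * marker, so the stores land before any jump emitted for that block.
 */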
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
   vtn_assert(phi_entry);
   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred =
         vtn_value(b, w[i + 1], vtn_value_type_block)->block;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var));
   }

   return true;
}

static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      vtn_fail("Invalid branch type");
   }
}
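
/* Emits NIR control flow for one structured CF list.  switch_fall_var and
 * has_switch_break thread the enclosing switch's fall-through state into
 * nested nodes so that a switch break inside an if correctly cuts off the
 * rest of the case.
 */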
static void
vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
                 nir_variable *switch_fall_var, bool *has_switch_break,
                 vtn_instruction_handler handler)
{
   list_for_each_entry(struct vtn_cf_node, node, cf_list, link) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = (struct vtn_block *)node;

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
            vtn_local_store(b, src,
                            nir_build_deref_var(&b->nb, b->nb.impl->return_var));
         }

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = (struct vtn_if *)node;
         bool sw_break = false;

         nir_if *nif =
            nir_push_if(&b->nb, vtn_ssa_value(b, vtn_if->condition)->def);
         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->then_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->else_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = (struct vtn_loop *)node;

         nir_loop *loop = nir_push_loop(&b->nb);
         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);

            b->has_loop_continue = true;
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = (struct vtn_switch *)node;

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);

         /* Next, we gather up all of the conditions.  We have to do this
          * up-front because we also need to build an "any" condition so
          * that we can use !any for default.
          */
         const int num_cases = list_length(&vtn_switch->cases);
         NIR_VLA(nir_ssa_def *, conditions, num_cases);

         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;
         /* An accumulation of all conditions.  Used for the default */
         nir_ssa_def *any = NULL;

         int i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            if (cse->is_default) {
               conditions[i++] = NULL;
               continue;
            }

            nir_ssa_def *cond = NULL;
            util_dynarray_foreach(&cse->values, uint64_t, val) {
               nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
               nir_ssa_def *is_val = nir_ieq(&b->nb, sel, imm);

               cond = cond ? nir_ior(&b->nb, cond, is_val) : is_val;
            }

            any = any ? nir_ior(&b->nb, any, cond) : cond;
            conditions[i++] = cond;
         }
         vtn_assert(i == num_cases);

         /* Now we can walk the list of cases and actually emit code */
         i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            /* Figure out the condition */
            nir_ssa_def *cond = conditions[i++];
            if (cse->is_default) {
               vtn_assert(cond == NULL);
               cond = nir_inot(&b->nb, any);
            }
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }
         vtn_assert(i == num_cases);

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}
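
/* Emits a whole function: runs the structured emitter over the function
 * body, fills in the phi stores with a second pass over the instructions,
 * and repairs SSA if any loop continue body was moved ahead of its loop.
 */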
void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   nir_builder_init(&b->nb, func->impl);
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->has_loop_continue = false;
   b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);

   func->emitted = true;
}