/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}
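
/* Loads a function parameter as a vtn_pointer. If the parameter is an
 * image or sampler rather than an actual pointer, we wrap it in a
 * synthetic UniformConstant pointer type first.
 */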
static struct vtn_pointer *
vtn_load_param_pointer(struct vtn_builder *b,
                       struct vtn_type *param_type,
                       uint32_t param_idx)
{
   struct vtn_type *ptr_type = param_type;
   if (param_type->base_type != vtn_base_type_pointer) {
      assert(param_type->base_type == vtn_base_type_image ||
             param_type->base_type == vtn_base_type_sampler);
      ptr_type = rzalloc(b, struct vtn_type);
      ptr_type->base_type = vtn_base_type_pointer;
      ptr_type->deref = param_type;
      ptr_type->storage_class = SpvStorageClassUniformConstant;
   }

   return vtn_pointer_from_ssa(b, nir_load_param(&b->nb, param_idx), ptr_type);
}
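
/* Returns the number of NIR function parameters required to represent a
 * value of the given type. Composites are flattened element-by-element,
 * and a sampled image counts as two parameters: one for the image and one
 * for the sampler.
 */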
static unsigned
vtn_type_count_function_params(struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      return type->length * vtn_type_count_function_params(type->array_element);

   case vtn_base_type_struct: {
      unsigned count = 0;
      for (unsigned i = 0; i < type->length; i++)
         count += vtn_type_count_function_params(type->members[i]);
      return count;
   }

   case vtn_base_type_sampled_image:
      return 2;

   default:
      return 1;
   }
}
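
/* Appends the nir_parameter(s) for the given type to the function's
 * parameter array, mirroring the flattening done by
 * vtn_type_count_function_params().
 */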
static void
vtn_type_add_to_function_params(struct vtn_type *type,
                                nir_function *func,
                                unsigned *param_idx)
{
   static const nir_parameter nir_deref_param = {
      .num_components = 1,
      .bit_size = 32,
   };

   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++)
         vtn_type_add_to_function_params(type->array_element, func, param_idx);
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++)
         vtn_type_add_to_function_params(type->members[i], func, param_idx);
      break;

   case vtn_base_type_sampled_image:
      func->params[(*param_idx)++] = nir_deref_param;
      func->params[(*param_idx)++] = nir_deref_param;
      break;

   case vtn_base_type_image:
   case vtn_base_type_sampler:
      func->params[(*param_idx)++] = nir_deref_param;
      break;

   case vtn_base_type_pointer:
      if (type->type) {
         func->params[(*param_idx)++] = (nir_parameter) {
            .num_components = glsl_get_vector_elements(type->type),
            .bit_size = glsl_get_bit_size(type->type),
         };
      } else {
         func->params[(*param_idx)++] = nir_deref_param;
      }
      break;

   default:
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type->type),
         .bit_size = glsl_get_bit_size(type->type),
      };
      break;
   }
}
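
/* Adds the given SSA value to the call's parameter list, recursing into
 * composite types so that each scalar or vector becomes its own source.
 */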
static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 struct vtn_type *type,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          type->array_element,
                                          call, param_idx);
      }
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          type->members[i],
                                          call, param_idx);
      }
      break;

   default:
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
      break;
   }
}
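
/* Fills out an SSA value by loading each of its components from the
 * corresponding NIR function parameters.
 */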
static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  struct vtn_type *type,
                                  unsigned *param_idx)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_load_function_param(b, value->elems[i],
                                           type->array_element, param_idx);
      }
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_load_function_param(b, value->elems[i],
                                           type->members[i], param_idx);
      }
      break;

   default:
      value->def = nir_load_param(&b->nb, (*param_idx)++);
      break;
   }
}
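
/* Emits a nir_call_instr for OpFunctionCall. The return value, if any, is
 * passed by reference through a temporary variable as the first parameter.
 */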
static void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      struct vtn_type *arg_type = vtn_callee->type->params[i];
      unsigned arg_id = w[4 + i];

      if (arg_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_sampled_image *sampled_image =
            vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;

         call->params[param_idx++] =
            nir_src_for_ssa(vtn_pointer_to_ssa(b, sampled_image->image));
         call->params[param_idx++] =
            nir_src_for_ssa(vtn_pointer_to_ssa(b, sampled_image->sampler));
      } else if (arg_type->base_type == vtn_base_type_pointer ||
                 arg_type->base_type == vtn_base_type_image ||
                 arg_type->base_type == vtn_base_type_sampler) {
         struct vtn_pointer *pointer =
            vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
         call->params[param_idx++] =
            nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
      } else {
         vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, arg_id),
                                          arg_type, call, &param_idx);
      }
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, ret_deref, 0));
   }
}
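
/* First-pass instruction handler which walks the function-related opcodes
 * (OpFunction, OpFunctionParameter, OpLabel, merges, and branches) and
 * records the raw block structure before the CFG is built.
 */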
static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      b->func->node.type = vtn_cf_node_type_function;
      b->func->node.parent = NULL;
      list_inithead(&b->func->body);
      b->func->control = w[3];

      UNUSED const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      b->func->type = vtn_value(b, w[4], vtn_value_type_type)->type;
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += vtn_type_count_function_params(func_type->params[i]);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         vtn_type_add_to_function_params(func_type->params[i], func, &idx);
      assert(idx == num_params);

      b->func->impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, func->impl);
      b->nb.cursor = nir_before_cf_list(&b->func->impl->body);
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      vtn_assert(b->func_param_idx < b->func->impl->function->num_params);

      if (type->base_type == vtn_base_type_sampled_image) {
         /* Sampled images are actually two parameters. The first is the
          * image and the second is the sampler.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);

         val->sampled_image = ralloc(b, struct vtn_sampled_image);

         struct vtn_type *image_type = rzalloc(b, struct vtn_type);
         image_type->base_type = vtn_base_type_image;
         image_type->type = type->type;

         struct vtn_type *sampler_type = rzalloc(b, struct vtn_type);
         sampler_type->base_type = vtn_base_type_sampler;
         sampler_type->type = glsl_bare_sampler_type();

         val->sampled_image->image =
            vtn_load_param_pointer(b, image_type, b->func_param_idx++);
         val->sampled_image->sampler =
            vtn_load_param_pointer(b, sampler_type, b->func_param_idx++);
      } else if (type->base_type == vtn_base_type_pointer &&
                 type->type != NULL) {
         /* This is a pointer with an actual storage type */
         nir_ssa_def *ssa_ptr = nir_load_param(&b->nb, b->func_param_idx++);
         vtn_push_value_pointer(b, w[2], vtn_pointer_from_ssa(b, ssa_ptr, type));
      } else if (type->base_type == vtn_base_type_pointer ||
                 type->base_type == vtn_base_type_image ||
                 type->base_type == vtn_base_type_sampler) {
         vtn_push_value_pointer(b, w[2],
            vtn_load_param_pointer(b, type, b->func_param_idx++));
      } else {
         /* We're a regular SSA value. */
         struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
         vtn_ssa_value_load_function_param(b, value, type, &b->func_param_idx);
         vtn_push_ssa(b, w[2], type, value);
      }
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function. In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->node.link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}
/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->node.link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to. This ensures that fallthroughs come one after
       * the other. These two can never get separated because that would
       * imply something else falling through to the same case. Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->node.link, &cse->fallthrough->node.link);
   } else {
      list_add(&cse->node.link, &swtch->cases);
   }
}
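
/* Re-sorts the entire case list of a switch into fall-through order by
 * running the DFS above on each case.
 */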
static void
vtn_switch_order_cases(struct vtn_switch *swtch)
{
   struct list_head cases;
   list_replace(&swtch->cases, &cases);
   list_inithead(&swtch->cases);
   while (!list_is_empty(&cases)) {
      struct vtn_case *cse =
         list_first_entry(&cases, struct vtn_case, node.link);
      vtn_order_case(swtch, cse);
   }
}
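
/* Records cf_node as the construct for which this block is the merge
 * target, failing if the block is already claimed by another header.
 */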
static void
vtn_block_set_merge_cf_node(struct vtn_builder *b, struct vtn_block *block,
                            struct vtn_cf_node *cf_node)
{
   vtn_fail_if(block->merge_cf_node != NULL,
               "The merge block declared by a header block cannot be a "
               "merge block declared by any other header block.");

   block->merge_cf_node = cf_node;
}
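
/* Declares vtn_cf_node_find_<type>() helpers which walk up the CF tree to
 * the closest enclosing node of the given type, or NULL if there is none.
 */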
#define VTN_DECL_CF_NODE_FIND(_type)                        \
static inline struct vtn_##_type *                          \
vtn_cf_node_find_##_type(struct vtn_cf_node *node)          \
{                                                           \
   while (node && node->type != vtn_cf_node_type_##_type)   \
      node = node->parent;                                  \
   return (struct vtn_##_type *)node;                       \
}

VTN_DECL_CF_NODE_FIND(if)
VTN_DECL_CF_NODE_FIND(loop)
VTN_DECL_CF_NODE_FIND(case)
VTN_DECL_CF_NODE_FIND(switch)
VTN_DECL_CF_NODE_FIND(function)
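
/* Classifies a branch from somewhere inside cf_parent to target_block as a
 * back-edge, continue, fall-through, break, or normal branch.
 */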
static enum vtn_branch_type
vtn_handle_branch(struct vtn_builder *b,
                  struct vtn_cf_node *cf_parent,
                  struct vtn_block *target_block)
{
   struct vtn_loop *loop = vtn_cf_node_find_loop(cf_parent);

   /* Detect a loop back-edge first. That way none of the code below
    * accidentally operates on a loop back-edge.
    */
   if (loop && target_block == loop->header_block)
      return vtn_branch_type_loop_back_edge;

   /* Try to detect fall-through */
   if (target_block->switch_case) {
      /* When it comes to handling switch cases, we can break calls to
       * vtn_handle_branch into two cases: calls from within a case construct
       * and calls for the jump to each case construct. In the second case,
       * cf_parent is the vtn_switch itself and vtn_cf_node_find_case() will
       * return the outer switch case in which this switch is contained. It's
       * fine if the target block is a switch case from an outer switch as
       * long as it is also the switch break for this switch.
       */
      struct vtn_case *switch_case = vtn_cf_node_find_case(cf_parent);

      /* This doesn't get called for the OpSwitch */
      vtn_fail_if(switch_case == NULL,
                  "A switch case can only be entered through an OpSwitch or "
                  "falling through from another switch case.");

      /* Because block->switch_case is only set on the entry block for a given
       * switch case, we only ever get here if we're jumping to the start of a
       * switch case. It's possible, however, that a switch case could jump
       * to itself via a back-edge. That *should* get caught by the loop
       * handling case above but if we have a back edge without a loop merge,
       * we could end up here.
       */
      vtn_fail_if(target_block->switch_case == switch_case,
                  "A switch cannot fall-through to itself. Likely, there is "
                  "a back-edge which is not to a loop header.");

      vtn_fail_if(target_block->switch_case->node.parent !=
                     switch_case->node.parent,
                  "A switch case fall-through must come from the same "
                  "OpSwitch construct");

      vtn_fail_if(switch_case->fallthrough != NULL &&
                  switch_case->fallthrough != target_block->switch_case,
                  "Each case construct can have at most one branch to "
                  "another case construct");

      switch_case->fallthrough = target_block->switch_case;

      /* We don't immediately return vtn_branch_type_switch_fallthrough
       * because it may also be a loop or switch break for an inner loop or
       * switch and that takes precedence.
       */
   }

   if (loop && target_block == loop->cont_block)
      return vtn_branch_type_loop_continue;

   /* We walk blocks as a breadth-first search on the control-flow construct
    * tree where, when we find a construct, we add the vtn_cf_node for that
    * construct and continue iterating at the merge target block (if any).
    * Therefore, we want merges whose parent == cf_parent to be treated as
    * regular branches. We only want to consider merges if they break out
    * of the current CF construct.
    */
   if (target_block->merge_cf_node != NULL &&
       target_block->merge_cf_node->parent != cf_parent) {
      switch (target_block->merge_cf_node->type) {
      case vtn_cf_node_type_if:
         for (struct vtn_cf_node *node = cf_parent;
              node != target_block->merge_cf_node; node = node->parent) {
            vtn_fail_if(node == NULL || node->type != vtn_cf_node_type_if,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of a "
                        "selection construct");

            struct vtn_if *if_stmt = vtn_cf_node_as_if(node);

            /* This should be guaranteed by our iteration */
            assert(if_stmt->merge_block != target_block);

            vtn_fail_if(if_stmt->merge_block != NULL,
                        "Branching to the merge block of a selection "
                        "construct can only be used to break out of the "
                        "inner most nested selection level");
         }
         return vtn_branch_type_if_merge;

      case vtn_cf_node_type_loop:
         vtn_fail_if(target_block->merge_cf_node != &loop->node,
                     "Loop breaks can only break out of the inner most "
                     "nested loop level");
         return vtn_branch_type_loop_break;

      case vtn_cf_node_type_switch: {
         struct vtn_switch *swtch = vtn_cf_node_find_switch(cf_parent);
         vtn_fail_if(target_block->merge_cf_node != &swtch->node,
                     "Switch breaks can only break out of the inner most "
                     "nested switch level");
         return vtn_branch_type_switch_break;
      }

      default:
         unreachable("Invalid CF node type for a merge");
      }
   }

   if (target_block->switch_case)
      return vtn_branch_type_switch_fallthrough;

   return vtn_branch_type_none;
}
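
/* A work-list entry describing a CF list to be filled in, starting at
 * start_block, underneath cf_parent.
 */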
struct vtn_cfg_work_item {
   struct list_head link;

   struct vtn_cf_node *cf_parent;
   struct list_head *cf_list;
   struct vtn_block *start_block;
};

static void
vtn_add_cfg_work_item(struct vtn_builder *b,
                      struct list_head *work_list,
                      struct vtn_cf_node *cf_parent,
                      struct list_head *cf_list,
                      struct vtn_block *start_block)
{
   struct vtn_cfg_work_item *work = ralloc(b, struct vtn_cfg_work_item);
   work->cf_parent = cf_parent;
   work->cf_list = cf_list;
   work->start_block = start_block;
   list_addtail(&work->link, work_list);
}
/* Processes a block and returns the next block to process or NULL if we've
 * reached the end of the construct.
 */
static struct vtn_block *
vtn_process_block(struct vtn_builder *b,
                  struct list_head *work_list,
                  struct vtn_cf_node *cf_parent,
                  struct list_head *cf_list,
                  struct vtn_block *block)
{
   if (!list_is_empty(cf_list)) {
      /* vtn_process_block() acts like an iterator: it processes the given
       * block and then returns the next block to process. For a given
       * control-flow construct, vtn_build_cfg() calls vtn_process_block()
       * repeatedly until it finally returns NULL. Therefore, we know that
       * the only blocks on which vtn_process_block() can be called are either
       * the first block in a construct or a block that vtn_process_block()
       * returned for the current construct. If cf_list is empty then we know
       * that we're processing the first block in the construct and we have to
       * add it to the list.
       *
       * If cf_list is not empty, then it must be the block returned by the
       * previous call to vtn_process_block(). We know a priori that
       * vtn_process_block only returns either normal branches
       * (vtn_branch_type_none) or merge target blocks.
       */
      switch (vtn_handle_branch(b, cf_parent, block)) {
      case vtn_branch_type_none:
         /* For normal branches, we want to process them and add them to the
          * current construct. Merge target blocks also look like normal
          * branches from the perspective of this construct. See also
          * vtn_handle_branch().
          */
         break;

      case vtn_branch_type_loop_continue:
      case vtn_branch_type_switch_fallthrough:
         /* The two cases where we can get early exits from a construct that
          * are not to that construct's merge target are loop continues and
          * switch fall-throughs. In these cases, we need to break out of the
          * current construct by returning NULL.
          */
         return NULL;

      default:
         /* The only way we can get here is if something was used as two kinds
          * of merges at the same time and that's illegal.
          */
         vtn_fail("A block was used as a merge target from two or more "
                  "structured control-flow constructs");
      }
   }

   /* Once a block has been processed, it is placed into a CF list and the
    * list link will point to something non-null. If we see a node we've
    * already processed here, it either exists in multiple functions or it's
    * an invalid CFG.
    */
   if (block->node.parent != NULL) {
      vtn_fail_if(vtn_cf_node_find_function(&block->node) !=
                     vtn_cf_node_find_function(cf_parent),
                  "A block cannot exist in two functions at the "
                  "same time");

      vtn_fail("Invalid back or cross-edge in the CFG");
   }

   if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
       block->loop == NULL) {
      vtn_fail_if((*block->branch & SpvOpCodeMask) != SpvOpBranch &&
                  (*block->branch & SpvOpCodeMask) != SpvOpBranchConditional,
                  "An OpLoopMerge instruction must immediately precede "
                  "either an OpBranch or OpBranchConditional instruction.");

      struct vtn_loop *loop = rzalloc(b, struct vtn_loop);

      loop->node.type = vtn_cf_node_type_loop;
      loop->node.parent = cf_parent;
      list_inithead(&loop->body);
      list_inithead(&loop->cont_body);
      loop->header_block = block;
      loop->break_block = vtn_block(b, block->merge[1]);
      loop->cont_block = vtn_block(b, block->merge[2]);
      loop->control = block->merge[3];

      list_addtail(&loop->node.link, cf_list);
      block->loop = loop;

      /* Note: The work item for the main loop body will start with the
       * current block as its start block. If we weren't careful, we would
       * get here again and end up in an infinite loop. This is why we set
       * block->loop above and check for it before creating one. This way,
       * we only create the loop once and the second iteration that tries to
       * handle this loop goes to the cases below and gets handled as a
       * regular block.
       */
      vtn_add_cfg_work_item(b, work_list, &loop->node,
                            &loop->body, loop->header_block);

      /* For continue targets, SPIR-V guarantees the following:
       *
       *  - the Continue Target must dominate the back-edge block
       *  - the back-edge block must post dominate the Continue Target
       *
       * If the header block is the same as the continue target, this
       * condition is trivially satisfied and there is no real continue
       * section.
       */
      if (loop->cont_block != loop->header_block) {
         vtn_add_cfg_work_item(b, work_list, &loop->node,
                               &loop->cont_body, loop->cont_block);
      }

      vtn_block_set_merge_cf_node(b, loop->break_block, &loop->node);

      return loop->break_block;
   }

   /* Add the block to the CF list */
   block->node.parent = cf_parent;
   list_addtail(&block->node.link, cf_list);

   switch (*block->branch & SpvOpCodeMask) {
   case SpvOpBranch: {
      struct vtn_block *branch_block = vtn_block(b, block->branch[1]);

      block->branch_type = vtn_handle_branch(b, cf_parent, branch_block);

      if (block->branch_type == vtn_branch_type_none)
         return branch_block;
      else
         return NULL;
   }

   case SpvOpReturn:
   case SpvOpReturnValue:
      block->branch_type = vtn_branch_type_return;
      return NULL;

   case SpvOpKill:
      block->branch_type = vtn_branch_type_discard;
      return NULL;

   case SpvOpBranchConditional: {
      struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
      vtn_fail_if(!cond_val->type ||
                  cond_val->type->base_type != vtn_base_type_scalar ||
                  cond_val->type->type != glsl_bool_type(),
                  "Condition must be a Boolean type scalar");

      struct vtn_block *then_block = vtn_block(b, block->branch[2]);
      struct vtn_block *else_block = vtn_block(b, block->branch[3]);

      if (then_block == else_block) {
         /* This is uncommon but it can happen. We treat this the same way as
          * an unconditional branch.
          */
         block->branch_type = vtn_handle_branch(b, cf_parent, then_block);

         if (block->branch_type == vtn_branch_type_none)
            return then_block;
         else
            return NULL;
      }

      struct vtn_if *if_stmt = rzalloc(b, struct vtn_if);

      if_stmt->node.type = vtn_cf_node_type_if;
      if_stmt->node.parent = cf_parent;
      if_stmt->condition = block->branch[1];
      list_inithead(&if_stmt->then_body);
      list_inithead(&if_stmt->else_body);

      list_addtail(&if_stmt->node.link, cf_list);

      if (block->merge &&
          (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
         /* We may not always have a merge block and that merge doesn't
          * technically have to be an OpSelectionMerge. We could have a block
          * with an OpLoopMerge which ends in an OpBranchConditional.
          */
         if_stmt->merge_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, if_stmt->merge_block, &if_stmt->node);

         if_stmt->control = block->merge[2];
      }

      if_stmt->then_type = vtn_handle_branch(b, &if_stmt->node, then_block);
      if (if_stmt->then_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->then_body, then_block);
      }

      if_stmt->else_type = vtn_handle_branch(b, &if_stmt->node, else_block);
      if (if_stmt->else_type == vtn_branch_type_none) {
         vtn_add_cfg_work_item(b, work_list, &if_stmt->node,
                               &if_stmt->else_body, else_block);
      }

      return if_stmt->merge_block;
   }

   case SpvOpSwitch: {
      struct vtn_value *sel_val = vtn_untyped_value(b, block->branch[1]);
      vtn_fail_if(!sel_val->type ||
                  sel_val->type->base_type != vtn_base_type_scalar,
                  "Selector of OpSwitch must have a type of OpTypeInt");

      nir_alu_type sel_type =
         nir_get_nir_type_for_glsl_type(sel_val->type->type);
      vtn_fail_if(nir_alu_type_get_base_type(sel_type) != nir_type_int &&
                  nir_alu_type_get_base_type(sel_type) != nir_type_uint,
                  "Selector of OpSwitch must have a type of OpTypeInt");

      struct vtn_switch *swtch = rzalloc(b, struct vtn_switch);

      swtch->node.type = vtn_cf_node_type_switch;
      swtch->node.parent = cf_parent;
      swtch->selector = block->branch[1];
      list_inithead(&swtch->cases);

      list_addtail(&swtch->node.link, cf_list);

      /* We may not always have a merge block */
      if (block->merge) {
         vtn_fail_if((*block->merge & SpvOpCodeMask) != SpvOpSelectionMerge,
                     "An OpSwitch block's merge instruction must be an "
                     "OpSelectionMerge instruction.");
         swtch->break_block = vtn_block(b, block->merge[1]);
         vtn_block_set_merge_cf_node(b, swtch->break_block, &swtch->node);
      }

      /* First, we go through and record all of the cases. */
      const uint32_t *branch_end =
         block->branch + (block->branch[0] >> SpvWordCountShift);

      struct hash_table *block_to_case = _mesa_pointer_hash_table_create(b);

      bool is_default = true;
      const unsigned bitsize = nir_alu_type_get_type_size(sel_type);
      for (const uint32_t *w = block->branch + 2; w < branch_end;) {
         uint64_t literal = 0;
         if (!is_default) {
            if (bitsize <= 32) {
               literal = *(w++);
            } else {
               assert(bitsize == 64);
               literal = vtn_u64_literal(w);
               w += 2;
            }
         }
         struct vtn_block *case_block = vtn_block(b, *(w++));

         struct hash_entry *case_entry =
            _mesa_hash_table_search(block_to_case, case_block);

         struct vtn_case *cse;
         if (case_entry) {
            cse = case_entry->data;
         } else {
            cse = rzalloc(b, struct vtn_case);

            cse->node.type = vtn_cf_node_type_case;
            cse->node.parent = &swtch->node;
            list_inithead(&cse->body);
            util_dynarray_init(&cse->values, b);

            cse->type = vtn_handle_branch(b, &swtch->node, case_block);
            switch (cse->type) {
            case vtn_branch_type_none:
               /* This is a "real" case which has stuff in it */
               vtn_fail_if(case_block->switch_case != NULL,
                           "OpSwitch has a case which is also in another "
                           "OpSwitch construct");
               case_block->switch_case = cse;
               vtn_add_cfg_work_item(b, work_list, &cse->node,
                                     &cse->body, case_block);
               break;

            case vtn_branch_type_switch_break:
            case vtn_branch_type_loop_break:
            case vtn_branch_type_loop_continue:
               /* Switch breaks as well as loop breaks and continues can be
                * used to break out of a switch construct or as direct targets
                * of the OpSwitch.
                */
               break;

            default:
               vtn_fail("Target of OpSwitch is not a valid structured exit "
                        "from the switch construct.");
            }

            list_addtail(&cse->node.link, &swtch->cases);

            _mesa_hash_table_insert(block_to_case, case_block, cse);
         }

         if (is_default) {
            cse->is_default = true;
         } else {
            util_dynarray_append(&cse->values, uint64_t, literal);
         }

         is_default = false;
      }

      _mesa_hash_table_destroy(block_to_case, NULL);

      return swtch->break_block;
   }

   case SpvOpUnreachable:
      return NULL;

   default:
      vtn_fail("Block did not end with a valid branch instruction");
   }
}
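
/* Entry point for CFG construction: runs the prepass over the whole module
 * and then builds the CF tree for each function with a worklist-driven BFS.
 */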
void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   vtn_foreach_cf_node(func_node, &b->functions) {
      struct vtn_function *func = vtn_cf_node_as_function(func_node);

      /* We build the CFG for each function by doing a breadth-first search on
       * the control-flow graph. We keep track of our state using a worklist.
       * Doing a BFS ensures that we visit each structured control-flow
       * construct and its merge node before we visit the stuff inside the
       * construct.
       */
      struct list_head work_list;
      list_inithead(&work_list);
      vtn_add_cfg_work_item(b, &work_list, &func->node, &func->body,
                            func->start_block);

      while (!list_is_empty(&work_list)) {
         struct vtn_cfg_work_item *work =
            list_first_entry(&work_list, struct vtn_cfg_work_item, link);
         list_del(&work->link);

         for (struct vtn_block *block = work->start_block; block; ) {
            block = vtn_process_block(b, &work_list, work->cf_parent,
                                      work->cf_list, block);
         }
      }
   }
}
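
/* First phi pass: replaces each OpPhi destination with a load from a
 * freshly-created local variable. See the comment inside for why.
 */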
static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable. Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here. However, in order to
    * handle loops and things properly, we really need dominance
    * information. It would end up basically being the into-SSA
    * algorithm all over again. It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa(b, w[2], type,
                vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}
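
/* Second phi pass: stores each phi source to the phi's variable at the end
 * of the corresponding predecessor block.
 */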
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
   vtn_assert(phi_entry);
   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the predecessor block does not have an end_nop, it is an
       * unreachable block, and hence it is not worth handling. */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}
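
/* Emits the NIR instructions (jumps and/or flag stores) needed to realize
 * the given structured branch type.
 */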
static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_if_merge:
      break; /* Nothing to do */
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_loop_back_edge:
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      vtn_fail("Invalid branch type");
   }
}
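
/* Builds a boolean SSA condition which is true when sel matches the given
 * case. The default case is the complement of all other cases.
 */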
static nir_ssa_def *
vtn_switch_case_condition(struct vtn_builder *b, struct vtn_switch *swtch,
                          nir_ssa_def *sel, struct vtn_case *cse)
{
   if (cse->is_default) {
      nir_ssa_def *any = nir_imm_false(&b->nb);
      vtn_foreach_cf_node(other_node, &swtch->cases) {
         struct vtn_case *other = vtn_cf_node_as_case(other_node);
         if (other->is_default)
            continue;

         any = nir_ior(&b->nb, any,
                       vtn_switch_case_condition(b, swtch, sel, other));
      }
      return nir_inot(&b->nb, any);
   } else {
      nir_ssa_def *cond = nir_imm_false(&b->nb);
      util_dynarray_foreach(&cse->values, uint64_t, val) {
         nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
         cond = nir_ior(&b->nb, cond, nir_ieq(&b->nb, sel, imm));
      }
      return cond;
   }
}
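
/* Translates SPIR-V loop control hints into the NIR equivalent. */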
static nir_loop_control
vtn_loop_control(struct vtn_builder *b, struct vtn_loop *vtn_loop)
{
   if (vtn_loop->control == SpvLoopControlMaskNone)
      return nir_loop_control_none;
   else if (vtn_loop->control & SpvLoopControlDontUnrollMask)
      return nir_loop_control_dont_unroll;
   else if (vtn_loop->control & SpvLoopControlUnrollMask)
      return nir_loop_control_unroll;
   else if (vtn_loop->control & SpvLoopControlDependencyInfiniteMask ||
            vtn_loop->control & SpvLoopControlDependencyLengthMask ||
            vtn_loop->control & SpvLoopControlMinIterationsMask ||
            vtn_loop->control & SpvLoopControlMaxIterationsMask ||
            vtn_loop->control & SpvLoopControlIterationMultipleMask ||
            vtn_loop->control & SpvLoopControlPeelCountMask ||
            vtn_loop->control & SpvLoopControlPartialCountMask) {
      /* We do not do anything special with these yet. */
      return nir_loop_control_none;
   } else {
      vtn_fail("Invalid loop control");
   }
}
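
/* Translates SPIR-V selection control hints into the NIR equivalent. */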
static nir_selection_control
vtn_selection_control(struct vtn_builder *b, struct vtn_if *vtn_if)
{
   if (vtn_if->control == SpvSelectionControlMaskNone)
      return nir_selection_control_none;
   else if (vtn_if->control & SpvSelectionControlDontFlattenMask)
      return nir_selection_control_dont_flatten;
   else if (vtn_if->control & SpvSelectionControlFlattenMask)
      return nir_selection_control_flatten;
   else
      vtn_fail("Invalid selection control");
}
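
/* Recursively emits NIR control flow for a CF list built by
 * vtn_build_cfg(). switch_fall_var and has_switch_break carry the state
 * needed to lower switch fall-through and breaks within a switch body.
 */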
static void
vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
                 nir_variable *switch_fall_var, bool *has_switch_break,
                 vtn_instruction_handler handler)
{
   vtn_foreach_cf_node(node, cf_list) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = vtn_cf_node_as_block(node);

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
            vtn_fail_if(b->func->type->return_type->base_type ==
                        vtn_base_type_void,
                        "Return with a value from a function returning void");
            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
            const struct glsl_type *ret_type =
               glsl_get_bare_type(b->func->type->return_type->type);
            nir_deref_instr *ret_deref =
               nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                                    nir_var_function_temp, ret_type, 0);
            vtn_local_store(b, src, ret_deref, 0);
         }

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
            return;
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = vtn_cf_node_as_if(node);
         bool sw_break = false;

         nir_if *nif =
            nir_push_if(&b->nb, vtn_ssa_value(b, vtn_if->condition)->def);

         nif->control = vtn_selection_control(b, vtn_if);

         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->then_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->else_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior. However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = vtn_cf_node_as_loop(node);

         nir_loop *loop = nir_push_loop(&b->nb);
         loop->control = vtn_loop_control(b, vtn_loop);

         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);

            b->has_loop_continue = true;
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = vtn_cf_node_as_switch(node);

         /* Before we can emit anything, we need to sort the list of cases in
          * fall-through order.
          */
         vtn_switch_order_cases(vtn_switch);

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point. Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);

         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;

         /* Now we can walk the list of cases and actually emit code */
         vtn_foreach_cf_node(case_node, &vtn_switch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);

            /* Figure out the condition */
            nir_ssa_def *cond =
               vtn_switch_case_condition(b, vtn_switch, sel, cse);
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}
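
/* Emits the NIR body for a single function and resolves phis. */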
void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   nir_builder_init(&b->nb, func->impl);

   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->nb.exact = b->exact;
   b->has_loop_continue = false;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   nir_rematerialize_derefs_in_use_blocks_impl(func->impl);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);

   func->emitted = true;
}