/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"

static struct vtn_block *
vtn_block(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_block)->block;
}

static struct vtn_pointer *
vtn_load_param_pointer(struct vtn_builder *b,
                       struct vtn_type *param_type,
                       uint32_t param_idx)
{
   struct vtn_type *ptr_type = param_type;
   if (param_type->base_type != vtn_base_type_pointer) {
      assert(param_type->base_type == vtn_base_type_image ||
             param_type->base_type == vtn_base_type_sampler);
      ptr_type = rzalloc(b, struct vtn_type);
      ptr_type->base_type = vtn_base_type_pointer;
      ptr_type->deref = param_type;
      ptr_type->storage_class = SpvStorageClassUniformConstant;
   }

   return vtn_pointer_from_ssa(b, nir_load_param(&b->nb, param_idx), ptr_type);
}
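
/* For illustration (not part of the original source): bare image and sampler
 * parameters have no SPIR-V pointer type of their own, so the code above
 * wraps a synthetic UniformConstant pointer type around them before
 * reinterpreting the incoming NIR parameter value as a vtn_pointer.
 */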

static unsigned
vtn_type_count_function_params(struct vtn_type *type)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      return type->length * vtn_type_count_function_params(type->array_element);

   case vtn_base_type_struct: {
      unsigned count = 0;
      for (unsigned i = 0; i < type->length; i++)
         count += vtn_type_count_function_params(type->members[i]);
      return count;
   }

   case vtn_base_type_sampled_image:
      return 2;

   default:
      return 1;
   }
}
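
/* For illustration (not part of the original source): counting walks
 * composite types recursively, so a hypothetical SPIR-V function taking a
 * struct { vec4 a; mat2 b; } plus a sampled image flattens to
 * 1 (vec4) + 2 (mat2 columns) = 3 NIR parameters for the struct, plus
 * 2 for the sampled image (image deref + sampler deref), 5 in total.
 */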

static void
vtn_type_add_to_function_params(struct vtn_type *type,
                                nir_function *func,
                                unsigned *param_idx)
{
   static const nir_parameter nir_deref_param = {
      .num_components = 1,
      .bit_size = 32,
   };

   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++)
         vtn_type_add_to_function_params(type->array_element, func, param_idx);
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++)
         vtn_type_add_to_function_params(type->members[i], func, param_idx);
      break;

   case vtn_base_type_sampled_image:
      func->params[(*param_idx)++] = nir_deref_param;
      func->params[(*param_idx)++] = nir_deref_param;
      break;

   case vtn_base_type_image:
   case vtn_base_type_sampler:
      func->params[(*param_idx)++] = nir_deref_param;
      break;

   case vtn_base_type_pointer:
      if (type->type) {
         func->params[(*param_idx)++] = (nir_parameter) {
            .num_components = glsl_get_vector_elements(type->type),
            .bit_size = glsl_get_bit_size(type->type),
         };
      } else {
         func->params[(*param_idx)++] = nir_deref_param;
      }
      break;

   default:
      func->params[(*param_idx)++] = (nir_parameter) {
         .num_components = glsl_get_vector_elements(type->type),
         .bit_size = glsl_get_bit_size(type->type),
      };
      break;
   }
}

static void
vtn_ssa_value_add_to_call_params(struct vtn_builder *b,
                                 struct vtn_ssa_value *value,
                                 struct vtn_type *type,
                                 nir_call_instr *call,
                                 unsigned *param_idx)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          type->array_element,
                                          call, param_idx);
      }
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_add_to_call_params(b, value->elems[i],
                                          type->members[i],
                                          call, param_idx);
      }
      break;

   default:
      call->params[(*param_idx)++] = nir_src_for_ssa(value->def);
      break;
   }
}

static void
vtn_ssa_value_load_function_param(struct vtn_builder *b,
                                  struct vtn_ssa_value *value,
                                  struct vtn_type *type,
                                  unsigned *param_idx)
{
   switch (type->base_type) {
   case vtn_base_type_array:
   case vtn_base_type_matrix:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_load_function_param(b, value->elems[i],
                                           type->array_element, param_idx);
      }
      break;

   case vtn_base_type_struct:
      for (unsigned i = 0; i < type->length; i++) {
         vtn_ssa_value_load_function_param(b, value->elems[i],
                                           type->members[i], param_idx);
      }
      break;

   default:
      value->def = nir_load_param(&b->nb, (*param_idx)++);
      break;
   }
}

void
vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count)
{
   struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_function *vtn_callee =
      vtn_value(b, w[3], vtn_value_type_function)->func;
   struct nir_function *callee = vtn_callee->impl->function;

   vtn_callee->referenced = true;

   nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);

   unsigned param_idx = 0;

   nir_deref_instr *ret_deref = NULL;
   struct vtn_type *ret_type = vtn_callee->type->return_type;
   if (ret_type->base_type != vtn_base_type_void) {
      nir_variable *ret_tmp =
         nir_local_variable_create(b->nb.impl,
                                   glsl_get_bare_type(ret_type->type),
                                   "return_tmp");
      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
   }

   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
      struct vtn_type *arg_type = vtn_callee->type->params[i];
      unsigned arg_id = w[4 + i];

      if (arg_type->base_type == vtn_base_type_sampled_image) {
         struct vtn_sampled_image *sampled_image =
            vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;

         call->params[param_idx++] =
            nir_src_for_ssa(&sampled_image->image->deref->dest.ssa);
         call->params[param_idx++] =
            nir_src_for_ssa(&sampled_image->sampler->deref->dest.ssa);
      } else if (arg_type->base_type == vtn_base_type_pointer ||
                 arg_type->base_type == vtn_base_type_image ||
                 arg_type->base_type == vtn_base_type_sampler) {
         struct vtn_pointer *pointer =
            vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
         call->params[param_idx++] =
            nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
      } else {
         vtn_ssa_value_add_to_call_params(b, vtn_ssa_value(b, arg_id),
                                          arg_type, call, &param_idx);
      }
   }
   assert(param_idx == call->num_params);

   nir_builder_instr_insert(&b->nb, &call->instr);

   if (ret_type->base_type == vtn_base_type_void) {
      vtn_push_value(b, w[2], vtn_value_type_undef);
   } else {
      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, ret_deref, 0));
   }
}
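
/* For illustration (not part of the original source): an OpFunctionCall is
 * laid out as w[1] = result type, w[2] = result id, w[3] = callee,
 * w[4+] = arguments, which is why the loop above reads arg_id from w[4 + i].
 * A call that returns a value is lowered roughly to:
 *
 *    decl_var  <type> return_tmp
 *    deref_var &return_tmp
 *    call      callee (&return_tmp, arg0, arg1, ...)
 *
 * i.e. the return value travels through an extra leading deref parameter.
 */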

static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      vtn_assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      b->func->node.type = vtn_cf_node_type_function;
      list_inithead(&b->func->body);
      b->func->control = w[3];

      UNUSED const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      b->func->type = vtn_value(b, w[4], vtn_value_type_type)->type;
      const struct vtn_type *func_type = b->func->type;

      vtn_assert(func_type->return_type->type == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      unsigned num_params = 0;
      for (unsigned i = 0; i < func_type->length; i++)
         num_params += vtn_type_count_function_params(func_type->params[i]);

      /* Add one parameter for the function return value */
      if (func_type->return_type->base_type != vtn_base_type_void)
         num_params++;

      func->num_params = num_params;
      func->params = ralloc_array(b->shader, nir_parameter, num_params);

      unsigned idx = 0;
      if (func_type->return_type->base_type != vtn_base_type_void) {
         nir_address_format addr_format =
            vtn_mode_to_address_format(b, vtn_variable_mode_function);
         /* The return value is a regular pointer */
         func->params[idx++] = (nir_parameter) {
            .num_components = nir_address_format_num_components(addr_format),
            .bit_size = nir_address_format_bit_size(addr_format),
         };
      }

      for (unsigned i = 0; i < func_type->length; i++)
         vtn_type_add_to_function_params(func_type->params[i], func, &idx);
      assert(idx == num_params);

      b->func->impl = nir_function_impl_create(func);
      nir_builder_init(&b->nb, func->impl);
      b->nb.cursor = nir_before_cf_list(&b->func->impl->body);
      b->nb.exact = b->exact;

      b->func_param_idx = 0;

      /* The return value is the first parameter */
      if (func_type->return_type->base_type != vtn_base_type_void)
         b->func_param_idx++;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      vtn_assert(b->func_param_idx < b->func->impl->function->num_params);

      if (type->base_type == vtn_base_type_sampled_image) {
         /* Sampled images are actually two parameters.  The first is the
          * image and the second is the sampler.
          */
         struct vtn_value *val =
            vtn_push_value(b, w[2], vtn_value_type_sampled_image);

         val->sampled_image = ralloc(b, struct vtn_sampled_image);

         struct vtn_type *sampler_type = rzalloc(b, struct vtn_type);
         sampler_type->base_type = vtn_base_type_sampler;
         sampler_type->type = glsl_bare_sampler_type();

         val->sampled_image->image =
            vtn_load_param_pointer(b, type, b->func_param_idx++);
         val->sampled_image->sampler =
            vtn_load_param_pointer(b, sampler_type, b->func_param_idx++);
      } else if (type->base_type == vtn_base_type_pointer &&
                 type->type != NULL) {
         /* This is a pointer with an actual storage type */
         nir_ssa_def *ssa_ptr = nir_load_param(&b->nb, b->func_param_idx++);
         vtn_push_value_pointer(b, w[2], vtn_pointer_from_ssa(b, ssa_ptr, type));
      } else if (type->base_type == vtn_base_type_pointer ||
                 type->base_type == vtn_base_type_image ||
                 type->base_type == vtn_base_type_sampler) {
         vtn_push_value_pointer(b, w[2],
                                vtn_load_param_pointer(b, type,
                                                       b->func_param_idx++));
      } else {
         /* We're a regular SSA value. */
         struct vtn_ssa_value *value = vtn_create_ssa_value(b, type->type);
         vtn_ssa_value_load_function_param(b, value, type, &b->func_param_idx);
         vtn_push_ssa(b, w[2], type, value);
      }
      break;
   }

   case SpvOpLabel: {
      vtn_assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         list_addtail(&b->func->node.link, &b->functions);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      vtn_assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      vtn_assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      break; /* Continue on as per normal */
   }

   return true;
}
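
/* For illustration (not part of the original source): after the prepass,
 * every vtn_block simply remembers raw pointers into the SPIR-V word
 * stream -- block->label points at its OpLabel, block->merge at an optional
 * OpSelectionMerge/OpLoopMerge, and block->branch at the terminating branch
 * instruction.  No control-flow structure is built yet; that happens in
 * vtn_cfg_walk_blocks() below.
 */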

static void
vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
             struct vtn_block *break_block,
             uint32_t block_id, uint64_t val, bool is_default)
{
   struct vtn_block *case_block = vtn_block(b, block_id);

   /* Don't create dummy cases that just break */
   if (case_block == break_block)
      return;

   if (case_block->switch_case == NULL) {
      struct vtn_case *c = ralloc(b, struct vtn_case);

      c->node.type = vtn_cf_node_type_case;
      list_inithead(&c->body);
      c->start_block = case_block;
      c->fallthrough = NULL;
      util_dynarray_init(&c->values, b);
      c->is_default = false;
      c->visited = false;

      list_addtail(&c->node.link, &swtch->cases);

      case_block->switch_case = c;
   }

   if (is_default) {
      case_block->switch_case->is_default = true;
   } else {
      util_dynarray_append(&case_block->switch_case->values, uint64_t, val);
   }
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->node.link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->node.link, &cse->fallthrough->node.link);
   } else {
      list_add(&cse->node.link, &swtch->cases);
   }
}
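
/* For illustration (not part of the original source): if case A falls
 * through to case C, the DFS visits A, recurses into C first, and then
 * re-inserts A immediately before C in swtch->cases.  Fall-through pairs
 * therefore always end up adjacent, which lets vtn_emit_cf_list() model
 * fall-through with a simple boolean variable instead of unstructured jumps.
 */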

static enum vtn_branch_type
vtn_get_branch_type(struct vtn_builder *b,
                    struct vtn_block *block,
                    struct vtn_case *swcase, struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont)
{
   if (block->switch_case) {
      /* This branch is actually a fallthrough */
      vtn_assert(swcase->fallthrough == NULL ||
                 swcase->fallthrough == block->switch_case);
      swcase->fallthrough = block->switch_case;
      return vtn_branch_type_switch_fallthrough;
   } else if (block == loop_break) {
      return vtn_branch_type_loop_break;
   } else if (block == loop_cont) {
      return vtn_branch_type_loop_continue;
   } else if (block == switch_break) {
      return vtn_branch_type_switch_break;
   } else {
      return vtn_branch_type_none;
   }
}

static void
vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
                    struct vtn_block *start, struct vtn_case *switch_case,
                    struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont,
                    struct vtn_block *end)
{
   struct vtn_block *block = start;
   while (block != end) {
      if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
          !block->loop) {
         struct vtn_loop *loop = ralloc(b, struct vtn_loop);

         loop->node.type = vtn_cf_node_type_loop;
         list_inithead(&loop->body);
         list_inithead(&loop->cont_body);
         loop->control = block->merge[3];

         list_addtail(&loop->node.link, cf_list);
         block->loop = loop;

         struct vtn_block *new_loop_break = vtn_block(b, block->merge[1]);
         struct vtn_block *new_loop_cont = vtn_block(b, block->merge[2]);

         /* Note: This recursive call will start with the current block as
          * its start block.  If we weren't careful, we would get here
          * again and end up in infinite recursion.  This is why we set
          * block->loop above and check for it before creating one.  This
          * way, we only create the loop once and the second call that
          * tries to handle this loop goes to the cases below and gets
          * handled as a regular block.
          *
          * Note: When we make the recursive walk calls, we pass NULL for
          * the switch break since you have to break out of the loop first.
          * We do, however, still pass the current switch case because it's
          * possible that the merge block for the loop is the start of
          * another case.
          */
         vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
                             new_loop_break, new_loop_cont, NULL);
         vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
                             new_loop_break, NULL, block);

         enum vtn_branch_type branch_type =
            vtn_get_branch_type(b, new_loop_break, switch_case, switch_break,
                                loop_break, loop_cont);

         if (branch_type != vtn_branch_type_none) {
            /* Stop walking through the CFG when this inner loop's break block
             * ends up as the same block as the outer loop's continue block
             * because we are already going to visit it.
             */
            vtn_assert(branch_type == vtn_branch_type_loop_continue);
            return;
         }

         block = new_loop_break;
         continue;
      }

      vtn_assert(block->node.link.next == NULL);
      list_addtail(&block->node.link, cf_list);

      switch (*block->branch & SpvOpCodeMask) {
      case SpvOpBranch: {
         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);

         block->branch_type = vtn_get_branch_type(b, branch_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (block->branch_type != vtn_branch_type_none)
            return;

         block = branch_block;
         continue;
      }

      case SpvOpReturn:
      case SpvOpReturnValue:
         block->branch_type = vtn_branch_type_return;
         return;

      case SpvOpKill:
         block->branch_type = vtn_branch_type_discard;
         return;

      case SpvOpBranchConditional: {
         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
         struct vtn_block *else_block = vtn_block(b, block->branch[3]);

         struct vtn_if *if_stmt = ralloc(b, struct vtn_if);

         if_stmt->node.type = vtn_cf_node_type_if;
         if_stmt->condition = block->branch[1];
         list_inithead(&if_stmt->then_body);
         list_inithead(&if_stmt->else_body);

         list_addtail(&if_stmt->node.link, cf_list);

         if (block->merge &&
             (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
            if_stmt->control = block->merge[2];
         } else {
            if_stmt->control = SpvSelectionControlMaskNone;
         }

         if_stmt->then_type = vtn_get_branch_type(b, then_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);
         if_stmt->else_type = vtn_get_branch_type(b, else_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (then_block == else_block) {
            block->branch_type = if_stmt->then_type;
            if (block->branch_type == vtn_branch_type_none) {
               block = then_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type == vtn_branch_type_none &&
                    if_stmt->else_type == vtn_branch_type_none) {
            /* Neither side of the if is something we can short-circuit. */
            vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
            struct vtn_block *merge_block = vtn_block(b, block->merge[1]);

            vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);
            vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);

            enum vtn_branch_type merge_type =
               vtn_get_branch_type(b, merge_block, switch_case, switch_break,
                                   loop_break, loop_cont);
            if (merge_type == vtn_branch_type_none) {
               block = merge_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type != vtn_branch_type_none &&
                    if_stmt->else_type != vtn_branch_type_none) {
            /* Both sides were short-circuited.  We're done here. */
            return;
         } else {
            /* Exactly one side of the branch could be short-circuited.
             * We set the branch up as a predicated break/continue and we
             * continue on with the other side as if it were what comes
             * after the if.
             */
            if (if_stmt->then_type == vtn_branch_type_none) {
               block = then_block;
            } else {
               block = else_block;
            }
            continue;
         }
         vtn_fail("Should have returned or continued");
      }

      case SpvOpSwitch: {
         vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
         struct vtn_block *break_block = vtn_block(b, block->merge[1]);

         struct vtn_switch *swtch = ralloc(b, struct vtn_switch);

         swtch->node.type = vtn_cf_node_type_switch;
         swtch->selector = block->branch[1];
         list_inithead(&swtch->cases);

         list_addtail(&swtch->node.link, cf_list);

         /* First, we go through and record all of the cases. */
         const uint32_t *branch_end =
            block->branch + (block->branch[0] >> SpvWordCountShift);

         struct vtn_value *cond_val = vtn_untyped_value(b, block->branch[1]);
         vtn_fail_if(!cond_val->type ||
                     cond_val->type->base_type != vtn_base_type_scalar,
                     "Selector of OpSwitch must have a type of OpTypeInt");

         nir_alu_type cond_type =
            nir_get_nir_type_for_glsl_type(cond_val->type->type);
         vtn_fail_if(nir_alu_type_get_base_type(cond_type) != nir_type_int &&
                     nir_alu_type_get_base_type(cond_type) != nir_type_uint,
                     "Selector of OpSwitch must have a type of OpTypeInt");

         bool is_default = true;
         const unsigned bitsize = nir_alu_type_get_type_size(cond_type);
         for (const uint32_t *w = block->branch + 2; w < branch_end;) {
            uint64_t literal = 0;
            if (!is_default) {
               if (bitsize <= 32) {
                  literal = *(w++);
               } else {
                  assert(bitsize == 64);
                  literal = vtn_u64_literal(w);
                  w += 2;
               }
            }

            uint32_t block_id = *(w++);

            vtn_add_case(b, swtch, break_block, block_id, literal, is_default);
            is_default = false;
         }

         /* Now, we go through and walk the blocks.  While we walk through
          * the blocks, we also gather the much-needed fall-through
          * information.
          */
         vtn_foreach_cf_node(case_node, &swtch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);
            vtn_assert(cse->start_block != break_block);
            vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
                                break_block, loop_break, loop_cont, NULL);
         }

         /* Finally, we walk over all of the cases one more time and put
          * them in fall-through order.
          */
         for (const uint32_t *w = block->branch + 2; w < branch_end;) {
            struct vtn_block *case_block = vtn_block(b, *w);

            if (bitsize <= 32) {
               w += 2;
            } else {
               assert(bitsize == 64);
               w += 3;
            }

            if (case_block == break_block)
               continue;

            vtn_assert(case_block->switch_case);

            vtn_order_case(swtch, case_block->switch_case);
         }

         enum vtn_branch_type branch_type =
            vtn_get_branch_type(b, break_block, switch_case, NULL,
                                loop_break, loop_cont);

         if (branch_type != vtn_branch_type_none) {
            /* It is possible that the break is actually the continue block
             * for the containing loop.  In this case, we need to bail and let
             * the loop parsing code handle the continue properly.
             */
            vtn_assert(branch_type == vtn_branch_type_loop_continue);
            return;
         }

         block = break_block;
         continue;
      }

      case SpvOpUnreachable:
         return;

      default:
         vtn_fail("Unhandled opcode");
      }
   }
}

void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   vtn_foreach_cf_node(node, &b->functions) {
      struct vtn_function *func = vtn_cf_node_as_function(node);
      vtn_cfg_walk_blocks(b, &func->body, func->start_block,
                          NULL, NULL, NULL, NULL, NULL);
   }
}

static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   vtn_push_ssa(b, w[2], type,
                vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));

   return true;
}
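
/* For illustration (not part of the original source): an OpPhi is laid out
 * as w[1] = result type, w[2] = result id, then (value, predecessor-block)
 * pairs starting at w[3].  So
 *
 *    %x = OpPhi %int %a %blockA %b %blockB
 *
 * becomes a load from a fresh "phi" variable here, and the second pass below
 * stores %a at the end of blockA and %b at the end of blockB; NIR's
 * lower_vars_to_ssa pass later turns the variable back into real phis.
 */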

static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
   vtn_assert(phi_entry);
   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred = vtn_block(b, w[i + 1]);

      /* If the block does not have an end_nop, it is an unreachable block,
       * and hence not worth handling */
      if (!pred->end_nop)
         continue;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
   }

   return true;
}

static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_false(&b->nb), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      vtn_fail("Invalid branch type");
   }
}

static nir_ssa_def *
vtn_switch_case_condition(struct vtn_builder *b, struct vtn_switch *swtch,
                          nir_ssa_def *sel, struct vtn_case *cse)
{
   if (cse->is_default) {
      nir_ssa_def *any = nir_imm_false(&b->nb);
      vtn_foreach_cf_node(other_node, &swtch->cases) {
         struct vtn_case *other = vtn_cf_node_as_case(other_node);
         if (other->is_default)
            continue;

         any = nir_ior(&b->nb, any,
                       vtn_switch_case_condition(b, swtch, sel, other));
      }
      return nir_inot(&b->nb, any);
   } else {
      nir_ssa_def *cond = nir_imm_false(&b->nb);
      util_dynarray_foreach(&cse->values, uint64_t, val) {
         nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
         cond = nir_ior(&b->nb, cond, nir_ieq(&b->nb, sel, imm));
      }
      return cond;
   }
}
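
/* For illustration (not part of the original source): a case with values
 * {1, 5} emits roughly
 *
 *    cond = (sel == 1) || (sel == 5)
 *
 * and the default case emits the negation of the OR of every other case's
 * condition, so exactly one case condition is true for any selector value;
 * fall-through is OR'd in separately by the caller.
 */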

static nir_loop_control
vtn_loop_control(struct vtn_builder *b, struct vtn_loop *vtn_loop)
{
   if (vtn_loop->control == SpvLoopControlMaskNone)
      return nir_loop_control_none;
   else if (vtn_loop->control & SpvLoopControlDontUnrollMask)
      return nir_loop_control_dont_unroll;
   else if (vtn_loop->control & SpvLoopControlUnrollMask)
      return nir_loop_control_unroll;
   else if (vtn_loop->control & SpvLoopControlDependencyInfiniteMask ||
            vtn_loop->control & SpvLoopControlDependencyLengthMask ||
            vtn_loop->control & SpvLoopControlMinIterationsMask ||
            vtn_loop->control & SpvLoopControlMaxIterationsMask ||
            vtn_loop->control & SpvLoopControlIterationMultipleMask ||
            vtn_loop->control & SpvLoopControlPeelCountMask ||
            vtn_loop->control & SpvLoopControlPartialCountMask) {
      /* We do not do anything special with these yet. */
      return nir_loop_control_none;
   } else {
      vtn_fail("Invalid loop control");
   }
}

static nir_selection_control
vtn_selection_control(struct vtn_builder *b, struct vtn_if *vtn_if)
{
   if (vtn_if->control == SpvSelectionControlMaskNone)
      return nir_selection_control_none;
   else if (vtn_if->control & SpvSelectionControlDontFlattenMask)
      return nir_selection_control_dont_flatten;
   else if (vtn_if->control & SpvSelectionControlFlattenMask)
      return nir_selection_control_flatten;
   else
      vtn_fail("Invalid selection control");
}

static void
vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
                 nir_variable *switch_fall_var, bool *has_switch_break,
                 vtn_instruction_handler handler)
{
   vtn_foreach_cf_node(node, cf_list) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = vtn_cf_node_as_block(node);

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
            vtn_fail_if(b->func->type->return_type->base_type ==
                        vtn_base_type_void,
                        "Return with a value from a function returning void");
            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
            const struct glsl_type *ret_type =
               glsl_get_bare_type(b->func->type->return_type->type);
            nir_deref_instr *ret_deref =
               nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                                    nir_var_function_temp, ret_type, 0);
            vtn_local_store(b, src, ret_deref, 0);
         }

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
            return;
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = vtn_cf_node_as_if(node);
         bool sw_break = false;

         nir_if *nif =
            nir_push_if(&b->nb, vtn_ssa_value(b, vtn_if->condition)->def);

         nif->control = vtn_selection_control(b, vtn_if);

         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->then_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         nir_push_else(&b->nb, nif);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->else_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         nir_pop_if(&b->nb, nif);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;
            nir_push_if(&b->nb, nir_load_var(&b->nb, switch_fall_var));
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = vtn_cf_node_as_loop(node);

         nir_loop *loop = nir_push_loop(&b->nb);
         loop->control = vtn_loop_control(b, vtn_loop);

         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_is_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_false(&b->nb), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);

            nir_if *cont_if =
               nir_push_if(&b->nb, nir_load_var(&b->nb, do_cont));

            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);

            nir_pop_if(&b->nb, cont_if);

            nir_store_var(&b->nb, do_cont, nir_imm_true(&b->nb), 1);

            b->has_loop_continue = true;
         }

         nir_pop_loop(&b->nb, loop);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = vtn_cf_node_as_switch(node);

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_false(&b->nb), 1);

         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;

         /* Now we can walk the list of cases and actually emit code */
         vtn_foreach_cf_node(case_node, &vtn_switch->cases) {
            struct vtn_case *cse = vtn_cf_node_as_case(case_node);

            /* Figure out the condition */
            nir_ssa_def *cond =
               vtn_switch_case_condition(b, vtn_switch, sel, cse);
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_push_if(&b->nb, cond);

            bool has_break = false;
            nir_store_var(&b->nb, fall_var, nir_imm_true(&b->nb), 1);
            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
            (void)has_break; /* We don't care */

            nir_pop_if(&b->nb, case_if);
         }

         break;
      }

      default:
         vtn_fail("Invalid CF node type");
      }
   }
}

void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   nir_builder_init(&b->nb, func->impl);
   b->func = func;
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->nb.exact = b->exact;
   b->has_loop_continue = false;
   b->phi_table = _mesa_pointer_hash_table_create(b);

   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   nir_rematerialize_derefs_in_use_blocks_impl(func->impl);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);

   func->emitted = true;
}