#include "vtn_private.h"
#include "nir/nir_vla.h"
+static struct vtn_block *
+vtn_block(struct vtn_builder *b, uint32_t value_id)
+{
+ return vtn_value(b, value_id, vtn_value_type_block)->block;
+}
+
static void
vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
struct vtn_block *break_block,
uint32_t block_id, uint64_t val, bool is_default)
{
- struct vtn_block *case_block =
- vtn_value(b, block_id, vtn_value_type_block)->block;
+ struct vtn_block *case_block = vtn_block(b, block_id);
/* Don't create dummy cases that just break */
if (case_block == break_block)
list_addtail(&loop->node.link, cf_list);
block->loop = loop;
- struct vtn_block *new_loop_break =
- vtn_value(b, block->merge[1], vtn_value_type_block)->block;
- struct vtn_block *new_loop_cont =
- vtn_value(b, block->merge[2], vtn_value_type_block)->block;
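+ /* OpLoopMerge: word 1 is the merge block (the break target), word 2 is
+  * the continue target.
+  */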
+ struct vtn_block *new_loop_break = vtn_block(b, block->merge[1]);
+ struct vtn_block *new_loop_cont = vtn_block(b, block->merge[2]);
/* Note: This recursive call will start with the current block as
* its start block. If we weren't careful, we would get here
switch (*block->branch & SpvOpCodeMask) {
case SpvOpBranch: {
- struct vtn_block *branch_block =
- vtn_value(b, block->branch[1], vtn_value_type_block)->block;
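+ /* OpBranch has a single operand: the label of the target block. */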
+ struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
block->branch_type = vtn_get_branch_type(b, branch_block,
switch_case, switch_break,
return;
case SpvOpBranchConditional: {
- struct vtn_block *then_block =
- vtn_value(b, block->branch[2], vtn_value_type_block)->block;
- struct vtn_block *else_block =
- vtn_value(b, block->branch[3], vtn_value_type_block)->block;
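+ /* OpBranchConditional: word 1 is the condition, words 2 and 3 are the
+  * true and false target labels.
+  */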
+ struct vtn_block *then_block = vtn_block(b, block->branch[2]);
+ struct vtn_block *else_block = vtn_block(b, block->branch[3]);
struct vtn_if *if_stmt = ralloc(b, struct vtn_if);
if_stmt->else_type == vtn_branch_type_none) {
/* Neither side of the if is something we can short-circuit. */
vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
- struct vtn_block *merge_block =
- vtn_value(b, block->merge[1], vtn_value_type_block)->block;
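+ /* OpSelectionMerge names the merge block in word 1. */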
+ struct vtn_block *merge_block = vtn_block(b, block->merge[1]);
vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
switch_case, switch_break,
case SpvOpSwitch: {
vtn_assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
- struct vtn_block *break_block =
- vtn_value(b, block->merge[1], vtn_value_type_block)->block;
+ struct vtn_block *break_block = vtn_block(b, block->merge[1]);
struct vtn_switch *swtch = ralloc(b, struct vtn_switch);
* them in fall-through order.
*/
for (const uint32_t *w = block->branch + 2; w < branch_end;) {
- struct vtn_block *case_block =
- vtn_value(b, *w, vtn_value_type_block)->block;
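+ /* *w is the label id of this case's target block; how far to advance
+  * past the (literal, label) pair depends on the bit size of the
+  * selector, handled below.
+  */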
+ struct vtn_block *case_block = vtn_block(b, *w);
if (bitsize <= 32) {
w += 2;
nir_variable *phi_var = phi_entry->data;
for (unsigned i = 3; i < count; i += 2) {
- struct vtn_block *pred =
- vtn_value(b, w[i + 1], vtn_value_type_block)->block;
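+ /* OpPhi operands are (value, parent block) pairs starting at word 3, so
+  * w[i + 1] is the predecessor block's label id.
+  */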
+ struct vtn_block *pred = vtn_block(b, w[i + 1]);
/* If a block does not have an end_nop, it is unreachable, and hence
 * not worth handling. */