}
}
-static void
-bi_print_branch(struct bi_branch *branch, FILE *fp)
-{
- fprintf(fp, ".%s", bi_cond_name(branch->cond));
-}
-
static void
bi_print_texture(struct bi_texture *tex, FILE *fp)
{
fprintf(fp, "%s", bi_minmax_mode_name(ins->minmax));
else if (ins->type == BI_LOAD_VAR)
bi_print_load_vary(&ins->load_vary, fp);
- else if (ins->type == BI_BRANCH)
- bi_print_branch(&ins->branch, fp);
- else if (ins->type == BI_CSEL || ins->type == BI_CMP)
- fprintf(fp, ".%s", bi_cond_name(ins->cond));
else if (ins->type == BI_BLEND)
fprintf(fp, ".loc%u", ins->blend_location);
else if (ins->type == BI_TEX) {
} else if (ins->type == BI_BITWISE)
fprintf(fp, ".%cshift", ins->bitwise.rshift ? 'r' : 'l');
+ if (bi_class_props[ins->type] & BI_CONDITIONAL)
+ fprintf(fp, ".%s", bi_cond_name(ins->cond));
+
if (ins->vector_channels)
fprintf(fp, ".v%u", ins->vector_channels);
}
if (ins->type == BI_BRANCH) {
- if (ins->branch.target)
- fprintf(fp, "-> block%u", ins->branch.target->base.name);
- else
- fprintf(fp, "-> blockhole");
+ assert(ins->branch_target);
+ fprintf(fp, "-> block%u", ins->branch_target->base.name);
} else if (ins->type == BI_TEX) {
bi_print_texture(&ins->texture, fp);
}
unsigned bi_class_props[BI_NUM_CLASSES] = {
[BI_ADD] = BI_MODS | BI_SCHED_ALL | BI_NO_ABS_ABS_FP16_FMA,
[BI_ATEST] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD,
- [BI_BRANCH] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD,
- [BI_CMP] = BI_MODS | BI_SCHED_ALL,
+ [BI_BRANCH] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD | BI_CONDITIONAL,
+ [BI_CMP] = BI_MODS | BI_SCHED_ALL | BI_CONDITIONAL,
[BI_BLEND] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD | BI_VECTOR | BI_DATA_REG_SRC,
[BI_BITWISE] = BI_SCHED_ALL,
[BI_COMBINE] = 0,
[BI_CONVERT] = BI_SCHED_ADD | BI_SWIZZLABLE | BI_ROUNDMODE, /* +FMA on G71 */
- [BI_CSEL] = BI_SCHED_FMA,
- [BI_DISCARD] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD,
+ [BI_CSEL] = BI_SCHED_FMA | BI_CONDITIONAL,
+ [BI_DISCARD] = BI_SCHED_HI_LATENCY | BI_SCHED_ADD | BI_CONDITIONAL,
[BI_FMA] = BI_ROUNDMODE | BI_SCHED_FMA | BI_MODS,
[BI_FREXP] = BI_SCHED_ALL,
[BI_ISUB] = BI_SCHED_ALL,
switch (instr->type) {
case nir_jump_break:
- branch->branch.target = ctx->break_block;
+ branch->branch_target = ctx->break_block;
break;
case nir_jump_continue:
- branch->branch.target = ctx->continue_block;
+ branch->branch_target = ctx->continue_block;
break;
default:
unreachable("Unhandled jump type");
}
- pan_block_add_successor(&ctx->current_block->base, &branch->branch.target->base);
+ pan_block_add_successor(&ctx->current_block->base, &branch->branch_target->base);
}
static bi_instruction *
bi_emit_branch(bi_context *ctx)
{
bi_instruction branch = {
.type = BI_BRANCH,
- .branch = {
- .cond = BI_COND_ALWAYS
- }
+ .cond = BI_COND_ALWAYS
};
return bi_emit(ctx, branch);
branch->src[0] = pan_src_index(cond);
branch->src[1] = BIR_INDEX_ZERO;
branch->src_types[0] = branch->src_types[1] = nir_type_uint16;
- branch->branch.cond = invert ? BI_COND_EQ : BI_COND_NE;
+ branch->cond = invert ? BI_COND_EQ : BI_COND_NE;
}
static void
if (ctx->instruction_count == count_in) {
/* The else block is empty, so don't emit an exit jump */
bi_remove_instruction(then_exit);
- then_branch->branch.target = ctx->after_block;
+ then_branch->branch_target = ctx->after_block;
} else {
- then_branch->branch.target = else_block;
- then_exit->branch.target = ctx->after_block;
- pan_block_add_successor(&end_then_block->base, &then_exit->branch.target->base);
+ then_branch->branch_target = else_block;
+ then_exit->branch_target = ctx->after_block;
+ pan_block_add_successor(&end_then_block->base, &then_exit->branch_target->base);
}
/* Wire up the successors */
- pan_block_add_successor(&before_block->base, &then_branch->branch.target->base); /* then_branch */
+ pan_block_add_successor(&before_block->base, &then_branch->branch_target->base); /* then_branch */
pan_block_add_successor(&before_block->base, &then_block->base); /* fallthrough */
pan_block_add_successor(&end_else_block->base, &ctx->after_block->base); /* fallthrough */
/* Branch back to loop back */
bi_instruction *br_back = bi_emit_branch(ctx);
- br_back->branch.target = ctx->continue_block;
+ br_back->branch_target = ctx->continue_block;
pan_block_add_successor(&start_block->base, &ctx->continue_block->base);
pan_block_add_successor(&ctx->current_block->base, &ctx->continue_block->base);
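For orientation, a minimal sketch (not part of the patch) of how a conditional branch to the break block is assembled once the fields are flattened. nir_cond stands in for an assumed nir_src pointer; the helpers and context fields are the ones visible in the hunks above.

    /* Hypothetical sketch, not part of the patch */
    bi_instruction *br = bi_emit_branch(ctx);        /* emitted as BI_COND_ALWAYS */
    br->src[0] = pan_src_index(nir_cond);            /* placeholder nir_src * */
    br->src[1] = BIR_INDEX_ZERO;
    br->src_types[0] = br->src_types[1] = nir_type_uint16;
    br->cond = BI_COND_NE;                           /* top-level field, shared with CSEL/CMP/DISCARD */
    br->branch_target = ctx->break_block;            /* plain block pointer, no struct bi_branch */
    pan_block_add_successor(&ctx->current_block->base, &br->branch_target->base);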
/* abs/neg/outmod valid for a float op */
#define BI_MODS (1 << 0)
-/* bit 1 unused */
+/* Accepts a bi_cond */
+#define BI_CONDITIONAL (1 << 1)
/* Accepts a bifrost_roundmode */
#define BI_ROUNDMODE (1 << 2)
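As a usage note: with the property bit in place, a pass can test whether an instruction carries a condition without enumerating classes, mirroring the bi_print.c hunk above. A minimal sketch; the helper name is hypothetical, not something the patch adds:

    /* Hypothetical helper, not in the patch */
    static inline bool
    bi_class_is_conditional(const bi_instruction *ins)
    {
            return bi_class_props[ins->type] & BI_CONDITIONAL;
    }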
BI_COND_NE,
};
-struct bi_branch {
- /* Types are specified in src_types and must be compatible (either both
- * int, or both float, 16/32, and same size or 32/16 if float. Types
- * ignored if BI_COND_ALWAYS is set for an unconditional branch. */
-
- enum bi_cond cond;
- struct bi_block *target;
-};
-
/* Opcodes within a class */
enum bi_minmax_op {
BI_MINMAX_MIN,
/* For VECTOR ops, how many channels are written? */
unsigned vector_channels;
+ /* The comparison op. BI_COND_ALWAYS may not be valid. */
+ enum bi_cond cond;
+
/* A class-specific op from which the actual opcode can be derived
* (along with the above information) */
union {
enum bifrost_minmax_mode minmax;
struct bi_load_vary load_vary;
- struct bi_branch branch;
-
- /* For CSEL, the comparison op. BI_COND_ALWAYS doesn't make
- * sense here but you can always just use a move for that */
- enum bi_cond cond;
+ struct bi_block *branch_target;
/* For BLEND -- the location 0-7 */
unsigned blend_location;