/* Current NIR function */
nir_function *func;
+ /* Allocated compiler temporary counter */
+ unsigned temp_alloc;
+
/* Unordered list of midgard_blocks */
int block_count;
struct list_head blocks;
list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions);
}
-static inline void
+static inline struct midgard_instruction *
mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins)
{
- list_addtail(&(mir_upload_ins(ins))->link, &tag->link);
+ struct midgard_instruction *u = mir_upload_ins(ins);
+ list_addtail(&u->link, &tag->link);
+ return u;
}
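/* Editor's note (illustrative, not part of the patch): returning the
 * uploaded instruction lets callers keep a handle on the copy they just
 * inserted. The spill path later in this patch relies on exactly this:
 *
 *     midgard_instruction st = v_mov(spill_node, blank_alu_src, slot);
 *     midgard_instruction *spill_move =
 *             mir_insert_instruction_before(mir_next_op(ins), st);
 *     // ... later, skip rewriting spill_move itself
 */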
static inline void
mir_foreach_block(ctx, v_block) \
mir_foreach_instr_in_block_safe(v_block, v)
-
-
static inline midgard_instruction *
mir_last_in_block(struct midgard_block *block)
{
/* Broad types of register classes so we can handle special
* registers */
-#define NR_REG_CLASSES 4
+#define NR_REG_CLASSES 5
#define REG_CLASS_WORK 0
#define REG_CLASS_LDST 1
#define REG_CLASS_LDST27 2
-#define REG_CLASS_TEX 3
+#define REG_CLASS_TEXR 3
+#define REG_CLASS_TEXW 4
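/* Editor's note: later in this patch a node's class is recovered with
 * classes[node] >> 2, so the per-node class word presumably packs the
 * REG_CLASS_* value in the upper bits with two low bits reserved. A hedged
 * sketch of the assumed encoding:
 *
 *     unsigned packed = (REG_CLASS_TEXW << 2) | low_bits;
 *     unsigned current_class = packed >> 2;  // REG_CLASS_TEXW again
 */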
void mir_lower_special_reads(compiler_context *ctx);
struct ra_graph* allocate_registers(compiler_context *ctx, bool *spilled);
}
}
+static unsigned
+make_compiler_temp(compiler_context *ctx)
+{
+ return ctx->func->impl->ssa_alloc + ctx->func->impl->reg_alloc + ctx->temp_alloc++;
+}
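/* Editor's note: nir_src_index()/nir_dest_index() should only produce
 * indices below ssa_alloc + reg_alloc (SSA values and NIR registers
 * respectively), so temporaries starting past that sum never collide with a
 * NIR-derived index. Illustrative only:
 *
 *     unsigned t0 = make_compiler_temp(ctx);  // ssa_alloc + reg_alloc + 0
 *     unsigned t1 = make_compiler_temp(ctx);  // ssa_alloc + reg_alloc + 1
 */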
+
static int sysval_for_instr(compiler_context *ctx, nir_instr *instr,
unsigned *dest)
{
//assert (!instr->sampler);
//assert (!instr->texture_array_size);
- /* Allocate registers via a round robin scheme to alternate between the two registers */
- int reg = ctx->texture_op_count & 1;
- int in_reg = reg, out_reg = reg;
-
int texture_index = instr->texture_index;
int sampler_index = texture_index;
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
+ .ssa_args = {
+ .dest = nir_dest_index(ctx, &instr->dest),
+ .src0 = -1,
+ .src1 = -1,
+ },
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
.texture_handle = texture_index,
.sampler_handle = sampler_index,
-
- /* TODO: Regalloc it in */
.swizzle = SWIZZLE_XYZW,
+ .in_reg_swizzle = SWIZZLE_XYZW,
/* TODO: half */
.in_reg_full = 1,
}
};
for (unsigned i = 0; i < instr->num_srcs; ++i) {
- int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
int index = nir_src_index(ctx, &instr->src[i].src);
- int nr_comp = nir_src_num_components(instr->src[i].src);
midgard_vector_alu_src alu_src = blank_alu_src;
switch (instr->src[i].src_type) {
case nir_tex_src_coord: {
+ emit_explicit_constant(ctx, index, index);
+
+ /* Texel fetch coordinates use all four elements
+ * (xyz/index) regardless of texture dimensionality,
+ * which means it's necessary to zero the unused
+ * components to keep everything happy */
+
+ if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
+ unsigned old_index = index;
+
+ index = make_compiler_temp(ctx);
+
+ /* mov index, old_index */
+ midgard_instruction mov = v_mov(old_index, blank_alu_src, index);
+ mov.mask = 0x3;
+ emit_mir_instruction(ctx, mov);
+
+ /* mov index.zw, #0 */
+ mov = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT),
+ blank_alu_src, index);
+ mov.has_constants = true;
+ mov.mask = (1 << COMPONENT_Z) | (1 << COMPONENT_W);
+ emit_mir_instruction(ctx, mov);
+ }
+
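/* Editor's sketch: for a texelFetch on a sampler2D with coordinate (x, y),
 * the two moves above leave the temporary as (x, y, 0, 0):
 *
 *     mov temp.xy, coord   // mask = 0x3
 *     mov temp.zw, #0      // mask = Z | W, constant source
 */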
if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
/* texelFetch is undefined on samplerCube */
assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH);
/* For cubemaps, we use a special ld/st op to
* select the face and copy the xy into the
* texture register */
- midgard_instruction st = m_st_cubemap_coords(reg, 0);
+ unsigned temp = make_compiler_temp(ctx);
+
+ midgard_instruction st = m_st_cubemap_coords(temp, 0);
st.ssa_args.src0 = index;
st.load_store.unknown = 0x24; /* XXX: What is this? */
st.mask = 0x3; /* xy */
st.load_store.swizzle = alu_src.swizzle;
emit_mir_instruction(ctx, st);
- ins.texture.in_reg_swizzle = swizzle_of(2);
+ ins.ssa_args.src0 = temp;
} else {
- ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp);
-
- midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.mask = mask_of(nr_comp);
- emit_mir_instruction(ctx, mov);
-
- if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) {
- /* Texel fetch opcodes care about the
- * values of z and w, so we actually
- * need to spill into a second register
- * for a texel fetch with register bias
- * (for non-2D). TODO: Implement that
- */
-
- assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D);
-
- midgard_instruction zero = v_mov(index, alu_src, reg);
- zero.ssa_args.inline_constant = true;
- zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
- zero.has_constants = true;
- zero.mask = ~mov.mask;
- emit_mir_instruction(ctx, zero);
+ ins.ssa_args.src0 = index;
+ }
- ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
- } else {
- /* Non-texel fetch doesn't need that
- * nonsense. However we do use the Z
- * for array indexing */
- bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D;
- ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ;
- }
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_2D) {
+ /* Array component in w but NIR wants it in z */
+ ins.texture.in_reg_swizzle = SWIZZLE_XYZZ;
}
break;
if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
break;
- /* Otherwise we use a register. To keep RA simple, we
- * put the bias/LOD into the w component of the input
- * source, which is otherwise in xy */
-
- alu_src.swizzle = SWIZZLE_XXXX;
-
- midgard_instruction mov = v_mov(index, alu_src, reg);
- mov.mask = 1 << COMPONENT_W;
- emit_mir_instruction(ctx, mov);
-
ins.texture.lod_register = true;
-
- midgard_tex_register_select sel = {
- .select = in_reg,
- .full = 1,
- .component = COMPONENT_W,
- };
-
- uint8_t packed;
- memcpy(&packed, &sel, sizeof(packed));
- ins.texture.bias = packed;
+ ins.ssa_args.src1 = index;
+ emit_explicit_constant(ctx, index, index);
break;
};
}
}
- /* Set registers to read and write from the same place */
- ins.texture.in_reg_select = in_reg;
- ins.texture.out_reg_select = out_reg;
-
emit_mir_instruction(ctx, ins);
- int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest);
- midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index);
- emit_mir_instruction(ctx, ins2);
-
/* Used for .cont and .last hinting */
ctx->texture_op_count++;
}
.nir = nir,
.screen = screen,
.stage = nir->info.stage,
+ .temp_alloc = 0,
.is_blend = is_blend,
.blend_constant_offset = 0,
struct ra_regs *regs[9];
- /* Work register classes corresponds to the above register
- * sets. 12 per set for 4 classes per work/ldst/tex */
+ /* Work register classes correspond to the above register sets: 20 per
+ * set, four sizes for each of the five classes
+ * (work/ldst/ldst27/texr/texw). TODO: Unify with compiler.h */
- unsigned reg_classes[9][12];
+ unsigned reg_classes[9][4 * 5];
};
/* Define the general compiler entry point */
*/
#define WORK_STRIDE 10
+
+/* We have overlapping register classes for special registers, handled via
+ * shadows */
+
#define SHADOW_R27 17
+#define SHADOW_R28 18
+#define SHADOW_R29 19
/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
/* Apply shadow registers */
- if (phys == SHADOW_R27)
- phys = 27;
+ if (phys >= SHADOW_R27 && phys <= SHADOW_R29)
+ phys += 27 - SHADOW_R27;
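/* Editor's note: since the shadows are defined contiguously, this
 * arithmetic maps each shadow index back to its real register:
 * SHADOW_R27 (17) -> 27, SHADOW_R28 (18) -> 28, SHADOW_R29 (19) -> 29. */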
struct phys_reg r = {
.reg = phys,
* work registers, although it is also used to create the register set for
* special register allocation */
+static void
+add_shadow_conflicts(struct ra_regs *regs, unsigned base, unsigned shadow)
+{
+ for (unsigned a = 0; a < WORK_STRIDE; ++a) {
+ unsigned reg_a = (WORK_STRIDE * base) + a;
+
+ for (unsigned b = 0; b < WORK_STRIDE; ++b) {
+ unsigned reg_b = (WORK_STRIDE * shadow) + b;
+
+ ra_add_reg_conflict(regs, reg_a, reg_b);
+ ra_add_reg_conflict(regs, reg_b, reg_a);
+ }
+ }
+}
+
static struct ra_regs *
create_register_set(unsigned work_count, unsigned *classes)
{
unsigned first_reg =
(c == REG_CLASS_LDST) ? 26 :
(c == REG_CLASS_LDST27) ? SHADOW_R27 :
- (c == REG_CLASS_TEX) ? 28 : 0;
+ (c == REG_CLASS_TEXR) ? 28 :
+ (c == REG_CLASS_TEXW) ? SHADOW_R28 :
+ 0;
/* Add the full set of work registers */
for (unsigned i = first_reg; i < (first_reg + count); ++i) {
}
- /* All of the r27 registers in in LDST conflict with all of the
- * registers in LD27 (pseudo/shadow register) */
-
- for (unsigned a = 0; a < WORK_STRIDE; ++a) {
- unsigned reg_a = (WORK_STRIDE * 27) + a;
-
- for (unsigned b = 0; b < WORK_STRIDE; ++b) {
- unsigned reg_b = (WORK_STRIDE * SHADOW_R27) + b;
-
- ra_add_reg_conflict(regs, reg_a, reg_b);
- ra_add_reg_conflict(regs, reg_b, reg_a);
- }
- }
+ /* The shadow classes alias their base registers, so add explicit conflicts */
+ add_shadow_conflicts(regs, 27, SHADOW_R27);
+ add_shadow_conflicts(regs, 28, SHADOW_R28);
+ add_shadow_conflicts(regs, 29, SHADOW_R29);
/* We're done setting up */
ra_set_finalize(regs, NULL);
case REG_CLASS_LDST:
case REG_CLASS_LDST27:
return (tag == TAG_LOAD_STORE_4);
- default:
+ case REG_CLASS_TEXR:
+ return (tag == TAG_TEXTURE_4);
+ case REG_CLASS_TEXW:
return (tag != TAG_LOAD_STORE_4);
+ case REG_CLASS_WORK:
+ return (tag == TAG_ALU_4);
+ default:
+ unreachable("Invalid class");
+ }
+}
+
+static bool
+check_write_class(unsigned *classes, unsigned tag, unsigned node)
+{
+ /* Non-nodes are implicitly ok */
+ if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ return true;
+
+ unsigned current_class = classes[node] >> 2;
+
+ switch (current_class) {
+ case REG_CLASS_TEXR:
+ return true;
+ case REG_CLASS_TEXW:
+ return (tag == TAG_TEXTURE_4);
+ case REG_CLASS_LDST:
+ case REG_CLASS_LDST27:
+ case REG_CLASS_WORK:
+ return (tag == TAG_ALU_4) || (tag == TAG_LOAD_STORE_4);
+ default:
+ unreachable("Invalid class");
}
}
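/* Editor's summary (derived from the two switches above):
 *
 *              may be read by      may be written by
 *   WORK       ALU                 ALU or load/store
 *   LDST(27)   load/store          ALU or load/store
 *   TEXR       texture             anything
 *   TEXW       ALU or texture      texture only
 */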
BITSET_SET(bitfield, node);
}
-static midgard_instruction *
-mir_find_last_write(compiler_context *ctx, unsigned i)
-{
- midgard_instruction *last_write = NULL;
-
- mir_foreach_instr_global(ctx, ins) {
- if (ins->compact_branch) continue;
-
- if (ins->ssa_args.dest == i)
- last_write = ins;
- }
-
- return last_write;
-}
-
void
mir_lower_special_reads(compiler_context *ctx)
{
/* Bitfields for the various types of registers we could have */
unsigned *alur = calloc(sz, 1);
+ unsigned *aluw = calloc(sz, 1);
unsigned *ldst = calloc(sz, 1);
unsigned *texr = calloc(sz, 1);
unsigned *texw = calloc(sz, 1);
switch (ins->type) {
case TAG_ALU_4:
+ mark_node_class(aluw, ins->ssa_args.dest);
mark_node_class(alur, ins->ssa_args.src0);
- mark_node_class(alur, ins->ssa_args.src1);
+
+ if (!ins->ssa_args.inline_constant)
+ mark_node_class(alur, ins->ssa_args.src1);
+
break;
case TAG_LOAD_STORE_4:
mark_node_class(ldst, ins->ssa_args.src0);
for (unsigned i = 0; i < ctx->temp_count; ++i) {
bool is_alur = BITSET_TEST(alur, i);
+ bool is_aluw = BITSET_TEST(aluw, i);
bool is_ldst = BITSET_TEST(ldst, i);
bool is_texr = BITSET_TEST(texr, i);
bool is_texw = BITSET_TEST(texw, i);
(is_alur && (is_ldst || is_texr)) ||
(is_ldst && (is_alur || is_texr || is_texw)) ||
(is_texr && (is_alur || is_ldst)) ||
- (is_texw && (is_ldst));
+ (is_texw && (is_aluw || is_ldst));
if (!collision)
continue;
/* Use the index as-is as the work copy. Emit copies for
* special uses */
- if (is_ldst) {
+ unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4 };
+ bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw };
+
+ for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
+ if (!collisions[j]) continue;
+
+ /* When the hazard is from reading, we move and rewrite
+ * sources (typical case). When it's from writing, we
+ * flip the move and rewrite destinations (obscure,
+ * only from control flow -- impossible in SSA) */
+
+ bool hazard_write = (j == 2);
+
unsigned idx = spill_idx++;
- midgard_instruction m = v_mov(i, blank_alu_src, idx);
- midgard_instruction *use = mir_next_op(mir_find_last_write(ctx, i));
- assert(use);
- mir_insert_instruction_before(use, m);
+
+ midgard_instruction m = hazard_write ?
+ v_mov(idx, blank_alu_src, i) :
+ v_mov(i, blank_alu_src, idx);
+
+ /* Insert move after each write */
+ mir_foreach_instr_global_safe(ctx, pre_use) {
+ if (pre_use->compact_branch) continue;
+ if (pre_use->ssa_args.dest != i)
+ continue;
+
+ /* If the hazard is writing, we insert moves only
+ * after writes by the contentious class. If the
+ * hazard is reading, we insert moves after every
+ * write */
+
+ if (hazard_write && pre_use->type != classes[j])
+ continue;
+
+ midgard_instruction *use = mir_next_op(pre_use);
+ assert(use);
+ mir_insert_instruction_before(use, m);
+ }
/* Rewrite to use */
- mir_rewrite_index_src_tag(ctx, i, idx, TAG_LOAD_STORE_4);
+ if (hazard_write)
+ mir_rewrite_index_dst_tag(ctx, i, idx, classes[j]);
+ else
+ mir_rewrite_index_src_tag(ctx, i, idx, classes[j]);
}
}
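/* Editor's sketch of the hazard_write path (a TEXW node also written by
 * ALU, which can only arise from control flow, never in SSA form):
 *
 *     before:   tex  i, ...          after:   tex  idx, ...
 *               ...                           mov  i, idx
 *               alu  i, ...                   alu  i, ...
 *
 * The texture write is renamed to idx and a copy back to i follows it;
 * ALU writes of i are left untouched. */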
free(alur);
+ free(aluw);
free(ldst);
free(texr);
free(texw);
force_vec4(found_class, ins->ssa_args.src0);
force_vec4(found_class, ins->ssa_args.src1);
}
+ } else if (ins->type == TAG_TEXTURE_4) {
+ set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
+ set_class(found_class, ins->ssa_args.src0, REG_CLASS_TEXR);
+ set_class(found_class, ins->ssa_args.src1, REG_CLASS_TEXR);
}
}
mir_foreach_instr_global(ctx, ins) {
if (ins->compact_branch) continue;
- /* Non-load-store cannot read load/store */
+ assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
+
+ if (!ins->ssa_args.inline_constant)
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
- } else if (ins->load_store.op == midgard_op_st_cubemap_coords) {
- ins->load_store.reg = SSA_REG_FROM_FIXED(args.dest);
} else if (OP_IS_STORE_VARY(ins->load_store.op)) {
struct phys_reg src = index_to_reg(ctx, g, args.src0);
assert(src.reg == 26 || src.reg == 27);
* whether we are loading or storing -- think about the
* logical dataflow */
- unsigned r = OP_IS_STORE(ins->load_store.op) ?
+ bool encodes_src =
+ OP_IS_STORE(ins->load_store.op) &&
+ ins->load_store.op != midgard_op_st_cubemap_coords;
+
+ unsigned r = encodes_src ?
args.src0 : args.dest;
+
struct phys_reg src = index_to_reg(ctx, g, r);
ins->load_store.reg = src.reg;
break;
}
+ case TAG_TEXTURE_4: {
+ /* Grab RA results */
+ struct phys_reg dest = index_to_reg(ctx, g, args.dest);
+ struct phys_reg coord = index_to_reg(ctx, g, args.src0);
+ struct phys_reg lod = index_to_reg(ctx, g, args.src1);
+
+ assert(dest.reg == 28 || dest.reg == 29);
+ assert(coord.reg == 28 || coord.reg == 29);
+
+ /* First, install the texture coordinate */
+ ins->texture.in_reg_full = 1;
+ ins->texture.in_reg_upper = 0;
+ ins->texture.in_reg_select = coord.reg - 28;
+ ins->texture.in_reg_swizzle =
+ compose_swizzle(ins->texture.in_reg_swizzle, 0xF, coord, dest);
+
+ /* Next, install the destination */
+ ins->texture.out_full = 1;
+ ins->texture.out_upper = 0;
+ ins->texture.out_reg_select = dest.reg - 28;
+ ins->texture.swizzle = dest.swizzle;
+ ins->texture.mask = dest.mask;
+
+ /* If there is a register LOD/bias, use it */
+ if (args.src1 > -1) {
+ midgard_tex_register_select sel = {
+ .select = lod.reg,
+ .full = 1,
+ .component = lod.swizzle & 3,
+ };
+
+ uint8_t packed;
+ memcpy(&packed, &sel, sizeof(packed));
+ ins->texture.bias = packed;
+ }
+
+ break;
+ }
+
default:
break;
}
mir_squeeze_index(ctx);
mir_lower_special_reads(ctx);
+ /* Lowering can introduce some dead moves */
+
+ mir_foreach_block(ctx, block) {
+ midgard_opt_dead_move_eliminate(ctx, block);
+ }
+
do {
/* If we spill, find the best spill node and spill it */
* registers */
unsigned class = ra_get_node_class(g, spill_node);
bool is_special = (class >> 2) != REG_CLASS_WORK;
+ bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
/* Allocate TLS slot (maybe) */
unsigned spill_slot = !is_special ? spill_count++ : 0;
+ midgard_instruction *spill_move = NULL;
/* For TLS, replace all stores to the spilled node. For
- * special, just keep as-is; the class will be demoted
- * implicitly */
+ * special reads, just keep as-is; the class will be demoted
+ * implicitly. For special writes, spill to a work register */
- if (!is_special) {
+ if (!is_special || is_special_w) {
mir_foreach_instr_global_safe(ctx, ins) {
if (ins->compact_branch) continue;
if (ins->ssa_args.dest != spill_node) continue;
- ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
- midgard_instruction st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
- mir_insert_instruction_before(mir_next_op(ins), st);
+ midgard_instruction st;
+
+ if (is_special_w) {
+ spill_slot = spill_index++;
+ st = v_mov(spill_node, blank_alu_src, spill_slot);
+ } else {
+ ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
+ st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+ }
- ctx->spills++;
+ spill_move = mir_insert_instruction_before(mir_next_op(ins), st);
+
+ if (!is_special)
+ ctx->spills++;
}
}
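/* Editor's note: a spilled TEXW node never touches TLS. Its write is
 * instead followed by a copy into a fresh work-class temporary:
 *
 *     tex  spill_node, ...
 *     mov  spill_slot, spill_node   // spill_slot is a new temp here
 *
 * which is why ctx->spills is only counted for true (TLS) spills. */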
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
+
+ /* We can't rewrite the move used to spill in the first place */
+ if (ins == spill_move) continue;
if (!mir_has_arg(ins, spill_node)) {
consecutive_skip = false;
continue;
}
- consecutive_index = ++spill_index;
+ if (!is_special_w) {
+ consecutive_index = ++spill_index;
+
+ midgard_instruction *before = ins;
- midgard_instruction *before = ins;
+ /* For a csel, go back one more not to break up the bundle */
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
+ before = mir_prev_op(before);
- /* For a csel, go back one more not to break up the bundle */
- if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
- before = mir_prev_op(before);
+ midgard_instruction st;
- midgard_instruction st;
+ if (is_special) {
+ /* Move */
+ st = v_mov(spill_node, blank_alu_src, consecutive_index);
+ } else {
+ /* TLS load */
+ st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+ }
- if (is_special) {
- /* Move */
- st = v_mov(spill_node, blank_alu_src, consecutive_index);
+ mir_insert_instruction_before(before, st);
+ // consecutive_skip = true;
} else {
- /* TLS load */
- st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+ /* Special writes already had their move inserted at the write */
+ consecutive_index = spill_slot;
}
- mir_insert_instruction_before(before, st);
- // consecutive_skip = true;
-
/* Rewrite to use */
mir_rewrite_index_src_single(ins, spill_node, consecutive_index);