#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
-#include "lcra.h"
#include "midgard_quirks.h"
struct phys_reg {
/* Byte offset into the physical register: 0-15 */
unsigned offset;
- /* Number of bytes in a component of this register */
- unsigned size;
+ /* log2(bytes per component) for fast mul/div */
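+ /* e.g. 1 for 16-bit, 2 for 32-bit, 3 for 64-bit components */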
+ unsigned shift;
};
/* Shift up by reg_offset and horizontally by dst_offset. */
static void
-offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcsize, unsigned dst_offset)
+offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcshift, unsigned dstshift, unsigned dst_offset)
{
unsigned out[MIR_VEC_COMPONENTS];
- signed reg_comp = reg_offset / srcsize;
- signed dst_comp = dst_offset / srcsize;
+ signed reg_comp = reg_offset >> srcshift;
+ signed dst_comp = dst_offset >> dstshift;
- unsigned max_component = (16 / srcsize) - 1;
+ unsigned max_component = (16 >> srcshift) - 1;
- assert(reg_comp * srcsize == reg_offset);
- assert(dst_comp * srcsize == dst_offset);
+ assert(reg_comp << srcshift == reg_offset);
+ assert(dst_comp << dstshift == dst_offset);
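+ /* e.g. a 32-bit source (srcshift = 2) at reg_offset = 8 gives
+ * reg_comp = 2; a dst_offset of 4 with dstshift = 2 gives dst_comp = 1 */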
for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
signed comp = MAX2(c - dst_comp, 0);
/* Helper to return the default phys_reg for a given register */
static struct phys_reg
-default_phys_reg(int reg, midgard_reg_mode size)
+default_phys_reg(int reg, unsigned shift)
{
struct phys_reg r = {
.reg = reg,
.offset = 0,
- .size = mir_bytes_for_mode(size)
+ .shift = shift
};
return r;
* register corresponds to */
static struct phys_reg
-index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, midgard_reg_mode size)
+index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, unsigned shift)
{
/* Check for special cases */
if (reg == ~0)
- return default_phys_reg(REGISTER_UNUSED, size);
+ return default_phys_reg(REGISTER_UNUSED, shift);
else if (reg >= SSA_FIXED_MINIMUM)
- return default_phys_reg(SSA_REG_FROM_FIXED(reg), size);
+ return default_phys_reg(SSA_REG_FROM_FIXED(reg), shift);
else if (!l)
- return default_phys_reg(REGISTER_UNUSED, size);
+ return default_phys_reg(REGISTER_UNUSED, shift);
struct phys_reg r = {
.reg = l->solutions[reg] / 16,
.offset = l->solutions[reg] & 0xF,
- .size = mir_bytes_for_mode(size)
+ .shift = shift
};
/* Report that we actually use this register, and return it */
unsigned idx = spill_idx++;
- midgard_instruction m = hazard_write ?
- v_mov(idx, i) : v_mov(i, idx);
-
/* Insert move before each read/write, depending on the
* hazard we're trying to account for */
if (hazard_write) {
if (pre_use->dest != i)
continue;
- } else {
- if (!mir_has_arg(pre_use, i))
- continue;
- }
- if (hazard_write) {
+ midgard_instruction m = v_mov(idx, i);
+ m.dest_type = pre_use->dest_type;
+ m.src_types[1] = m.dest_type;
+ m.mask = pre_use->mask;
+
midgard_instruction *use = mir_next_op(pre_use);
assert(use);
mir_insert_instruction_before(ctx, use, m);
mir_rewrite_index_dst_single(pre_use, i, idx);
} else {
+ if (!mir_has_arg(pre_use, i))
+ continue;
+
idx = spill_idx++;
- m = v_mov(i, idx);
- m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), midgard_reg_mode_32);
+
+ midgard_instruction m = v_mov(i, idx);
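+ /* Round the read bytemask up to whole 32-bit components so the
+ * copy's mask covers everything the use reads */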
+ m.mask = mir_from_bytemask(mir_round_bytemask_up(
+ mir_bytemask_of_read_components(pre_use, i), 32), 32);
mir_insert_instruction_before(ctx, pre_use, m);
mir_rewrite_index_src_single(pre_use, i, idx);
}
free(texw);
}
-/* We register allocate after scheduling, so we need to ensure instructions
- * executing in parallel within a segment of a bundle don't clobber each
- * other's registers. This is mostly a non-issue thanks to scheduling, but
- * there are edge cases. In particular, after a register is written in a
- * segment, it interferes with anything reading. */
-
static void
-mir_compute_segment_interference(
+mir_compute_interference(
compiler_context *ctx,
- struct lcra_state *l,
- midgard_bundle *bun,
- unsigned pivot,
- unsigned i)
+ struct lcra_state *l)
{
- for (unsigned j = pivot; j < i; ++j) {
- mir_foreach_src(bun->instructions[j], s) {
- if (bun->instructions[j]->src[s] >= ctx->temp_count)
- continue;
-
- for (unsigned q = pivot; q < i; ++q) {
- if (bun->instructions[q]->dest >= ctx->temp_count)
- continue;
+ /* First, we need liveness information to be computed per block */
+ mir_compute_liveness(ctx);
- /* See dEQP-GLES2.functional.shaders.return.output_write_in_func_dynamic_fragment */
+ /* We need to force r1.w live throughout a blend shader */
- if (q >= j) {
- if (!(bun->instructions[j]->unit == UNIT_SMUL && bun->instructions[q]->unit == UNIT_VLUT))
- continue;
- }
+ if (ctx->is_blend) {
+ unsigned r1w = ~0;
- unsigned mask = mir_bytemask(bun->instructions[q]);
- unsigned rmask = mir_bytemask_of_read_components(bun->instructions[j], bun->instructions[j]->src[s]);
- lcra_add_node_interference(l, bun->instructions[q]->dest, mask, bun->instructions[j]->src[s], rmask);
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block_rev(block, ins) {
+ if (ins->writeout)
+ r1w = ins->dest;
}
- }
- }
-}
-
-static void
-mir_compute_bundle_interference(
- compiler_context *ctx,
- struct lcra_state *l,
- midgard_bundle *bun)
-{
- if (!IS_ALU(bun->tag))
- return;
- bool old = bun->instructions[0]->unit >= UNIT_VADD;
- unsigned pivot = 0;
-
- for (unsigned i = 1; i < bun->instruction_count; ++i) {
- bool new = bun->instructions[i]->unit >= UNIT_VADD;
+ if (r1w != ~0)
+ break;
+ }
- if (old != new) {
- mir_compute_segment_interference(ctx, l, bun, 0, i);
- pivot = i;
- break;
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->dest < ctx->temp_count)
+ lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
}
}
- mir_compute_segment_interference(ctx, l, bun, pivot, bun->instruction_count);
-}
-
-static void
-mir_compute_interference(
- compiler_context *ctx,
- struct lcra_state *l)
-{
- /* First, we need liveness information to be computed per block */
- mir_compute_liveness(ctx);
-
/* Now that every block has live_in/live_out computed, we can determine
* interference by walking each block linearly. Take live_out at the
* end of each block and walk the block backwards. */
- mir_foreach_block(ctx, blk) {
- uint16_t *live = mem_dup(blk->live_out, ctx->temp_count * sizeof(uint16_t));
+ mir_foreach_block(ctx, _blk) {
+ midgard_block *blk = (midgard_block *) _blk;
+ uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(blk, ins) {
/* Mark all registers live after the instruction as
mir_liveness_ins_update(live, ins, ctx->temp_count);
}
- mir_foreach_bundle_in_block(blk, bun)
- mir_compute_bundle_interference(ctx, l, bun);
-
free(live);
}
}
+static bool
+mir_is_64(midgard_instruction *ins)
+{
+ if (nir_alu_type_get_type_size(ins->dest_type) == 64)
+ return true;
+
+ mir_foreach_src(ins, v) {
+ if (nir_alu_type_get_type_size(ins->src_types[v]) == 64)
+ return true;
+ }
+
+ return false;
+}
+
/* This routine performs the actual register allocation. It should be succeeded
* by install_registers */
allocate_registers(compiler_context *ctx, bool *spilled)
{
/* The number of vec4 work registers available depends on when the
- * uniforms start, so compute that first */
- int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
+ * uniforms start and the shader stage. By ABI we limit blend shaders
+ * to 8 registers, though this should be lower (XXX) */
+ int work_count = ctx->is_blend ? 8 :
+ 16 - MAX2((ctx->uniform_cutoff - 8), 0);
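+ /* e.g. a non-blend shader with uniform_cutoff = 12 gets
+ * 16 - (12 - 8) = 12 work registers */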
/* No register allocation to do with no SSA */
if (!ctx->temp_count)
return NULL;
- struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 1, 8, 16, 5);
+ /* Initialize LCRA. Allocate an extra node at the end for a precoloured
+ * r1 for interference */
+
+ struct lcra_state *l = lcra_alloc_equations(ctx->temp_count + 1, 5);
+ unsigned node_r1 = ctx->temp_count;
/* Starts of classes, in bytes */
l->class_start[REG_CLASS_WORK] = 16 * 0;
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
+ unsigned *min_bound = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
+ /* Swizzles of 32-bit sources on 64-bit instructions need to be
+ * aligned to either bottom (xy) or top (zw). More general
+ * swizzle lowering should happen prior to scheduling (TODO),
+ * but once we get to RA we shouldn't disrupt this further. So align the
+ * sources of 64-bit instructions. */
+
+ if (ins->type == TAG_ALU_4 && mir_is_64(ins)) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = 3;
+ }
+ }
+
+ if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+ unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = (size == 64) ? 3 : 2;
+ }
+ }
+
if (ins->dest >= SSA_FIXED_MINIMUM) continue;
+ unsigned size = nir_alu_type_get_type_size(ins->dest_type);
+
+ if (ins->is_pack)
+ size = 32;
+
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
- int class = util_logbase2(ins->mask);
+ int comps1 = util_logbase2(ins->mask);
+
+ int bytes = (comps1 + 1) * (size / 8);
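+ /* e.g. a full 32-bit vec4 write (mask 0xF): comps1 = 3, bytes = 16 */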
/* Use the largest class if there's ambiguity, this
* handles partial writes */
int dest = ins->dest;
- found_class[dest] = MAX2(found_class[dest], class);
+ found_class[dest] = MAX2(found_class[dest], bytes);
- /* XXX: Ensure swizzles align the right way with more LCRA constraints? */
- if (ins->type == TAG_ALU_4 && ins->alu.reg_mode != midgard_reg_mode_32)
- min_alignment[dest] = 3; /* (1 << 3) = 8 */
+ min_alignment[dest] =
+ (size == 16) ? 1 : /* (1 << 1) = 2-byte */
+ (size == 32) ? 2 : /* (1 << 2) = 4-byte */
+ (size == 64) ? 3 : /* (1 << 3) = 8-byte */
+ 3; /* 8-bit todo */
- if (ins->type == TAG_LOAD_STORE_4 && ins->load_64)
- min_alignment[dest] = 3;
+ /* We can't cross xy/zw boundaries. TODO: vec8 can */
+ if (size == 16)
+ min_bound[dest] = 8;
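+ /* a bound of 8 bytes keeps the value within one half of the
+ * 16-byte register */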
/* We don't have a swizzle for the conditional and we don't
* want to muck with the conditional itself, so just force
* alignment for now */
- if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
min_alignment[dest] = 4; /* (1 << 4) = 16-byte = vec4 */
+ /* LCRA assumes bound >= alignment */
+ min_bound[dest] = 16;
+ }
+
+ /* Since ld/st swizzles and masks are 32-bit only, we need them
+ * aligned to enable final packing */
+ if (ins->type == TAG_LOAD_STORE_4)
+ min_alignment[dest] = MAX2(min_alignment[dest], 2);
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
- lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2);
- lcra_restrict_range(l, i, (found_class[i] + 1) * 4);
+ lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2,
+ min_bound[i] ? min_bound[i] : 16);
+ lcra_restrict_range(l, i, found_class[i]);
}
free(found_class);
free(min_alignment);
+ free(min_bound);
/* Next, we'll determine semantic class. We default to zero (work).
* But, if we're used with a special operation, that will force us to a
set_class(l->class, ins->src[1], REG_CLASS_LDST);
set_class(l->class, ins->src[2], REG_CLASS_LDST);
- if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
+ if (OP_IS_VEC4_ONLY(ins->op)) {
lcra_restrict_range(l, ins->dest, 16);
lcra_restrict_range(l, ins->src[0], 16);
lcra_restrict_range(l, ins->src[1], 16);
assert(check_read_class(l->class, ins->type, ins->src[2]));
}
- /* Mark writeout to r0, render target to r1.z, unknown to r1.w */
+ /* Mark writeout to r0, depth to r1.x, stencil to r1.y,
+ * render target to r1.z, unknown to r1.w */
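+ /* LCRA solutions are byte addresses (16 * reg + 4 * component),
+ * so e.g. r1.z below is 16 * 1 + 4 * 2 = 24 */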
mir_foreach_instr_global(ctx, ins) {
if (!(ins->compact_branch && ins->writeout)) continue;
if (ins->src[0] < ctx->temp_count)
l->solutions[ins->src[0]] = 0;
+ if (ins->src[2] < ctx->temp_count)
+ l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_X * 4;
+
+ if (ins->src[3] < ctx->temp_count)
+ l->solutions[ins->src[3]] = (16 * 1) + COMPONENT_Y * 4;
+
if (ins->src[1] < ctx->temp_count)
l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;
- if (ins->src[2] < ctx->temp_count)
- l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_W * 4;
+ if (ins->dest < ctx->temp_count)
+ l->solutions[ins->dest] = (16 * 1) + COMPONENT_W * 4;
}
-
+
+ /* Destinations of instructions in a writeout block cannot be assigned
+ * to r1 unless they are actually used as r1 from the writeout itself,
+ * since the writes to r1 are special. A code sequence like:
+ *
+ * sadd.fmov r1.x, [...]
+ * vadd.fadd r0, r1, r2
+ * [writeout branch]
+ *
+ * will misbehave since the r1.x write will be interpreted as a
+ * gl_FragDepth write, so it won't show up correctly when r1 is read in
+ * the following segment. We model this as interference.
+ */
+
+ l->solutions[node_r1] = (16 * 1);
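+ /* The phantom node is pinned to r1, so adding interference against
+ * it keeps other destinations out of r1 */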
+
+ mir_foreach_block(ctx, _blk) {
+ midgard_block *blk = (midgard_block *) _blk;
+
+ mir_foreach_bundle_in_block(blk, v) {
+ /* We need at least a writeout and a non-writeout instruction */
+ if (v->instruction_count < 2)
+ continue;
+
+ /* Branches always come at the end */
+ midgard_instruction *br = v->instructions[v->instruction_count - 1];
+
+ if (!br->writeout)
+ continue;
+
+ for (signed i = v->instruction_count - 2; i >= 0; --i) {
+ midgard_instruction *ins = v->instructions[i];
+
+ if (ins->dest >= ctx->temp_count)
+ continue;
+
+ bool used_as_r1 = (br->dest == ins->dest);
+
+ mir_foreach_src(br, s)
+ used_as_r1 |= (s > 0) && (br->src[s] == ins->dest);
+
+ if (!used_as_r1)
+ lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), node_r1, 0xFFFF);
+ }
+ }
+ }
+
+ /* Precolour blend input to r0. Writeout is necessarily at the end and
+ * blend shaders are single-RT only, so there is only a single writeout
+ * block; hence this cannot conflict with the writeout r0 and no
+ * intermediate move is needed. */
+
+ if (ctx->blend_input != ~0) {
+ assert(ctx->blend_input < ctx->temp_count);
+ l->solutions[ctx->blend_input] = 0;
+ }
+
+ /* Same for the dual-source blend input/output, except here we use r2,
+ * which is also set in the fragment shader. */
+
+ if (ctx->blend_src1 != ~0) {
+ assert(ctx->blend_src1 < ctx->temp_count);
+ l->solutions[ctx->blend_src1] = (16 * 2);
+ ctx->work_registers = MAX2(ctx->work_registers, 2);
+ }
+
mir_compute_interference(ctx, l);
*spilled = !lcra_solve(l);
return l;
}
-/* Reverses 2 bits, used to pack swizzles of offsets for some reason */
-
-static unsigned
-mir_reverse2(unsigned in)
-{
- return (in >> 1) | ((in & 1) << 1);
-}
/* Once registers have been decided via register allocation
* (allocate_registers), we need to rewrite the MIR to use registers instead of
struct lcra_state *l,
midgard_instruction *ins)
{
+ unsigned src_shift[MIR_SRC_COUNT];
+
+ for (unsigned i = 0; i < MIR_SRC_COUNT; ++i) {
+ src_shift[i] =
+ util_logbase2(nir_alu_type_get_type_size(ins->src_types[i]) / 8);
+ }
+
+ unsigned dest_shift =
+ util_logbase2(nir_alu_type_get_type_size(ins->dest_type) / 8);
+
switch (ins->type) {
case TAG_ALU_4:
case TAG_ALU_8:
if (ins->compact_branch)
return;
- struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
- struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
- struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
+ struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
+ struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
+ struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
unsigned dest_offset =
- GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
+ GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
dest.offset;
- offset_swizzle(ins->swizzle[0], src1.offset, src1.size, dest_offset);
-
- ins->registers.src1_reg = src1.reg;
-
- ins->registers.src2_imm = ins->has_inline_constant;
-
- if (ins->has_inline_constant) {
- /* Encode inline 16-bit constant. See disassembler for
- * where the algorithm is from */
-
- ins->registers.src2_reg = ins->inline_constant >> 11;
-
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
- uint16_t imm = ((lower_11 >> 8) & 0x7) |
- ((lower_11 & 0xFF) << 3);
-
- ins->alu.src2 = imm << 2;
- } else {
- midgard_vector_alu_src mod2 =
- vector_alu_from_unsigned(ins->alu.src2);
- offset_swizzle(ins->swizzle[1], src2.offset, src2.size, dest_offset);
- ins->alu.src2 = vector_alu_srco_unsigned(mod2);
-
- ins->registers.src2_reg = src2.reg;
- }
-
- ins->registers.out_reg = dest.reg;
+ offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
+ if (!ins->has_inline_constant)
+ offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
+ if (ins->src[0] != ~0)
+ ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
+ if (ins->src[1] != ~0)
+ ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
+ if (ins->dest != ~0)
+ ins->dest = SSA_FIXED_REGISTER(dest.reg);
break;
}
* whether we are loading or storing -- think about the
* logical dataflow */
- bool encodes_src = OP_IS_STORE(ins->load_store.op);
+ bool encodes_src = OP_IS_STORE(ins->op);
if (encodes_src) {
- struct phys_reg src = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
+ struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
assert(src.reg == 26 || src.reg == 27);
- ins->load_store.reg = src.reg - 26;
- offset_swizzle(ins->swizzle[0], src.offset, src.size, 0);
+ ins->src[0] = SSA_FIXED_REGISTER(src.reg);
+ offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
} else {
- struct phys_reg dst = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
+ struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);
- ins->load_store.reg = dst.reg;
- offset_swizzle(ins->swizzle[0], 0, 4, dst.offset);
+ ins->dest = SSA_FIXED_REGISTER(dst.reg);
+ offset_swizzle(ins->swizzle[0], 0, 2, 2, dst.offset);
mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
}
unsigned src3 = ins->src[2];
if (src2 != ~0) {
- struct phys_reg src = index_to_reg(ctx, l, src2, mir_srcsize(ins, 1));
- unsigned component = src.offset / src.size;
- assert(component * src.size == src.offset);
- ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
+ struct phys_reg src = index_to_reg(ctx, l, src2, src_shift[1]);
+ unsigned component = src.offset >> src.shift;
+ assert(component << src.shift == src.offset);
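+ /* e.g. a source at byte offset 8 with 32-bit components (shift 2)
+ * is component 2 of its register */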
+ ins->src[1] = SSA_FIXED_REGISTER(src.reg);
+ ins->swizzle[1][0] += component;
}
if (src3 != ~0) {
- struct phys_reg src = index_to_reg(ctx, l, src3, mir_srcsize(ins, 2));
- unsigned component = src.offset / src.size;
- assert(component * src.size == src.offset);
- ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
+ struct phys_reg src = index_to_reg(ctx, l, src3, src_shift[2]);
+ unsigned component = src.offset >> src.shift;
+ assert(component << src.shift == src.offset);
+ ins->src[2] = SSA_FIXED_REGISTER(src.reg);
+ ins->swizzle[2][0] += component;
}
break;
}
case TAG_TEXTURE_4: {
+ if (ins->op == TEXTURE_OP_BARRIER)
+ break;
+
/* Grab RA results */
- struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
- struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
- struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], mir_srcsize(ins, 2));
- struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], mir_srcsize(ins, 2));
+ struct phys_reg dest = index_to_reg(ctx, l, ins->dest, dest_shift);
+ struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], src_shift[1]);
+ struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], src_shift[2]);
+ struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);
/* First, install the texture coordinate */
- ins->texture.in_reg_full = 1;
- ins->texture.in_reg_upper = 0;
- ins->texture.in_reg_select = coord.reg & 1;
- offset_swizzle(ins->swizzle[1], coord.offset, coord.size, 0);
+ if (ins->src[1] != ~0)
+ ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
+ offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);
/* Next, install the destination */
- ins->texture.out_full = 1;
- ins->texture.out_upper = 0;
- ins->texture.out_reg_select = dest.reg & 1;
- offset_swizzle(ins->swizzle[0], 0, 4, dest.offset);
+ if (ins->dest != ~0)
+ ins->dest = SSA_FIXED_REGISTER(dest.reg);
+ offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
+ dest_shift == 1 ? dest.offset % 8 :
+ dest.offset);
mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
/* If there is a register LOD/bias, use it */
if (ins->src[2] != ~0) {
assert(!(lod.offset & 3));
- midgard_tex_register_select sel = {
- .select = lod.reg & 1,
- .full = 1,
- .component = lod.offset / 4
- };
-
- uint8_t packed;
- memcpy(&packed, &sel, sizeof(packed));
- ins->texture.bias = packed;
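+ /* The LOD/bias is scalar, so point the first swizzle entry at the
+ * 32-bit component it was allocated to */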
+ ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
+ ins->swizzle[2][0] = lod.offset / 4;
}
/* If there is an offset register, install it */
if (ins->src[3] != ~0) {
- ins->texture.offset_x =
- (1) | /* full */
- (offset.reg & 1) << 1 | /* select */
- 0 << 2; /* upper */
-
- unsigned x = offset.offset / 4;
- unsigned y = x + 1;
- unsigned z = x + 2;
-
- ins->texture.offset_y =
- mir_reverse2(y) | (mir_reverse2(x) << 2);
-
- ins->texture.offset_z =
- mir_reverse2(z);
+ ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
+ ins->swizzle[3][0] = offset.offset / 4;
}
break;
unsigned spill_class,
unsigned *spill_count)
{
+ if (spill_class == REG_CLASS_WORK && ctx->is_blend)
+ unreachable("Blend shader spilling is currently unimplemented");
+
unsigned spill_index = ctx->temp_count;
/* We have a spill node, so check the class. Work registers
if (is_special_w)
spill_slot = spill_index++;
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->dest != spill_node) continue;
midgard_instruction st;
+ /* Note: it's important to match the mask of the spill
+ * with the mask of the instruction whose destination
+ * we're spilling; otherwise we'll read invalid
+ * components and can fail RA in a subsequent iteration
+ */
+
if (is_special_w) {
st = v_mov(spill_node, spill_slot);
st.no_spill |= (1 << spill_class);
+ st.mask = ins->mask;
+ st.dest_type = st.src_types[0] = ins->dest_type;
} else {
ins->dest = spill_index++;
ins->no_spill |= (1 << spill_class);
* work registers to back special registers; TLS
* spilling is to use memory to back work registers) */
- mir_foreach_block(ctx, block) {
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block(block, ins) {
/* We can't rewrite the moves used to spill in the
* first place. These moves are hinted. */
/* Mask the load based on the component count
* actually needed to prevent RA loops */
- st.mask = mir_from_bytemask(read_bytemask, midgard_reg_mode_32);
+ st.mask = mir_from_bytemask(mir_round_bytemask_up(
+ read_bytemask, 32), 32);
mir_insert_instruction_before_scheduled(ctx, block, before, st);
} else {