#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
-#define M_LOAD_STORE(name, store) \
+#define M_LOAD_STORE(name, store, T) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
midgard_instruction i = { \
.type = TAG_LOAD_STORE_4, \
.load_store = { \
.op = midgard_op_##name, \
.address = address \
} \
}; \
\
- if (store) \
+ if (store) { \
i.src[0] = ssa; \
- else \
+ i.src_types[0] = T; \
+ } else { \
i.dest = ssa; \
- \
+ i.dest_type = T; \
+ } \
return i; \
}
-#define M_LOAD(name) M_LOAD_STORE(name, false)
-#define M_STORE(name) M_LOAD_STORE(name, true)
+#define M_LOAD(name, T) M_LOAD_STORE(name, false, T)
+#define M_STORE(name, T) M_LOAD_STORE(name, true, T)
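+
+/* As a sketch (assuming the usual midgard_op_##name token pasting for the
+ * load/store opcode), M_LOAD(ld_attr_32, nir_type_uint32) now expands to
+ * roughly:
+ *
+ *     static midgard_instruction m_ld_attr_32(unsigned ssa, unsigned address) {
+ *             midgard_instruction i = {
+ *                     .type = TAG_LOAD_STORE_4,
+ *                     .load_store = {
+ *                             .op = midgard_op_ld_attr_32,
+ *                             .address = address
+ *                     }
+ *             };
+ *
+ *             i.dest = ssa;
+ *             i.dest_type = nir_type_uint32;
+ *             return i;
+ *     }
+ *
+ * i.e. loads now carry a typed destination, and stores a typed src[0]. */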
-/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
- * the corresponding Midgard source */
+/* Composes the given float (abs/neg) or integer (half/sext) source
+ * modifiers into the corresponding Midgard ALU source word */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count,
+vector_alu_modifiers(bool abs, bool neg, bool is_int,
bool half, bool sext)
{
-/* Figure out how many components there are so we can adjust the
- * swizzle. Specifically we want to broadcast the last channel so
- * things like ball2/3 work.
- */
- if (broadcast_count && src) {
- uint8_t last_component = src->swizzle[broadcast_count - 1];
-
- for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) {
- src->swizzle[c] = last_component;
- }
- }
-
midgard_vector_alu_src alu_src = {
.rep_low = 0,
.rep_high = 0,
};

if (is_int) {
alu_src.mod = midgard_int_normal;

/* Sign/zero-extend if needed */
if (half) {
alu_src.mod = sext ?
midgard_int_sign_extend :
midgard_int_zero_extend;
}

/* These should have been lowered away */
- if (src)
- assert(!(src->abs || src->negate));
+ assert(!(abs || neg));
} else {
- if (src)
- alu_src.mod = (src->abs << 0) | (src->negate << 1);
+ alu_src.mod = (abs << 0) | (neg << 1);
}
return alu_src;
}
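
/* Worked example of the float path above, as a sketch: a source like
 * -abs(x) reaches here as abs = true, neg = true, so with the
 * (abs << 0) | (neg << 1) packing:
 *
 *     midgard_vector_alu_src s =
 *             vector_alu_modifiers(true, true, false, false, false);
 *     // s.mod == 3: bit 0 set for abs, bit 1 set for neg
 */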
-/* load/store instructions have both 32-bit and 16-bit variants, depending on
- * whether we are using vectors composed of highp or mediump. At the moment, we
- * don't support half-floats -- this requires changes in other parts of the
- * compiler -- therefore the 16-bit versions are commented out. */
-
-//M_LOAD(ld_attr_16);
-M_LOAD(ld_attr_32);
-//M_LOAD(ld_vary_16);
-M_LOAD(ld_vary_32);
-M_LOAD(ld_ubo_int4);
-M_LOAD(ld_int4);
-M_STORE(st_int4);
-M_LOAD(ld_color_buffer_32u);
-//M_STORE(st_vary_16);
-M_STORE(st_vary_32);
-M_LOAD(ld_cubemap_coords);
-M_LOAD(ld_compute_id);
+M_LOAD(ld_attr_32, nir_type_uint32);
+M_LOAD(ld_vary_32, nir_type_uint32);
+M_LOAD(ld_ubo_int4, nir_type_uint32);
+M_LOAD(ld_int4, nir_type_uint32);
+M_STORE(st_int4, nir_type_uint32);
+M_LOAD(ld_color_buffer_32u, nir_type_uint32);
+M_STORE(st_vary_32, nir_type_uint32);
+M_LOAD(ld_cubemap_coords, nir_type_uint32);
+M_LOAD(ld_compute_id, nir_type_uint32);
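+
+/* Usage sketch (illustrative names): the generated builders pair with
+ * emit_mir_instruction, as the texture code below does with
+ * m_ld_cubemap_coords:
+ *
+ *     midgard_instruction ins = m_ld_vary_32(dest, offset);
+ *     emit_mir_instruction(ctx, ins);
+ */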
static midgard_instruction
v_branch(bool conditional, bool invert)
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+ NIR_PASS(progress, nir, nir_opt_algebraic_distribute_src_mods);
/* We implement booleans as 32-bit 0/~0 */
NIR_PASS(progress, nir, nir_lower_bool_to_int32);
/* Now that booleans are lowered, we can run out late opts */
NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
+ NIR_PASS(progress, nir, midgard_nir_cancel_inot);
- /* Lower mods for float ops only. Integer ops don't support modifiers
- * (saturate doesn't make sense on integers, neg/abs require dedicated
- * instructions) */
-
- NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
broadcast_swizzle = count; \
assert(src_bitsize == dst_bitsize); \
break;
-static bool
-nir_is_fzero_constant(nir_src src)
-{
- if (!nir_src_is_const(src))
- return false;
-
- for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
- if (nir_src_comp_as_float(src, c) != 0.0)
- return false;
- }
-
- return true;
-}
-
/* Analyze the sizes of the inputs to determine which reg mode. Ops
 * needing special treatment override this anyway. */
}
}
+/* Compare mir_lower_invert: which ops can absorb an inverted (inot'd) source */
+static bool
+nir_accepts_inot(nir_op op, unsigned src)
+{
+ switch (op) {
+ case nir_op_ior:
+ case nir_op_iand:
+ case nir_op_ixor:
+ return true;
+ case nir_op_b32csel:
+ /* Only the condition */
+ return (src == 0);
+ default:
+ return false;
+ }
+}
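+
+/* For example, reading the switch above:
+ *
+ *     nir_accepts_inot(nir_op_iand, 1)    -> true  (bitwise op, any source)
+ *     nir_accepts_inot(nir_op_b32csel, 0) -> true  (the condition)
+ *     nir_accepts_inot(nir_op_b32csel, 1) -> false (a value source)
+ *
+ * An inot feeding a value source must stay a separate instruction; one on
+ * the condition folds away via (!c) ? a : b = c ? b : a below. */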
+
+static void
+mir_copy_src(midgard_instruction *ins, nir_alu_instr *instr, unsigned i, unsigned to, bool *abs, bool *neg, bool *not, bool is_int, unsigned bcast_count)
+{
+ nir_alu_src src = instr->src[i];
+
+ if (!is_int) {
+ if (pan_has_source_mod(&src, nir_op_fneg))
+ *neg = !(*neg);
+
+ if (pan_has_source_mod(&src, nir_op_fabs))
+ *abs = true;
+ }
+
+ if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
+ *not = true;
+
+ unsigned bits = nir_src_bit_size(src.src);
+
+ ins->src[to] = nir_src_index(NULL, &src.src);
+ ins->src_types[to] = nir_op_infos[instr->op].input_types[i] | bits;
+
+ for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c) {
+ ins->swizzle[to][c] = src.swizzle[
+ (!bcast_count || c < bcast_count) ? c :
+ (bcast_count - 1)];
+ }
+}
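+
+/* Worked example of the swizzle tail above: with bcast_count = 2 and a NIR
+ * swizzle of { x, y, z, w }, the MIR swizzle becomes { x, y, y, y }: lanes
+ * past the broadcast count replicate the last meaningful channel, which is
+ * what reductions like ball2/3 expect. */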
+
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
+ nir_dest *dest = &instr->dest.dest;
+
+ if (dest->is_ssa && BITSET_TEST(ctx->already_emitted, dest->ssa.index))
+ return;
+
/* Derivatives end up emitted on the texture pipe, not the ALUs. This
* is handled elsewhere */
return;
}
- bool is_ssa = instr->dest.dest.is_ssa;
+ bool is_ssa = dest->is_ssa;
- unsigned dest = nir_dest_index(&instr->dest.dest);
- unsigned nr_components = nir_dest_num_components(instr->dest.dest);
+ unsigned nr_components = nir_dest_num_components(*dest);
unsigned nr_inputs = nir_op_infos[instr->op].num_inputs;
-
- /* Most Midgard ALU ops have a 1:1 correspondance to NIR ops; these are
- * supported. A few do not and are commented for now. Also, there are a
- * number of NIR ops which Midgard does not support and need to be
- * lowered, also TODO. This switch block emits the opcode and calling
- * convention of the Midgard instruction; actual packing is done in
- * emit_alu below */
-
- unsigned op;
+ unsigned op = 0;
/* Number of components valid to check for the instruction (the rest
* will be forced to the last), or 0 to use as-is. Relevant as
bool half_1 = false, sext_1 = false;
bool half_2 = false, sext_2 = false;
+ /* Should we swap arguments? */
+ bool flip_src12 = false;
+
unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
- unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest);
+ unsigned dst_bitsize = nir_dest_bit_size(*dest);
switch (instr->op) {
ALU_CASE(fadd, fadd);
ALU_CASE(fsin, fsin);
ALU_CASE(fcos, fcos);
- /* We'll set invert */
- ALU_CASE(inot, imov);
+ /* We'll get 0 in the second arg, so:
+ * ~a = ~(a | 0) = nor(a, 0) */
+ ALU_CASE(inot, inor);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
ALU_CASE(fabs, fmov);
ALU_CASE(fneg, fmov);
ALU_CASE(fsat, fmov);
+ ALU_CASE(fsat_signed, fmov);
+ ALU_CASE(fclamp_pos, fmov);
/* For size conversion, we use a move. Ideally though we would squash
* these ops together; maybe that has to happen after in NIR as part of
instr->op == nir_op_uge32 ? midgard_alu_op_ule :
0;
- /* Swap via temporary */
- nir_alu_src temp = instr->src[1];
- instr->src[1] = instr->src[0];
- instr->src[0] = temp;
-
+ flip_src12 = true;
break;
}
bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
op = mixed ? midgard_alu_op_icsel_v : midgard_alu_op_icsel;
- /* The condition is the first argument; move the other
- * arguments up one to be a binary instruction for
- * Midgard with the condition last */
-
- nir_alu_src temp = instr->src[2];
-
- instr->src[2] = instr->src[0];
- instr->src[0] = instr->src[1];
- instr->src[1] = temp;
-
break;
}
}
/* Midgard can perform certain modifiers on output of an ALU op */
- unsigned outmod;
- if (midgard_is_integer_out_op(op)) {
- outmod = midgard_outmod_int_wrap;
- } else {
- bool sat = instr->dest.saturate || instr->op == nir_op_fsat;
- outmod = sat ? midgard_outmod_sat : midgard_outmod_none;
- }
+ unsigned outmod = 0;
- /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
+ bool abs[4] = { false };
+ bool neg[4] = { false };
+ bool is_int = midgard_is_integer_op(op);
- if (instr->op == nir_op_fmax) {
- if (nir_is_fzero_constant(instr->src[0].src)) {
- op = midgard_alu_op_fmov;
- nr_inputs = 1;
- outmod = midgard_outmod_pos;
- instr->src[0] = instr->src[1];
- } else if (nir_is_fzero_constant(instr->src[1].src)) {
- op = midgard_alu_op_fmov;
- nr_inputs = 1;
- outmod = midgard_outmod_pos;
- }
+ if (midgard_is_integer_out_op(op)) {
+ outmod = midgard_outmod_int_wrap;
+ } else if (instr->op == nir_op_fsat) {
+ outmod = midgard_outmod_sat;
+ } else if (instr->op == nir_op_fsat_signed) {
+ outmod = midgard_outmod_sat_signed;
+ } else if (instr->op == nir_op_fclamp_pos) {
+ outmod = midgard_outmod_pos;
}
/* Fetch unit, quirks, etc. information */
unsigned opcode_props = alu_opcode_props[op].props;
bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
- /* src0 will always exist afaik, but src1 will not for 1-argument
- * instructions. The latter can only be fetched if the instruction
- * needs it, or else we may segfault. */
-
- unsigned src0 = nir_alu_src_index(ctx, &instr->src[0]);
- unsigned src1 = nr_inputs >= 2 ? nir_alu_src_index(ctx, &instr->src[1]) : ~0;
- unsigned src2 = nr_inputs == 3 ? nir_alu_src_index(ctx, &instr->src[2]) : ~0;
- assert(nr_inputs <= 3);
-
- /* Rather than use the instruction generation helpers, we do it
- * ourselves here to avoid the mess */
-
midgard_instruction ins = {
.type = TAG_ALU_4,
- .src = {
- quirk_flipped_r24 ? ~0 : src0,
- quirk_flipped_r24 ? src0 : src1,
- src2,
- ~0
- },
- .dest = dest,
+ .dest = nir_dest_index(dest),
+ .dest_type = nir_op_infos[instr->op].output_type
+ | nir_dest_bit_size(*dest),
};
- nir_alu_src *nirmods[3] = { NULL };
+ for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
+ ins.src[i] = ~0;
- if (nr_inputs >= 2) {
- nirmods[0] = &instr->src[0];
- nirmods[1] = &instr->src[1];
- } else if (nr_inputs == 1) {
- nirmods[quirk_flipped_r24] = &instr->src[0];
+ if (quirk_flipped_r24) {
+ ins.src[0] = ~0;
+ mir_copy_src(&ins, instr, 0, 1, &abs[1], &neg[1], &ins.src_invert[1], is_int, broadcast_swizzle);
} else {
- assert(0);
- }
+ for (unsigned i = 0; i < nr_inputs; ++i) {
+ unsigned to = i;
+
+ if (instr->op == nir_op_b32csel) {
+ /* The condition is the first argument; move
+ * the other arguments up one to be a binary
+ * instruction for Midgard with the condition
+ * last */
+
+ if (i == 0)
+ to = 2;
+ else if (flip_src12)
+ to = 2 - i;
+ else
+ to = i - 1;
+ } else if (flip_src12) {
+ to = 1 - to;
+ }
- if (nr_inputs == 3)
- nirmods[2] = &instr->src[2];
+ mir_copy_src(&ins, instr, i, to, &abs[to], &neg[to], &ins.src_invert[to], is_int, broadcast_swizzle);
- /* These were lowered to a move, so apply the corresponding mod */
+ /* (!c) ? a : b = c ? b : a */
+ if (instr->op == nir_op_b32csel && ins.src_invert[2]) {
+ ins.src_invert[2] = false;
+ flip_src12 ^= true;
+ }
+ }
+ }
if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) {
- nir_alu_src *s = nirmods[quirk_flipped_r24];
-
+ /* Lowered to move */
if (instr->op == nir_op_fneg)
- s->negate = !s->negate;
+ neg[1] = !neg[1];
if (instr->op == nir_op_fabs)
- s->abs = !s->abs;
+ abs[1] = true;
}
- bool is_int = midgard_is_integer_op(op);
-
ins.mask = mask_of(nr_components);
midgard_vector_alu alu = {
.dest_override = dest_override,
.outmod = outmod,
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(abs[0], neg[0], is_int, half_1, sext_1)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(abs[1], neg[1], is_int, half_2, sext_2)),
};
- /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
+ /* Apply writemask if non-SSA, keeping in mind that we can't write to
+ * components that don't exist. Note modifier => SSA => !reg => no
+ * writemask, so we don't have to worry about writemasks here. */
if (!is_ssa)
ins.mask &= instr->dest.write_mask;
- for (unsigned m = 0; m < 3; ++m) {
- if (!nirmods[m])
- continue;
-
- for (unsigned c = 0; c < NIR_MAX_VEC_COMPONENTS; ++c)
- ins.swizzle[m][c] = nirmods[m]->swizzle[c];
-
- /* Replicate. TODO: remove when vec16 lands */
- for (unsigned c = NIR_MAX_VEC_COMPONENTS; c < MIR_VEC_COMPONENTS; ++c)
- ins.swizzle[m][c] = nirmods[m]->swizzle[NIR_MAX_VEC_COMPONENTS - 1];
- }
+ ins.alu = alu;
- if (nr_inputs == 3) {
- /* Conditions can't have mods */
- assert(!nirmods[2]->abs);
- assert(!nirmods[2]->negate);
+ /* Arrange for creation of iandnot/iornot */
+ if (ins.src_invert[0] && !ins.src_invert[1]) {
+ mir_flip(&ins);
+ ins.src_invert[0] = false;
+ ins.src_invert[1] = true;
}
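+
+/* E.g. iand(inot(a), b), a sketch of the flip:
+ *
+ *     before: src = { a, b }, src_invert = { true,  false }
+ *     after:  src = { b, a }, src_invert = { false, true  }
+ *
+ * so the invert lands on src1, where it can become iandnot. */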
- ins.alu = alu;
-
/* Late fixup for emulated instructions */
if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
ins.has_inline_constant = false;
ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.src_types[1] = nir_type_float32;
ins.has_constants = true;
if (instr->op == nir_op_b2f32)
/* Lots of instructions need a 0 plonked in */
ins.has_inline_constant = false;
ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.src_types[1] = nir_type_uint32;
ins.has_constants = true;
ins.constants.u32[0] = 0;
for (unsigned c = 0; c < 16; ++c)
ins.swizzle[1][c] = 0;
- } else if (instr->op == nir_op_inot) {
- ins.invert = true;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
unsigned orig_mask = ins.mask;
+ unsigned swizzle_back[MIR_VEC_COMPONENTS];
+ memcpy(&swizzle_back, ins.swizzle[0], sizeof(swizzle_back));
+
for (int i = 0; i < nr_components; ++i) {
/* Mask the associated component, dropping the
* instruction if needed */
continue;
for (unsigned j = 0; j < MIR_VEC_COMPONENTS; ++j)
- ins.swizzle[0][j] = nirmods[0]->swizzle[i]; /* Pull from the correct component */
+ ins.swizzle[0][j] = swizzle_back[i]; /* Pull from the correct component */
emit_mir_instruction(ctx, ins);
}
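
/* E.g. a vec4 fsin on the VLUT unit is emitted as four scalar copies:
 * iteration i masks component i and broadcasts swizzle_back[i] across
 * src0, so each copy reads its original source channel. */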
if (indirect_offset) {
ins.src[2] = nir_src_index(ctx, indirect_offset);
+ ins.src_types[2] = nir_type_uint32;
ins.load_store.arg_2 = (indirect_shift << 5);
} else {
ins.load_store.arg_2 = 0x1E;
memcpy(&u, &p, sizeof(p));
ins.load_store.varying_parameters = u;
- if (indirect_offset)
+ if (indirect_offset) {
ins.src[2] = nir_src_index(ctx, indirect_offset);
- else
+ ins.src_types[2] = nir_type_uint32;
+ } else
ins.load_store.arg_2 = 0x1E;
ins.load_store.arg_1 = 0x9E;
/* Add dependencies */
ins.src[0] = src;
+ ins.src_types[0] = nir_type_uint32;
ins.constants.u32[0] = rt == MIDGARD_ZS_RT ?
0xFF : (rt - MIDGARD_COLOR_RT0) * 0x100;
struct midgard_instruction discard = v_branch(conditional, false);
discard.branch.target_type = TARGET_DISCARD;
- if (conditional)
+ if (conditional) {
discard.src[0] = nir_src_index(ctx, &instr->src[0]);
+ discard.src_types[0] = nir_type_uint32;
+ }
emit_mir_instruction(ctx, discard);
schedule_barrier(ctx);
return true;
}
-static enum mali_sampler_type
-midgard_sampler_type(nir_alu_type t) {
- switch (nir_alu_type_get_base_type(t))
- {
- case nir_type_float:
- return MALI_SAMPLER_FLOAT;
- case nir_type_int:
- return MALI_SAMPLER_SIGNED;
- case nir_type_uint:
- return MALI_SAMPLER_UNSIGNED;
- default:
- unreachable("Unknown sampler type");
- }
-}
-
static void
emit_texop_native(compiler_context *ctx, nir_tex_instr *instr,
unsigned midgard_texop)
int texture_index = instr->texture_index;
int sampler_index = texture_index;
- /* No helper to build texture words -- we do it all here */
+ nir_alu_type dest_base = nir_alu_type_get_base_type(instr->dest_type);
+ nir_alu_type dest_type = dest_base | nir_dest_bit_size(instr->dest);
+
midgard_instruction ins = {
.type = TAG_TEXTURE_4,
.mask = 0xF,
.dest = nir_dest_index(&instr->dest),
.src = { ~0, ~0, ~0, ~0 },
+ .dest_type = dest_type,
.swizzle = SWIZZLE_IDENTITY_4,
.texture = {
.op = midgard_texop,
.format = midgard_tex_format(instr->sampler_dim),
.texture_handle = texture_index,
.sampler_handle = sampler_index,
-
- /* TODO: half */
- .in_reg_full = 1,
- .out_full = 1,
-
- .sampler_type = midgard_sampler_type(instr->dest_type),
.shadow = instr->is_shadow,
}
};
+ if (instr->is_shadow && !instr->is_new_style_shadow)
+ for (int i = 0; i < 4; ++i)
+ ins.swizzle[0][i] = COMPONENT_X;
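+
+/* (Old-style shadow compares return a scalar in .x, hence the broadcast.) */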
+
/* We may need a temporary for the coordinate */
bool needs_temp_coord =
for (unsigned i = 0; i < instr->num_srcs; ++i) {
int index = nir_src_index(ctx, &instr->src[i].src);
unsigned nr_components = nir_src_num_components(instr->src[i].src);
+ unsigned sz = nir_src_bit_size(instr->src[i].src);
+ nir_alu_type T = nir_tex_instr_src_type(instr, i) | sz;
switch (instr->src[i].src_type) {
case nir_tex_src_coord: {
midgard_instruction ld = m_ld_cubemap_coords(coords, 0);
ld.src[1] = index;
+ ld.src_types[1] = T;
ld.mask = 0x3; /* xy */
ld.load_store.arg_1 = 0x20;
ld.swizzle[1][3] = COMPONENT_X;
}
ins.src[1] = coords;
+ ins.src_types[1] = T;
/* Texelfetch coordinates use all four elements
* (xyz/index) regardless of texture dimensionality,
ins.texture.lod_register = true;
ins.src[2] = index;
+ ins.src_types[2] = T;
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
ins.swizzle[2][c] = COMPONENT_X;
case nir_tex_src_offset: {
ins.texture.offset_register = true;
ins.src[3] = index;
+ ins.src_types[3] = T;
for (unsigned c = 0; c < MIR_VEC_COMPONENTS; ++c)
ins.swizzle[3][c] = (c > COMPONENT_Z) ? 0 : c;
}
emit_mir_instruction(ctx, ins);
-
- /* Used for .cont and .last hinting */
- ctx->texture_op_count++;
}
static void
/* Corner case: _two_ vec4 constants, for instance with a
* csel. For this case, we can only use a constant
* register for one, we'll have to emit a move for the
- * other. Note, if both arguments are constants, then
- * necessarily neither argument depends on the value of
- * any particular register. As the destination register
- * will be wiped, that means we can spill the constant
- * to the destination register.
- */
+ * other. */
void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
- unsigned scratch = alu->dest;
+ unsigned scratch = make_compiler_temp(ctx);
if (entry) {
midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), scratch);
}
}
-/* Being a little silly with the names, but returns the op that is the bitwise
- * inverse of the op with the argument switched. I.e. (f and g are
- * contrapositives):
- *
- * f(a, b) = ~g(b, a)
- *
- * Corollary: if g is the contrapositve of f, f is the contrapositive of g:
- *
- * f(a, b) = ~g(b, a)
- * ~f(a, b) = g(b, a)
- * ~f(a, b) = ~h(a, b) where h is the contrapositive of g
- * f(a, b) = h(a, b)
- *
- * Thus we define this function in pairs.
- */
-
-static inline midgard_alu_op
-mir_contrapositive(midgard_alu_op op)
-{
- switch (op) {
- case midgard_alu_op_flt:
- return midgard_alu_op_fle;
- case midgard_alu_op_fle:
- return midgard_alu_op_flt;
-
- case midgard_alu_op_ilt:
- return midgard_alu_op_ile;
- case midgard_alu_op_ile:
- return midgard_alu_op_ilt;
-
- default:
- unreachable("No known contrapositive");
- }
-}
-
/* Midgard supports two types of constants, embedded constants (128-bit) and
* inline constants (16-bit). Sometimes, especially with scalar ops, embedded
* constants can be demoted to inline constants, for space savings and
int op = ins->alu.op;
- if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
- bool flip = alu_opcode_props[op].props & OP_COMMUTES;
-
- switch (op) {
- /* Conditionals can be inverted */
- case midgard_alu_op_flt:
- case midgard_alu_op_ilt:
- case midgard_alu_op_fle:
- case midgard_alu_op_ile:
- ins->alu.op = mir_contrapositive(ins->alu.op);
- ins->invert = true;
- flip = true;
- break;
-
- case midgard_alu_op_fcsel:
- case midgard_alu_op_icsel:
- DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
- default:
- break;
- }
-
- if (flip)
- mir_flip(ins);
+ if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
+ alu_opcode_props[op].props & OP_COMMUTES) {
+ mir_flip(ins);
}
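+
+/* E.g. fadd(#c, y): fadd commutes, so the flip yields fadd(y, #c), leaving
+ * the constant in src1 where the demotion below can embed it as a 16-bit
+ * inline constant. */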
if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
* per block is legal semantically */
static void
-midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block)
+midgard_cull_dead_branch(compiler_context *ctx, midgard_block *block)
{
bool branched = false;
}
}
-/* fmov.pos is an idiom for fpos. Propoagate the .pos up to the source, so then
- * the move can be propagated away entirely */
-
-static bool
-mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp)
-{
- /* Nothing to do */
- if (comp == midgard_outmod_none)
- return true;
-
- if (*outmod == midgard_outmod_none) {
- *outmod = comp;
- return true;
- }
-
- /* TODO: Compose rules */
- return false;
-}
-
-static bool
-midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block)
-{
- bool progress = false;
-
- mir_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != TAG_ALU_4) continue;
- if (ins->alu.op != midgard_alu_op_fmov) continue;
- if (ins->alu.outmod != midgard_outmod_pos) continue;
-
- /* TODO: Registers? */
- unsigned src = ins->src[1];
- if (src & IS_REG) continue;
-
- /* There might be a source modifier, too */
- if (mir_nontrivial_source2_mod(ins)) continue;
-
- /* Backpropagate the modifier */
- mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
- if (v->type != TAG_ALU_4) continue;
- if (v->dest != src) continue;
-
- /* Can we even take a float outmod? */
- if (midgard_is_integer_out_op(v->alu.op)) continue;
-
- midgard_outmod_float temp = v->alu.outmod;
- progress |= mir_compose_float_outmod(&temp, ins->alu.outmod);
-
- /* Throw in the towel.. */
- if (!progress) break;
-
- /* Otherwise, transfer the modifier */
- v->alu.outmod = temp;
- ins->alu.outmod = midgard_outmod_none;
-
- break;
- }
- }
-
- return progress;
-}
-
static unsigned
emit_fragment_epilogue(compiler_context *ctx, unsigned rt)
{
EMIT(branch, true, true);
midgard_instruction *then_branch = mir_last_in_block(ctx->current_block);
then_branch->src[0] = nir_src_index(ctx, &nif->condition);
+ then_branch->src_types[0] = nir_type_uint32;
/* Emit the two subblocks. */
midgard_block *then_block = emit_cf_list(ctx, &nif->then_list);
unsigned popped = br->branch.target_block;
pan_block_add_successor(&(mir_get_block(ctx, popped - 1)->base), &ctx->current_block->base);
br->branch.target_block = emit_fragment_epilogue(ctx, rt);
+ br->branch.target_type = TARGET_GOTO;
/* If we have more RTs, we'll need to restore back after our
* loop terminates */
if ((rt + 1) < ARRAY_SIZE(ctx->writeout_branch) && ctx->writeout_branch[rt + 1]) {
midgard_instruction uncond = v_branch(false, false);
uncond.branch.target_block = popped;
+ uncond.branch.target_type = TARGET_GOTO;
emit_mir_instruction(ctx, uncond);
pan_block_add_successor(&ctx->current_block->base, &(mir_get_block(ctx, popped)->base));
schedule_barrier(ctx);
list_inithead(&ctx->blocks);
ctx->block_count = 0;
ctx->func = func;
+ ctx->already_emitted = calloc(BITSET_WORDS(func->impl->ssa_alloc), sizeof(BITSET_WORD));
emit_cf_list(ctx, &func->impl->body);
+ free(ctx->already_emitted);
break; /* TODO: Multi-function shaders */
}
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *) _block;
- progress |= midgard_opt_pos_propagate(ctx, block);
progress |= midgard_opt_copy_prop(ctx, block);
progress |= midgard_opt_dead_code_eliminate(ctx, block);
progress |= midgard_opt_combine_projection(ctx, block);
progress |= midgard_opt_varying_projection(ctx, block);
+#if 0
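+/* These passes fused inverts via the old per-instruction invert flag,
+ * which the per-source src_invert tracking above replaces. */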
progress |= midgard_opt_not_propagate(ctx, block);
progress |= midgard_opt_fuse_src_invert(ctx, block);
progress |= midgard_opt_fuse_dest_invert(ctx, block);
progress |= midgard_opt_csel_invert(ctx, block);
progress |= midgard_opt_drop_cmp_invert(ctx, block);
progress |= midgard_opt_invert_branch(ctx, block);
+#endif
}
} while (progress);
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *) _block;
- midgard_lower_invert(ctx, block);
+ //midgard_lower_invert(ctx, block);
midgard_lower_derivatives(ctx, block);
}
* them */
mir_foreach_block(ctx, _block) {
midgard_block *block = (midgard_block *) _block;
- midgard_opt_cull_dead_branch(ctx, block);
+ midgard_cull_dead_branch(ctx, block);
}
/* Ensure we were lowered */
if (ctx->stage == MESA_SHADER_FRAGMENT)
mir_add_writeout_loops(ctx);
+ /* Analyze now that the code is known but before scheduling creates
+ * pipeline registers which are harder to track */
+ mir_analyze_helper_terminate(ctx);
+ mir_analyze_helper_requirements(ctx);
+
/* Schedule! */
midgard_schedule_program(ctx);
mir_ra(ctx);