#include <stdio.h>
#include <err.h>
+#include "main/mtypes.h"
#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "main/imports.h"
#include "compiler/nir/nir_builder.h"
#include "util/half_float.h"
#include "util/register_allocate.h"
+#include "util/u_debug.h"
#include "util/u_dynarray.h"
#include "util/list.h"
#include "main/mtypes.h"
#include "disassemble.h"
+static const struct debug_named_value debug_options[] = {
+ {"msgs", MIDGARD_DBG_MSGS, "Print debug messages"},
+ {"shaders", MIDGARD_DBG_SHADERS, "Dump shaders in NIR and MIR"},
+ DEBUG_NAMED_VALUE_END
+};
+
+DEBUG_GET_ONCE_FLAGS_OPTION(midgard_debug, "MIDGARD_MESA_DEBUG", debug_options, 0)
+
+int midgard_debug = 0;
+
+#define DBG(fmt, ...) \
+ do { if (midgard_debug & MIDGARD_DBG_MSGS) \
+ fprintf(stderr, "%s:%d: "fmt, \
+ __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
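+/* Usage sketch: DBG("unhandled op %d\n", op) prints with a function/line
+ * prefix, but only once midgard_debug has been initialized from the
+ * MIDGARD_MESA_DEBUG environment variable with "msgs" set (see
+ * debug_options above) */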
+
/* Instruction arguments represented as block-local SSA indices, rather than
* registers. Negative values mean unused. */
struct midgard_block;
/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to
- * the hardware), hence why that must be zero */
+ * the hardware), which is why it must be zero. TARGET_DISCARD signals this
+ * instruction is actually a discard op. */
#define TARGET_GOTO 0
#define TARGET_BREAK 1
#define TARGET_CONTINUE 2
+#define TARGET_DISCARD 3
typedef struct midgard_branch {
/* If conditional, the condition is specified in r31.w */
};
} midgard_branch;
+static bool
+midgard_is_branch_unit(unsigned unit)
+{
+ return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT);
+}
+
/* Generic in-memory data type representing a single logical instruction, rather
* than a single instruction group. This is the preferred form for code gen.
* Multiple midgard_instructions will later be combined during scheduling,
midgard_load_store_word load_store;
midgard_vector_alu alu;
midgard_texture_word texture;
+ midgard_branch_extended branch_extended;
uint16_t br_compact;
/* General branch, rather than packed br_compact. Higher level
/* Number of quadwords _actually_ emitted, as determined after scheduling */
unsigned quadword_count;
- struct midgard_block *next_fallthrough;
+ /* Successors: always one forward (the block after us), maybe
+ * one backwards (for a backward branch). No need for a second
+ * forward, since graph traversal would get there eventually
+ * anyway */
+ struct midgard_block *successors[2];
+ unsigned nr_successors;
+
+ /* The successor pointers form a graph, and in the case of
+ * complex control flow, this graph can have cycles. To aid
+ * traversal during liveness analysis, we have a visited
+ * boolean for passes to use as they see fit, provided they
+ * clean up later */
+ bool visited;
} midgard_block;
+static void
+midgard_block_add_successor(midgard_block *block, midgard_block *successor)
+{
+ block->successors[block->nr_successors++] = successor;
+ assert(block->nr_successors <= ARRAY_SIZE(block->successors));
+}
+
/* Helpers to generate midgard_instruction's using macro magic, since every
* driver seems to do it that way */
#define EMIT(op, ...) emit_mir_instruction(ctx, v_##op(__VA_ARGS__));
+#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
#define M_LOAD_STORE(name, rname, uname) \
static midgard_instruction m_##name(unsigned ssa, unsigned address) { \
.load_store = { \
.op = midgard_op_##name, \
.mask = 0xF, \
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), \
+ .swizzle = SWIZZLE_XYZW, \
.address = address \
} \
}; \
.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
};
+const midgard_vector_alu_src blank_alu_src_xxxx = {
+ .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
+};
+
const midgard_scalar_alu_src blank_scalar_alu_src = {
.full = true
};
return u;
}
+static midgard_vector_alu_src
+vector_alu_from_unsigned(unsigned u)
+{
+ midgard_vector_alu_src s;
+ memcpy(&s, &u, sizeof(s));
+ return s;
+}
+
/* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs
* the corresponding Midgard source */
static midgard_vector_alu_src
-vector_alu_modifiers(nir_alu_src *src)
+vector_alu_modifiers(nir_alu_src *src, bool is_int)
{
if (!src) return blank_alu_src;
midgard_vector_alu_src alu_src = {
- .abs = src->abs,
- .negate = src->negate,
.rep_low = 0,
.rep_high = 0,
.half = 0, /* TODO */
.swizzle = SWIZZLE_FROM_ARRAY(src->swizzle)
};
+ if (is_int) {
+ /* TODO: sign-extend/zero-extend */
+ alu_src.mod = midgard_int_normal;
+
+ /* These should have been lowered away */
+ assert(!(src->abs || src->negate));
+ } else {
+ alu_src.mod = (src->abs << 0) | (src->negate << 1);
+ }
+
return alu_src;
}
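+/* Checks whether a source carries modifiers that would change the result if
+ * dropped: float abs/neg counts, as does any swizzle that permutes a
+ * component live in the writemask. The copy propagation pass below uses this
+ * to tell pure moves apart from moves with side effects */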
+static bool
+mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask)
+{
+ /* abs or neg */
+ if (!is_int && src.mod) return true;
+
+ /* swizzle */
+ for (unsigned c = 0; c < 4; ++c) {
+ if (!(mask & (1 << c))) continue;
+ if (((src.swizzle >> (2*c)) & 3) != c) return true;
+ }
+
+ return false;
+}
+
/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */
static midgard_instruction
},
.alu = {
.op = midgard_alu_op_fmov,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(zero_alu_src),
M_LOAD(load_color_buffer_8);
//M_STORE(store_vary_16);
M_STORE(store_vary_32);
+M_STORE(store_cubemap_coords);
static midgard_instruction
v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond)
{
midgard_instruction ins = {
.type = TAG_ALU_4,
- .unit = ALU_ENAB_BR_COMPACT,
+ .unit = ALU_ENAB_BRANCH,
.compact_branch = true,
.branch = {
.conditional = conditional,
return ins;
}
+static midgard_branch_extended
+midgard_create_branch_extended( midgard_condition cond,
+ midgard_jmp_writeout_op op,
+ unsigned dest_tag,
+ signed quadword_offset)
+{
+ /* For unclear reasons, the condition code is repeated 8 times */
+ uint16_t duplicated_cond =
+ (cond << 14) |
+ (cond << 12) |
+ (cond << 10) |
+ (cond << 8) |
+ (cond << 6) |
+ (cond << 4) |
+ (cond << 2) |
+ (cond << 0);
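+ /* (e.g. a two-bit cond of 0x1 packs to 0x5555) */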
+
+ midgard_branch_extended branch = {
+ .op = op,
+ .dest_tag = dest_tag,
+ .offset = quadword_offset,
+ .cond = duplicated_cond
+ };
+
+ return branch;
+}
+
typedef struct midgard_bundle {
/* Tag for the overall bundle */
int tag;
/* List of midgard_instructions emitted for the current block */
midgard_block *current_block;
- /* The index corresponding to the current loop, e.g. for breaks/contineus */
- int current_loop;
+ /* The current "depth" of the loop, for disambiguating breaks/continues
+ * when using nested loops */
+ int current_loop_depth;
/* Constants which have been loaded, for later inlining */
struct hash_table_u64 *ssa_constants;
int temp_count;
int max_hash;
- /* Uniform IDs for mdg */
- struct hash_table_u64 *uniform_nir_to_mdg;
- int uniform_count;
-
- struct hash_table_u64 *varying_nir_to_mdg;
- int varying_count;
-
/* Just the count of the max register used. Higher count => higher
* register pressure */
int work_registers;
/* Mapping of texture register -> SSA index for unaliasing */
int texture_index[2];
- /* Count of special uniforms (viewport, etc) in vec4 units */
- int special_uniforms;
-
/* If any path hits a discard instruction */
bool can_discard;
/* The index corresponding to the fragment output */
unsigned fragment_output;
+
+ /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */
+ unsigned sysvals[MAX_SYSVAL_COUNT];
+ unsigned sysval_count;
+ struct hash_table_u64 *sysval_to_id;
} compiler_context;
/* Append instruction to end of current block */
return list_first_entry(&(ins->link), midgard_instruction, link);
}
-static midgard_block *
-mir_next_block(struct midgard_block *blk)
-{
- return list_first_entry(&(blk->link), midgard_block, link);
-}
-
-
#define mir_foreach_block(ctx, v) list_for_each_entry(struct midgard_block, v, &ctx->blocks, link)
#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link)
#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_safe_rev(block, v) list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link)
#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link)
+#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link)
static midgard_instruction *
switch (ins->type) {
case TAG_ALU_4: {
midgard_alu_op op = ins->alu.op;
- const char *name = alu_opcode_names[op];
+ const char *name = alu_opcode_props[op].name;
if (ins->unit)
printf("%d.", ins->unit);
printf("}\n");
}
-
-
static void
attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name)
{
}
static int
-glsl_type_size(const struct glsl_type *type)
+glsl_type_size(const struct glsl_type *type, bool bindless)
{
return glsl_count_attribute_slots(type, false);
}
nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(sum));
}
+/* Lower csel with mixed condition channels to multiple csel instructions. For
+ * context, the csel ops on Midgard are vector in *outputs*, but not in
+ * *conditions*. So, if the condition is e.g. yyyy, a single op can select a
+ * vec4. But if the condition is e.g. xyzw, four ops are needed, as the ISA
+ * can't cope with the divergent channels. */
+
+static void
+midgard_nir_lower_mixed_csel_body(nir_builder *b, nir_alu_instr *alu)
+{
+ if (alu->op != nir_op_bcsel)
+ return;
+
+ b->cursor = nir_before_instr(&alu->instr);
+
+ /* Must be run before registering */
+ assert(alu->dest.dest.is_ssa);
+
+ /* Check for mixed condition */
+
+ unsigned comp = alu->src[0].swizzle[0];
+ unsigned nr_components = alu->dest.dest.ssa.num_components;
+
+ bool mixed = false;
+
+ for (unsigned c = 1; c < nr_components; ++c)
+ mixed |= (alu->src[0].swizzle[c] != comp);
+
+ if (!mixed)
+ return;
+
+ /* We're mixed, so lower */
+
+ assert(nr_components <= 4);
+ nir_ssa_def *results[4];
+
+ nir_ssa_def *cond = nir_ssa_for_alu_src(b, alu, 0);
+ nir_ssa_def *choice0 = nir_ssa_for_alu_src(b, alu, 1);
+ nir_ssa_def *choice1 = nir_ssa_for_alu_src(b, alu, 2);
+
+ for (unsigned c = 0; c < nr_components; ++c) {
+ results[c] = nir_bcsel(b,
+ nir_channel(b, cond, c),
+ nir_channel(b, choice0, c),
+ nir_channel(b, choice1, c));
+ }
+
+ /* Replace with our scalarized version */
+
+ nir_ssa_def *result = nir_vec(b, results, nr_components);
+ nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(result));
+}
+
+static int
+midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr)
+{
+ switch (instr->intrinsic) {
+ case nir_intrinsic_load_viewport_scale:
+ return PAN_SYSVAL_VIEWPORT_SCALE;
+ case nir_intrinsic_load_viewport_offset:
+ return PAN_SYSVAL_VIEWPORT_OFFSET;
+ default:
+ return -1;
+ }
+}
+
+static void
+midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr)
+{
+ int sysval = -1;
+
+ if (instr->type == nir_instr_type_intrinsic) {
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ sysval = midgard_nir_sysval_for_intrinsic(intr);
+ }
+
+ if (sysval < 0)
+ return;
+
+ /* We have a sysval load; check if it's already been assigned */
+
+ if (_mesa_hash_table_u64_search(ctx->sysval_to_id, sysval))
+ return;
+
+ /* It hasn't -- so assign it now! */
+
+ unsigned id = ctx->sysval_count++;
+ _mesa_hash_table_u64_insert(ctx->sysval_to_id, sysval, (void *) ((uintptr_t) id + 1));
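+ /* (Stored off-by-one so a zero return from the search above means
+ * "not found" rather than sysval id 0) */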
+ ctx->sysvals[id] = sysval;
+}
+
+static void
+midgard_nir_assign_sysvals(compiler_context *ctx, nir_shader *shader)
+{
+ ctx->sysval_count = 0;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl) continue;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ midgard_nir_assign_sysval_body(ctx, instr);
+ }
+ }
+ }
+}
+
static bool
midgard_nir_lower_fdot2(nir_shader *shader)
{
return progress;
}
+static bool
+midgard_nir_lower_mixed_csel(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl) continue;
+
+ nir_builder _b;
+ nir_builder *b = &_b;
+ nir_builder_init(b, function->impl);
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr_safe(instr, block) {
+ if (instr->type != nir_instr_type_alu) continue;
+
+ nir_alu_instr *alu = nir_instr_as_alu(instr);
+ midgard_nir_lower_mixed_csel_body(b, alu);
+
+ progress |= true;
+ }
+ }
+
+ nir_metadata_preserve(function->impl, nir_metadata_block_index | nir_metadata_dominance);
+
+ }
+
+ return progress;
+}
+
static void
optimise_nir(nir_shader *nir)
{
bool progress;
+ unsigned lower_flrp =
+ (nir->options->lower_flrp16 ? 16 : 0) |
+ (nir->options->lower_flrp32 ? 32 : 0) |
+ (nir->options->lower_flrp64 ? 64 : 0);
NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
+ NIR_PASS(progress, nir, midgard_nir_lower_mixed_csel);
nir_lower_tex_options lower_tex_options = {
.lower_rect = true
do {
progress = false;
- NIR_PASS(progress, nir, midgard_nir_lower_algebraic);
- NIR_PASS(progress, nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
NIR_PASS(progress, nir, nir_lower_var_copies);
NIR_PASS(progress, nir, nir_lower_vars_to_ssa);
NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
NIR_PASS(progress, nir, nir_opt_algebraic);
NIR_PASS(progress, nir, nir_opt_constant_folding);
+
+ if (lower_flrp != 0) {
+ bool lower_flrp_progress;
+ NIR_PASS(lower_flrp_progress,
+ nir,
+ nir_lower_flrp,
+ lower_flrp,
+ false /* always_precise */,
+ nir->options->lower_ffma);
+ if (lower_flrp_progress) {
+ NIR_PASS(progress, nir,
+ nir_opt_constant_folding);
+ progress = true;
+ }
+
+ /* Nothing should rematerialize any flrps, so we only
+ * need to do this lowering once.
+ */
+ lower_flrp = 0;
+ }
+
NIR_PASS(progress, nir, nir_opt_undef);
NIR_PASS(progress, nir, nir_opt_loop_unroll,
nir_var_shader_in |
} while (progress);
NIR_PASS(progress, nir, nir_opt_algebraic_late);
+ NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late);
- /* Lower mods */
- NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_all_source_mods);
+ /* Lower mods for float ops only. Integer ops don't support modifiers
+ * (saturate doesn't make sense on integers, neg/abs require dedicated
+ * instructions) */
+
+ NIR_PASS(progress, nir, nir_lower_to_source_mods, nir_lower_float_source_mods);
NIR_PASS(progress, nir, nir_copy_prop);
NIR_PASS(progress, nir, nir_opt_dce);
+ /* We implement booleans as 32-bit 0/~0 */
+ NIR_PASS(progress, nir, nir_lower_bool_to_int32);
+
/* Take us out of SSA */
NIR_PASS(progress, nir, nir_lower_locals_to_regs);
NIR_PASS(progress, nir, nir_convert_from_ssa, true);
nir_ssa_def def = instr->def;
float *v = ralloc_array(NULL, float, 4);
- memcpy(v, &instr->value.f32, 4 * sizeof(float));
+ nir_const_load_to_arr(v, instr, f32);
_mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v);
}
/* Channel count is off-by-one to fit in two bits (a count of zero channels
* makes no sense) */
- unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op]);
+ unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op].props);
/* If there is a fixed channel count, construct the appropriate mask */
{
if (src->is_ssa)
return src->ssa->index;
- else
+ else {
+ assert(!src->reg.indirect);
return ctx->func->impl->ssa_alloc + src->reg.reg->index;
+ }
}
static unsigned
{
if (dst->is_ssa)
return dst->ssa.index;
- else
+ else {
+ assert(!dst->reg.indirect);
return ctx->func->impl->ssa_alloc + dst->reg.reg->index;
+ }
}
static unsigned
* a conditional test) into that register */
static void
-emit_condition(compiler_context *ctx, nir_src *src, bool for_branch)
+emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component)
{
- /* XXX: Force component correct */
int condition = nir_src_index(ctx, src);
+ /* Source to swizzle the desired component into w */
+
const midgard_vector_alu_src alu_src = {
- .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X),
+ .swizzle = SWIZZLE(component, component, component, component),
};
/* There is no boolean move instruction. Instead, we simulate a move by
},
.alu = {
.op = midgard_alu_op_iand,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.mask = (0x3 << 6), /* w */
.src1 = vector_alu_srco_unsigned(alu_src),
emit_mir_instruction(ctx, ins);
}
+/* Likewise, indirect offsets are put in r27.w. TODO: Allow componentwise
+ * pinning to eliminate this move in all known cases */
+
+static void
+emit_indirect_offset(compiler_context *ctx, nir_src *src)
+{
+ int offset = nir_src_index(ctx, src);
+
+ midgard_instruction ins = {
+ .type = TAG_ALU_4,
+ .ssa_args = {
+ .src0 = SSA_UNUSED_1,
+ .src1 = offset,
+ .dest = SSA_FIXED_REGISTER(REGISTER_OFFSET),
+ },
+ .alu = {
+ .op = midgard_alu_op_imov,
+ .reg_mode = midgard_reg_mode_32,
+ .dest_override = midgard_dest_override_none,
+ .mask = (0x3 << 6), /* w */
+ .src1 = vector_alu_srco_unsigned(zero_alu_src),
+ .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx)
+ },
+ };
+
+ emit_mir_instruction(ctx, ins);
+}
+
#define ALU_CASE(nir, _op) \
case nir_op_##nir: \
op = midgard_alu_op_##_op; \
break;
+static bool
+nir_is_fzero_constant(nir_src src)
+{
+ if (!nir_src_is_const(src))
+ return false;
+
+ for (unsigned c = 0; c < nir_src_num_components(src); ++c) {
+ if (nir_src_comp_as_float(src, c) != 0.0)
+ return false;
+ }
+
+ return true;
+}
+
static void
emit_alu(compiler_context *ctx, nir_alu_instr *instr)
{
ALU_CASE(fmax, fmax);
ALU_CASE(imin, imin);
ALU_CASE(imax, imax);
+ ALU_CASE(umin, umin);
+ ALU_CASE(umax, umax);
ALU_CASE(fmov, fmov);
ALU_CASE(ffloor, ffloor);
+ ALU_CASE(fround_even, froundeven);
+ ALU_CASE(ftrunc, ftrunc);
ALU_CASE(fceil, fceil);
ALU_CASE(fdot3, fdot3);
ALU_CASE(fdot4, fdot4);
ALU_CASE(iadd, iadd);
ALU_CASE(isub, isub);
ALU_CASE(imul, imul);
+ ALU_CASE(iabs, iabs);
+ ALU_CASE(imov, imov);
+
+ ALU_CASE(feq32, feq);
+ ALU_CASE(fne32, fne);
+ ALU_CASE(flt32, flt);
+ ALU_CASE(ieq32, ieq);
+ ALU_CASE(ine32, ine);
+ ALU_CASE(ilt32, ilt);
+ ALU_CASE(ult32, ult);
+
+ /* We don't have a native b2f32 instruction. Instead, like many
+ * GPUs, we exploit booleans as 0/~0 for false/true, and
+ * correspondingly AND by 1.0 to do the type conversion. For the
+ * moment, prime us to emit:
+ *
+ * iand [whatever], #0
+ *
+ * At the end of emit_alu (as MIR), we'll fix-up the constant
+ */
+
+ ALU_CASE(b2f32, iand);
+ ALU_CASE(b2i32, iand);
- /* XXX: Use fmov, not imov, since imov was causing major
- * issues with texture precision? XXX research */
- ALU_CASE(imov, fmov);
+ /* Likewise, we don't have a dedicated f2b32 instruction, but
+ * we can do a "not equal to 0.0" test. */
- ALU_CASE(feq, feq);
- ALU_CASE(fne, fne);
- ALU_CASE(flt, flt);
- ALU_CASE(ieq, ieq);
- ALU_CASE(ine, ine);
- ALU_CASE(ilt, ilt);
+ ALU_CASE(f2b32, fne);
+ ALU_CASE(i2b32, ine);
ALU_CASE(frcp, frcp);
ALU_CASE(frsq, frsqrt);
ALU_CASE(iand, iand);
ALU_CASE(ior, ior);
ALU_CASE(ixor, ixor);
- ALU_CASE(inot, inot);
+ ALU_CASE(inot, inand);
ALU_CASE(ishl, ishl);
ALU_CASE(ishr, iasr);
ALU_CASE(ushr, ilsr);
- ALU_CASE(ball_fequal4, fball_eq);
- ALU_CASE(bany_fnequal4, fbany_neq);
- ALU_CASE(ball_iequal4, iball_eq);
- ALU_CASE(bany_inequal4, ibany_neq);
+ ALU_CASE(b32all_fequal2, fball_eq);
+ ALU_CASE(b32all_fequal3, fball_eq);
+ ALU_CASE(b32all_fequal4, fball_eq);
+
+ ALU_CASE(b32any_fnequal2, fbany_neq);
+ ALU_CASE(b32any_fnequal3, fbany_neq);
+ ALU_CASE(b32any_fnequal4, fbany_neq);
+ ALU_CASE(b32all_iequal2, iball_eq);
+ ALU_CASE(b32all_iequal3, iball_eq);
+ ALU_CASE(b32all_iequal4, iball_eq);
+
+ ALU_CASE(b32any_inequal2, ibany_neq);
+ ALU_CASE(b32any_inequal3, ibany_neq);
+ ALU_CASE(b32any_inequal4, ibany_neq);
+
- /* For greater-or-equal, we use less-or-equal and flip the
+ /* For greater-or-equal, we lower to less-or-equal and flip the
* arguments */
- case nir_op_ige: {
- op = midgard_alu_op_ile;
+ case nir_op_fge:
+ case nir_op_fge32:
+ case nir_op_ige32:
+ case nir_op_uge32: {
+ op =
+ instr->op == nir_op_fge ? midgard_alu_op_fle :
+ instr->op == nir_op_fge32 ? midgard_alu_op_fle :
+ instr->op == nir_op_ige32 ? midgard_alu_op_ile :
+ instr->op == nir_op_uge32 ? midgard_alu_op_ule :
+ 0;
/* Swap via temporary */
nir_alu_src temp = instr->src[1];
break;
}
- case nir_op_bcsel: {
- op = midgard_alu_op_fcsel;
- /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
- nr_inputs = 2;
- emit_condition(ctx, &instr->src[0].src, false);
- /* The condition is the first argument; move the other
- * arguments up one to be a binary instruction for
- * Midgard */
- memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
- break;
- }
- /* We don't have a native b2f32 instruction. Instead, like many GPUs,
- * we exploit booleans as 0/~0 for false/true, and correspondingly AND
- * by 1.0 to do the type conversion. For the moment, prime us to emit:
- *
- * iand [whatever], #0
- *
- * At the end of emit_alu (as MIR), we'll fix-up the constant */
- case nir_op_b2f32: {
- op = midgard_alu_op_iand;
+ /* For a few special csel cases not handled by NIR, we can opt to
+ * bitwise. Otherwise, we emit the condition and do a real csel */
+
+ case nir_op_b32csel: {
+ if (nir_is_fzero_constant(instr->src[2].src)) {
+ /* (b ? v : 0) = (b & v) */
+ op = midgard_alu_op_iand;
+ nr_inputs = 2;
+ } else if (nir_is_fzero_constant(instr->src[1].src)) {
+ /* (b ? 0 : v) = (!b ? v : 0) = (~b & v) = (v & ~b) */
+ op = midgard_alu_op_iandnot;
+ nr_inputs = 2;
+ instr->src[1] = instr->src[0];
+ instr->src[0] = instr->src[2];
+ } else {
+ /* Midgard features both fcsel and icsel, depending on
+ * the type of the arguments/output. However, as long
+ * as we're careful we can _always_ use icsel and
+ * _never_ need fcsel, since the latter does additional
+ * floating-point-specific processing whereas the
+ * former just moves bits on the wire. It's not obvious
+ * why these are separate opcodes, save for the ability
+ * to do things like sat/pos/abs/neg for free */
+ op = midgard_alu_op_icsel;
+ /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */
+ nr_inputs = 2;
+ /* Figure out which component the condition is in */
+ unsigned comp = instr->src[0].swizzle[0];
+ /* Make sure NIR isn't throwing a mixed condition at us */
+
+ for (unsigned c = 1; c < nr_components; ++c)
+ assert(instr->src[0].swizzle[c] == comp);
+
+ /* Emit the condition into r31.w */
+ emit_condition(ctx, &instr->src[0].src, false, comp);
+ /* The condition is the first argument; move the other
+ * arguments up one to be a binary instruction for
+ * Midgard */
+
+ memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src));
+ }
break;
}
default:
- printf("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
+ DBG("Unhandled ALU op %s\n", nir_op_infos[instr->op].name);
assert(0);
return;
}
+ /* Midgard can perform certain modifiers on the output of an ALU op */
+ midgard_outmod outmod =
+ instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
+
+ /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */
+
+ if (instr->op == nir_op_fmax) {
+ if (nir_is_fzero_constant(instr->src[0].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ instr->src[0] = instr->src[1];
+ } else if (nir_is_fzero_constant(instr->src[1].src)) {
+ op = midgard_alu_op_fmov;
+ nr_inputs = 1;
+ outmod = midgard_outmod_pos;
+ }
+ }
+
/* Fetch unit, quirks, etc information */
- unsigned opcode_props = alu_opcode_props[op];
+ unsigned opcode_props = alu_opcode_props[op].props;
bool quirk_flipped_r24 = opcode_props & QUIRK_FLIPPED_R24;
- /* Initialise fields common between scalar/vector instructions */
- midgard_outmod outmod = instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none;
-
/* src0 will always exist afaik, but src1 will not for 1-argument
* instructions. The latter can only be fetched if the instruction
* needs it, or else we may segfault. */
.src0 = quirk_flipped_r24 ? SSA_UNUSED_1 : src0,
.src1 = quirk_flipped_r24 ? src0 : src1,
.dest = dest,
- .inline_constant = (nr_inputs == 1) && !quirk_flipped_r24
}
};
assert(0);
}
+ bool is_int = midgard_is_integer_op(op);
+
midgard_vector_alu alu = {
.op = op,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = outmod,
/* Writemask only valid for non-SSA NIR */
.mask = expand_writemask((1 << nr_components) - 1),
- .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0])),
- .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1])),
+ .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)),
+ .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)),
};
/* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */
/* Late fixup for emulated instructions */
- if (instr->op == nir_op_b2f32) {
+ if (instr->op == nir_op_b2f32 || instr->op == nir_op_b2i32) {
/* Presently, our second argument is an inline #0 constant.
* Switch over to an embedded 1.0 constant (that can't fit
* inline, since we're 32-bit, not 16-bit like the inline
ins.ssa_args.inline_constant = false;
ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
ins.has_constants = true;
- ins.constants[0] = 1.0;
+
+ if (instr->op == nir_op_b2f32) {
+ ins.constants[0] = 1.0f;
+ } else {
+ /* Type pun it into place */
+ uint32_t one = 0x1;
+ memcpy(&ins.constants[0], &one, sizeof(uint32_t));
+ }
+
+ ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+ } else if (instr->op == nir_op_f2b32 || instr->op == nir_op_i2b32) {
+ ins.ssa_args.inline_constant = false;
+ ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
+ ins.has_constants = true;
+ ins.constants[0] = 0.0f;
+ ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx);
+ } else if (instr->op == nir_op_inot) {
+ /* ~b = ~(b & b), so duplicate the source */
+ ins.ssa_args.src1 = ins.ssa_args.src0;
+ ins.alu.src2 = ins.alu.src1;
}
if ((opcode_props & UNITS_ALL) == UNIT_VLUT) {
for (int j = 0; j < 4; ++j)
nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */
- ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0]));
+ ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int));
emit_mir_instruction(ctx, ins);
}
} else {
#undef ALU_CASE
+static void
+emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset)
+{
+ /* TODO: half-floats */
+
+ if (!indirect_offset && offset < ctx->uniform_cutoff) {
+ /* Fast path: For the first 16 uniforms, direct accesses are
+ * 0-cycle, since they're just a register fetch in the usual
+ * case. So, we alias the registers while we're still in
+ * SSA-space */
+
+ int reg_slot = 23 - offset;
+ alias_ssa(ctx, dest, SSA_FIXED_REGISTER(reg_slot));
+ } else {
+ /* Otherwise, read from the 'special' UBO to access
+ * higher-indexed uniforms, at a performance cost. More
+ * generally, we're emitting a UBO read instruction. */
+
+ midgard_instruction ins = m_load_uniform_32(dest, offset);
+
+ /* TODO: Don't split */
+ ins.load_store.varying_parameters = (offset & 7) << 7;
+ ins.load_store.address = offset >> 3;
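+ /* (The low 3 bits of the offset ride in varying_parameters;
+ * the remaining bits form the address proper) */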
+
+ if (indirect_offset) {
+ emit_indirect_offset(ctx, indirect_offset);
+ ins.load_store.unknown = 0x8700; /* xxx: what is this? */
+ } else {
+ ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
+ }
+
+ emit_mir_instruction(ctx, ins);
+ }
+}
+
+static void
+emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr)
+{
+ /* First, pull out the destination */
+ unsigned dest = nir_dest_index(ctx, &instr->dest);
+
+ /* Now, figure out which uniform this is */
+ int sysval = midgard_nir_sysval_for_intrinsic(instr);
+ void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval);
+
+ /* Sysvals are prefix uniforms */
+ unsigned uniform = ((uintptr_t) val) - 1;
+
+ /* Emit the read itself -- this is never indirect */
+ emit_uniform_read(ctx, dest, uniform, NULL);
+}
+
static void
emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
{
- nir_const_value *const_offset;
unsigned offset, reg;
switch (instr->intrinsic) {
case nir_intrinsic_discard_if:
- emit_condition(ctx, &instr->src[0], true);
+ emit_condition(ctx, &instr->src[0], true, COMPONENT_X);
/* fallthrough */
case nir_intrinsic_discard: {
- midgard_condition cond = instr->intrinsic == nir_intrinsic_discard_if ?
- midgard_condition_true : midgard_condition_always;
+ bool conditional = instr->intrinsic == nir_intrinsic_discard_if;
+ struct midgard_instruction discard = v_branch(conditional, false);
+ discard.branch.target_type = TARGET_DISCARD;
+ emit_mir_instruction(ctx, discard);
- EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_discard, 0, 2, cond);
ctx->can_discard = true;
break;
}
case nir_intrinsic_load_uniform:
case nir_intrinsic_load_input:
- const_offset = nir_src_as_const_value(instr->src[0]);
- assert (const_offset && "no indirect inputs");
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ offset = nir_intrinsic_base(instr);
+ bool direct = nir_src_is_const(instr->src[0]);
+
+ if (direct) {
+ offset += nir_src_as_uint(instr->src[0]);
+ }
reg = nir_dest_index(ctx, &instr->dest);
if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) {
- /* TODO: half-floats */
-
- int uniform_offset = 0;
-
- if (offset >= SPECIAL_UNIFORM_BASE) {
- /* XXX: Resolve which uniform */
- uniform_offset = 0;
- } else {
- /* Offset away from the special
- * uniform block */
-
- void *entry = _mesa_hash_table_u64_search(ctx->uniform_nir_to_mdg, offset + 1);
-
- /* XXX */
- if (!entry) {
- printf("WARNING: Unknown uniform %d\n", offset);
- break;
- }
-
- uniform_offset = (uintptr_t) (entry) - 1;
- uniform_offset += ctx->special_uniforms;
- }
-
- if (uniform_offset < ctx->uniform_cutoff) {
- /* Fast path: For the first 16 uniform,
- * accesses are 0-cycle, since they're
- * just a register fetch in the usual
- * case. So, we alias the registers
- * while we're still in SSA-space */
-
- int reg_slot = 23 - uniform_offset;
- alias_ssa(ctx, reg, SSA_FIXED_REGISTER(reg_slot));
- } else {
- /* Otherwise, read from the 'special'
- * UBO to access higher-indexed
- * uniforms, at a performance cost */
-
- midgard_instruction ins = m_load_uniform_32(reg, uniform_offset);
-
- /* TODO: Don't split */
- ins.load_store.varying_parameters = (uniform_offset & 7) << 7;
- ins.load_store.address = uniform_offset >> 3;
-
- ins.load_store.unknown = 0x1E00; /* xxx: what is this? */
- emit_mir_instruction(ctx, ins);
- }
+ emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL);
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
/* XXX: Half-floats? */
/* TODO: swizzle, mask */
memcpy(&u, &p, sizeof(p));
ins.load_store.varying_parameters = u;
- ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ if (direct) {
+ /* We have the offset totally ready */
+ ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */
+ } else {
+ /* We have it partially ready, but we need to
+ * add in the dynamic index, moved to r27.w */
+ emit_indirect_offset(ctx, &instr->src[0]);
+ ins.load_store.unknown = 0x79e; /* xxx: what is this? */
+ }
+
emit_mir_instruction(ctx, ins);
} else if (ctx->is_blend && instr->intrinsic == nir_intrinsic_load_uniform) {
/* Constant encoded as a pinned constant */
emit_mir_instruction(ctx, ins);
}
- /* vadd.u2f hr2, abs(hr2), #0 */
+ /* vadd.u2f hr2, zext(hr2), #0 */
midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.abs = true;
+ alu_src.mod = midgard_int_zero_extend;
alu_src.half = true;
midgard_instruction u2f = {
},
.alu = {
.op = midgard_alu_op_u2f,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_none,
.mask = 0xF,
.src1 = vector_alu_srco_unsigned(alu_src),
/* vmul.fmul.sat r1, hr2, #0.00392151 */
- alu_src.abs = false;
+ alu_src.mod = 0;
midgard_instruction fmul = {
.type = TAG_ALU_4,
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_none,
.outmod = midgard_outmod_sat,
.mask = 0xFF,
emit_mir_instruction(ctx, fmul);
} else {
- printf("Unknown input in blend shader\n");
+ DBG("Unknown input in blend shader\n");
assert(0);
}
} else if (ctx->stage == MESA_SHADER_VERTEX) {
ins.load_store.mask = (1 << instr->num_components) - 1;
emit_mir_instruction(ctx, ins);
} else {
- printf("Unknown load\n");
+ DBG("Unknown load\n");
assert(0);
}
break;
case nir_intrinsic_store_output:
- const_offset = nir_src_as_const_value(instr->src[1]);
- assert(const_offset && "no indirect outputs");
- offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+ assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
+ offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
reg = nir_src_index(ctx, &instr->src[0]);
* guarantee correctness when considering some
* (common) edge cases XXX: FIXME */
- /* Look up how it was actually laid out */
-
- void *entry = _mesa_hash_table_u64_search(ctx->varying_nir_to_mdg, offset + 1);
-
- if (!entry) {
- printf("WARNING: skipping varying\n");
- break;
- }
-
- offset = (uintptr_t) (entry) - 1;
-
/* If this varying corresponds to a constant (why?!),
* emit that now since it won't get picked up by
* hoisting (since there is no corresponding move
_mesa_hash_table_u64_insert(ctx->ssa_varyings, reg + 1, (void *) ((uintptr_t) (offset + 1)));
}
} else {
- printf("Unknown store\n");
+ DBG("Unknown store\n");
assert(0);
}
_mesa_hash_table_u64_insert(ctx->ssa_constants, instr->dest.ssa.index + 1, v);
break;
+ case nir_intrinsic_load_viewport_scale:
+ case nir_intrinsic_load_viewport_offset:
+ emit_sysval_read(ctx, instr);
+ break;
default:
printf ("Unhandled intrinsic\n");
return TEXTURE_CUBE;
default:
- printf("Unknown sampler dim type\n");
+ DBG("Unknown sampler dim type\n");
assert(0);
return 0;
}
int index = nir_src_index(ctx, &instr->src[i].src);
midgard_vector_alu_src alu_src = blank_alu_src;
- alu_src.swizzle = (COMPONENT_Y << 2);
- midgard_instruction ins = v_fmov(index, alu_src, SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg));
- emit_mir_instruction(ctx, ins);
- //midgard_pin_output(ctx, index, REGISTER_TEXTURE_BASE + in_reg);
+ int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg);
+ if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
+ /* For cubemaps, we need to load coords into
+ * special r27, and then use a special ld/st op
+ * to copy into the texture register */
+
+ alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X);
+
+ midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27));
+ emit_mir_instruction(ctx, move);
+
+ midgard_instruction st = m_store_cubemap_coords(reg, 0);
+ st.load_store.unknown = 0x24; /* XXX: What is this? */
+ st.load_store.mask = 0x3; /* xy? */
+ st.load_store.swizzle = alu_src.swizzle;
+ emit_mir_instruction(ctx, st);
+
+ } else {
+ alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X);
+
+ midgard_instruction ins = v_fmov(index, alu_src, reg);
+ emit_mir_instruction(ctx, ins);
+ }
break;
}
default: {
- printf("Unknown source type\n");
+ DBG("Unknown source type\n");
//assert(0);
break;
}
/* Emit a branch out of the loop */
struct midgard_instruction br = v_branch(false, false);
br.branch.target_type = TARGET_BREAK;
- br.branch.target_break = ctx->current_loop;
+ br.branch.target_break = ctx->current_loop_depth;
emit_mir_instruction(ctx, br);
- printf("break..\n");
+ DBG("break..\n");
break;
}
default:
- printf("Unknown jump type %d\n", instr->type);
+ DBG("Unknown jump type %d\n", instr->type);
break;
}
}
break;
default:
- printf("Unhandled instruction type\n");
+ DBG("Unhandled instruction type\n");
break;
}
}
if (reg >= 0) {
assert(reg < maxreg);
+ assert(g);
int r = ra_get_node_reg(g, reg);
ctx->work_registers = MAX2(ctx->work_registers, r);
return r;
return REGISTER_UNUSED;
default:
- printf("Unknown SSA register alias %d\n", reg);
+ DBG("Unknown SSA register alias %d\n", reg);
assert(0);
return 31;
}
return false;
}
+/* Determine if a variable is live in the successors of a block */
+static bool
+is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src)
+{
+ for (unsigned i = 0; i < bl->nr_successors; ++i) {
+ midgard_block *succ = bl->successors[i];
+
+ /* If we already visited, the value we're seeking
+ * isn't down this path (or we would have short
+ * circuited) */
+
+ if (succ->visited) continue;
+
+ /* Otherwise (it's visited *now*), check the block */
+
+ succ->visited = true;
+
+ mir_foreach_instr_in_block(succ, ins) {
+ if (midgard_is_live_in_instr(ins, src))
+ return true;
+ }
+
+ /* ...and also, check *its* successors */
+ if (is_live_after_successors(ctx, succ, src))
+ return true;
+
+ }
+
+ /* Welp. We're really not live. */
+
+ return false;
+}
+
static bool
is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src)
{
/* Check the rest of the block for liveness */
+
mir_foreach_instr_in_block_from(block, ins, mir_next_op(start)) {
if (midgard_is_live_in_instr(ins, src))
return true;
}
- /* Check the rest of the blocks for liveness */
- mir_foreach_block_from(ctx, mir_next_block(block), b) {
- mir_foreach_instr_in_block(b, ins) {
- if (midgard_is_live_in_instr(ins, src))
- return true;
- }
- }
- /* TODO: How does control flow interact in complex shaders? */
- return false;
+ /* Check the rest of the blocks for liveness recursively */
+ bool succ = is_live_after_successors(ctx, block, src);
+
+ mir_foreach_block(ctx, block) {
+ block->visited = false;
+ }
+
+ return succ;
}
+/* Once registers have been decided via register allocation
+ * (allocate_registers), we need to rewrite the MIR to use registers instead of
+ * SSA */
+
static void
+install_registers(compiler_context *ctx, struct ra_graph *g)
+{
+ mir_foreach_block(ctx, block) {
+ mir_foreach_instr_in_block(block, ins) {
+ if (ins->compact_branch) continue;
+
+ ssa_args args = ins->ssa_args;
+
+ switch (ins->type) {
+ case TAG_ALU_4:
+ ins->registers.src1_reg = dealias_register(ctx, g, args.src0, ctx->temp_count);
+
+ ins->registers.src2_imm = args.inline_constant;
+
+ if (args.inline_constant) {
+ /* Encode inline 16-bit constant as a vector by default */
+
+ ins->registers.src2_reg = ins->inline_constant >> 11;
+
+ int lower_11 = ins->inline_constant & ((1 << 12) - 1);
+
+ uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
+ ins->alu.src2 = imm << 2;
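+ /* (i.e. constant bits 15:11 select src2_reg; of the low bits,
+ * 10:8 land in imm[2:0] and 7:0 in imm[10:3], before the final
+ * << 2 shift into src2) */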
+ } else {
+ ins->registers.src2_reg = dealias_register(ctx, g, args.src1, ctx->temp_count);
+ }
+
+ ins->registers.out_reg = dealias_register(ctx, g, args.dest, ctx->temp_count);
+
+ break;
+
+ case TAG_LOAD_STORE_4: {
+ if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ /* TODO: use ssa_args for store_vary */
+ ins->load_store.reg = 0;
+ } else {
+ bool has_dest = args.dest >= 0;
+ int ssa_arg = has_dest ? args.dest : args.src0;
+
+ ins->load_store.reg = dealias_register(ctx, g, ssa_arg, ctx->temp_count);
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+}
+
+/* This routine performs the actual register allocation. It should be followed
+ * by install_registers */
+
+static struct ra_graph *
allocate_registers(compiler_context *ctx)
{
/* First, initialize the RA */
ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
}
-
- print_mir_block(block);
+ if (midgard_debug & MIDGARD_DBG_SHADERS)
+ print_mir_block(block);
}
+ /* No register allocation to do with no SSA */
+
+ if (!ctx->temp_count)
+ return NULL;
+
/* Let's actually do register allocation */
int nodes = ctx->temp_count;
struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
+ /* Dest is < 0 for store_vary instructions, which break
+ * the usual SSA conventions. Liveness analysis doesn't
+ * make sense on these instructions, so skip them to
+ * avoid memory corruption */
+
+ if (ins->ssa_args.dest < 0) continue;
+
if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
/* If this destination is not yet live, it is now since we just wrote it */
ra_set_select_reg_callback(g, midgard_ra_select_callback, NULL);
if (!ra_allocate(g)) {
- printf("Error allocating registers\n");
- assert(0);
- }
-
- /* Cleanup */
- free(live_start);
- free(live_end);
-
- mir_foreach_block(ctx, block) {
- mir_foreach_instr_in_block(block, ins) {
- if (ins->compact_branch) continue;
-
- ssa_args args = ins->ssa_args;
-
- switch (ins->type) {
- case TAG_ALU_4:
- ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes);
-
- ins->registers.src2_imm = args.inline_constant;
-
- if (args.inline_constant) {
- /* Encode inline 16-bit constant as a vector by default */
-
- ins->registers.src2_reg = ins->inline_constant >> 11;
-
- int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-
- uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3);
- ins->alu.src2 = imm << 2;
- } else {
- ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes);
- }
-
- ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes);
-
- break;
-
- case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE(ins->load_store.op)) {
- /* TODO: use ssa_args for store_vary */
- ins->load_store.reg = 0;
- } else {
- bool has_dest = args.dest >= 0;
- int ssa_arg = has_dest ? args.dest : args.src0;
-
- ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes);
- }
-
- break;
- }
-
- default:
- break;
- }
- }
+ DBG("Error allocating registers\n");
+ assert(0);
}
+
+ /* Cleanup */
+ free(live_start);
+ free(live_end);
+
+ return g;
}
/* Midgard IR only knows vector ALU types, but we sometimes need to actually
}
static unsigned
-vector_to_scalar_source(unsigned u)
+vector_to_scalar_source(unsigned u, bool is_int)
{
midgard_vector_alu_src v;
memcpy(&v, &u, sizeof(v));
+ /* TODO: Integers */
+
midgard_scalar_alu_src s = {
- .abs = v.abs,
- .negate = v.negate,
.full = !v.half,
.component = (v.swizzle & 3) << 1
};
+ if (is_int) {
+ /* TODO */
+ } else {
+ s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
+ s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
+ }
+
unsigned o;
memcpy(&o, &s, sizeof(s));
static midgard_scalar_alu
vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
{
+ bool is_int = midgard_is_integer_op(v.op);
+
/* The output component is from the mask */
midgard_scalar_alu s = {
.op = v.op,
- .src1 = vector_to_scalar_source(v.src1),
- .src2 = vector_to_scalar_source(v.src2),
+ .src1 = vector_to_scalar_source(v.src1, is_int),
+ .src2 = vector_to_scalar_source(v.src2, is_int),
.unknown = 0,
.outmod = v.outmod,
.output_full = 1, /* TODO: Half */
return true;
}
+static bool
+midgard_has_hazard(
+ midgard_instruction **segment, unsigned segment_size,
+ midgard_instruction *ains)
+{
+ for (int s = 0; s < segment_size; ++s)
+ if (!can_run_concurrent_ssa(segment[s], ains))
+ return true;
+
+ return false;
+}
+
/* Schedules, but does not emit, a single basic block. After scheduling, the
* final tag and size of the block are known, which are necessary for branching
* */
if (!unit) {
int op = ains->alu.op;
- int units = alu_opcode_props[op];
+ int units = alu_opcode_props[op].props;
/* TODO: Promotion of scalars to vectors */
int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR);
else
break;
} else {
- if ((units & UNIT_SADD) && !(control & UNIT_SADD))
+ if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
unit = UNIT_SADD;
else if (units & UNIT_SMUL)
- unit = UNIT_SMUL;
+ unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
unit = UNIT_VADD;
else
if (last_unit < UNIT_VADD && unit >= UNIT_VADD)
segment_size = 0;
- /* Check for data hazards */
- int has_hazard = false;
-
- for (int s = 0; s < segment_size; ++s)
- if (!can_run_concurrent_ssa(segment[s], ains))
- has_hazard = true;
-
- if (has_hazard)
+ if (midgard_has_hazard(segment, segment_size, ains))
break;
/* We're good to go -- emit the instruction */
/* ERRATA (?): In a bundle ending in a fragment writeout, the register dependencies of r0 cannot be written within this bundle (discovered in -bshading:shading=phong) */
if (register_dep_mask & written_mask) {
- printf("ERRATA WORKAROUND: Breakup for writeout dependency masks %X vs %X (common %X)\n", register_dep_mask, written_mask, register_dep_mask & written_mask);
+ DBG("ERRATA WORKAROUND: Breakup for writeout dependency masks %X vs %X (common %X)\n", register_dep_mask, written_mask, register_dep_mask & written_mask);
break;
}
}
}
- bundle.body_size[bundle.body_words_count] = sizeof(ains->br_compact);
- memcpy(&bundle.body_words[bundle.body_words_count++], &ains->br_compact, sizeof(ains->br_compact));
- bytes_emitted += sizeof(ains->br_compact);
+ if (ains->unit == ALU_ENAB_BRANCH) {
+ bundle.body_size[bundle.body_words_count] = sizeof(midgard_branch_extended);
+ memcpy(&bundle.body_words[bundle.body_words_count++], &ains->branch_extended, sizeof(midgard_branch_extended));
+ bytes_emitted += sizeof(midgard_branch_extended);
+ } else {
+ bundle.body_size[bundle.body_words_count] = sizeof(ains->br_compact);
+ memcpy(&bundle.body_words[bundle.body_words_count++], &ains->br_compact, sizeof(ains->br_compact));
+ bytes_emitted += sizeof(ains->br_compact);
+ }
} else {
memcpy(&bundle.register_words[bundle.register_words_count++], &ains->registers, sizeof(ains->registers));
bytes_emitted += sizeof(midgard_reg_info);
break;
}
-#if 0
-
- case TAG_TEXTURE_4:
- /* TODO: Schedule texture ops */
- break;
-#endif
-
default:
- /* XXX: What happens with textures? */
+ /* Texture ops default to single-op-per-bundle scheduling */
break;
}
static void
schedule_program(compiler_context *ctx)
{
- allocate_registers(ctx);
+ /* We run RA prior to scheduling */
+ struct ra_graph *g = allocate_registers(ctx);
+ install_registers(ctx, g);
mir_foreach_block(ctx, block) {
schedule_block(ctx, block);
memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins.alu, sizeof(midgard_vector_alu));
}
- memcpy(util_dynarray_grow(emission, sizeof(ins->br_compact)), &ins->br_compact, sizeof(ins->br_compact));
+ if (ins->unit == ALU_ENAB_BR_COMPACT) {
+ memcpy(util_dynarray_grow(emission, sizeof(ins->br_compact)), &ins->br_compact, sizeof(ins->br_compact));
+ } else {
+ memcpy(util_dynarray_grow(emission, sizeof(ins->branch_extended)), &ins->branch_extended, sizeof(ins->branch_extended));
+ }
} else {
/* Scalar */
midgard_scalar_alu scalarised = vector_to_scalar_alu(ins->alu, ins);
}
default:
- printf("Unknown midgard instruction type\n");
+ DBG("Unknown midgard instruction type\n");
assert(0);
break;
}
/* If there is already a constant here, we can do nothing */
if (alu->has_constants) continue;
+ /* It makes no sense to inline constants on a branch */
+ if (alu->compact_branch || alu->prepacked_branch) continue;
+
CONDITIONAL_ATTACH(src0);
if (!alu->has_constants) {
int op = ins->alu.op;
if (ins->ssa_args.src0 == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
- /* Flip based on op. Fallthrough intentional */
-
switch (op) {
- /* These ops require an operational change to flip their arguments TODO */
+ /* These ops require an operational change to flip
+ * their arguments TODO */
case midgard_alu_op_flt:
case midgard_alu_op_fle:
case midgard_alu_op_ilt:
case midgard_alu_op_ile:
case midgard_alu_op_fcsel:
case midgard_alu_op_icsel:
- case midgard_alu_op_isub:
- printf("Missed non-commutative flip (%s)\n", alu_opcode_names[op]);
+ DBG("Missed non-commutative flip (%s)\n", alu_opcode_props[op].name);
+ default:
break;
+ }
- /* These ops are commutative and Just Flip */
- case midgard_alu_op_fne:
- case midgard_alu_op_fadd:
- case midgard_alu_op_fmul:
- case midgard_alu_op_fmin:
- case midgard_alu_op_fmax:
- case midgard_alu_op_iadd:
- case midgard_alu_op_imul:
- case midgard_alu_op_feq:
- case midgard_alu_op_ieq:
- case midgard_alu_op_ine:
- case midgard_alu_op_iand:
- case midgard_alu_op_ior:
- case midgard_alu_op_ixor:
+ if (alu_opcode_props[op].props & OP_COMMUTES) {
/* Flip the SSA numbers */
ins->ssa_args.src0 = ins->ssa_args.src1;
ins->ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
src_temp = ins->alu.src2;
ins->alu.src2 = ins->alu.src1;
ins->alu.src1 = src_temp;
-
- default:
- break;
}
}
if (scaled_constant != iconstants[component])
continue;
} else {
- scaled_constant = _mesa_float_to_half((float) ins->constants[component]);
+ float original = (float) ins->constants[component];
+ scaled_constant = _mesa_float_to_half(original);
+
+ /* Check for loss of precision. If this is
+ * mediump, we don't care, but for a highp
+ * shader, we need to pay attention. NIR
+ * doesn't yet tell us which mode we're in!
+ * Practically this prevents most constants
+ * from being inlined, sadly. */
+
+ float fp32 = _mesa_half_to_float(scaled_constant);
+
+ if (fp32 != original)
+ continue;
}
/* We don't know how to handle these with a constant */
- if (src->abs || src->negate || src->half || src->rep_low || src->rep_high) {
- printf("Bailing inline constant...\n");
+ if (src->mod || src->half || src->rep_low || src->rep_high) {
+ DBG("Bailing inline constant...\n");
continue;
}
}
}
-#define AS_SRC(to, u) \
- int q##to = ins->alu.src2; \
- midgard_vector_alu_src *to = (midgard_vector_alu_src *) &q##to;
-/* Removing unused moves is necessary to clean up the texture pipeline results.
- *
- * To do so, we find moves in the MIR. We check if their destination is live later. If it's not, the move is redundant. */
+/* Basic dead code elimination on the MIR itself, which cleans up e.g. the
+ * texture pipeline */
+static bool
+midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (ins->compact_branch) continue;
+
+ if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
+ if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
+
+ mir_remove_instruction(ins);
+ progress = true;
+ }
+
+ return progress;
+}
+
+static bool
+midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* We only work on pure SSA */
+
+ if (to >= SSA_FIXED_MINIMUM) continue;
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (to >= ctx->func->impl->ssa_alloc) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+
+ /* Also, if the move has side effects, we're helpless */
+
+ midgard_vector_alu_src src =
+ vector_alu_from_unsigned(ins->alu.src2);
+ unsigned mask = squeeze_writemask(ins->alu.mask);
+ bool is_int = midgard_is_integer_op(ins->alu.op);
+
+ if (mir_nontrivial_mod(src, is_int, mask)) continue;
+ if (ins->alu.outmod != midgard_outmod_none) continue;
+
+ mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) {
+ if (v->ssa_args.src0 == to) {
+ v->ssa_args.src0 = from;
+ progress = true;
+ }
+
+ if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) {
+ v->ssa_args.src1 = from;
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
+
+static bool
+midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block)
+{
+ bool progress = false;
+
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->type != TAG_ALU_4) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ unsigned from = ins->ssa_args.src1;
+ unsigned to = ins->ssa_args.dest;
+
+ /* Make sure it's simple enough for us to handle */
+
+ if (from >= SSA_FIXED_MINIMUM) continue;
+ if (from >= ctx->func->impl->ssa_alloc) continue;
+ if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue;
+ if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue;
+
+ bool eliminated = false;
+
+ mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) {
+ /* The texture registers are not SSA so be careful.
+ * Conservatively, just stop if we hit a texture op
+ * (even one that may not write) on the way back to
+ * where we are */
+
+ if (v->type != TAG_ALU_4)
+ break;
+
+ if (v->ssa_args.dest == from) {
+ /* We don't want to track partial writes ... */
+ if (v->alu.mask == 0xF) {
+ v->ssa_args.dest = to;
+ eliminated = true;
+ }
+
+ break;
+ }
+ }
+
+ if (eliminated)
+ mir_remove_instruction(ins);
+
+ progress |= eliminated;
+ }
+
+ return progress;
+}
+
+/* We don't really understand the imov/fmov split, so always use fmov (but let
+ * it be imov in the IR so we don't do unsafe floating point "optimizations"
+ * and break things) */
static void
-midgard_eliminate_orphan_moves(compiler_context *ctx, midgard_block *block)
+midgard_imov_workaround(compiler_context *ctx, midgard_block *block)
{
mir_foreach_instr_in_block_safe(block, ins) {
if (ins->type != TAG_ALU_4) continue;
- if (ins->alu.op != midgard_alu_op_fmov) continue;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
- if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue;
- if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue;
- mir_remove_instruction(ins);
+ if (ins->alu.op != midgard_alu_op_imov) continue;
+ ins->alu.op = midgard_alu_op_fmov;
+ ins->alu.outmod = midgard_outmod_none;
+ /* Remove flags that don't make sense */
+ midgard_vector_alu_src s =
+ vector_alu_from_unsigned(ins->alu.src2);
+ s.mod = 0;
+ ins->alu.src2 = vector_alu_srco_unsigned(s);
}
}
if (c->type != TAG_LOAD_STORE_4) continue;
+ /* Stores cannot be reordered, since they have
+ * dependencies. For the same reason, indirect
+ * loads cannot be reordered as their index is
+ * loaded in r27.w */
+
if (OP_IS_STORE(c->load_store.op)) continue;
+ /* It appears the 0x800 bit is set whenever a
+ * load is direct, unset when it is indirect.
+ * Skip indirect loads. */
+
+ if (!(c->load_store.unknown & 0x800)) continue;
+
/* We found one! Move it up to pair and remove it from the old location */
mir_insert_instruction_before(ins, *c);
emit_leftover_move(ctx);
}
-/* Vertex shaders do not write gl_Position as is; instead, they write a
- * transformed screen space position as a varying. See section 12.5 "Coordinate
- * Transformation" of the ES 3.2 full specification for details.
- *
- * This transformation occurs early on, as NIR and prior to optimisation, in
- * order to take advantage of NIR optimisation passes of the transform itself.
- * */
-
-static void
-write_transformed_position(nir_builder *b, nir_src input_point_src, int uniform_no)
-{
- nir_ssa_def *input_point = nir_ssa_for_src(b, input_point_src, 4);
-
- /* Get viewport from the uniforms */
- nir_intrinsic_instr *load;
- load = nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
- load->num_components = 4;
- load->src[0] = nir_src_for_ssa(nir_imm_int(b, uniform_no));
- nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
- nir_builder_instr_insert(b, &load->instr);
-
- /* Formatted as <width, height, centerx, centery> */
- nir_ssa_def *viewport_vec4 = &load->dest.ssa;
- nir_ssa_def *viewport_width_2 = nir_channel(b, viewport_vec4, 0);
- nir_ssa_def *viewport_height_2 = nir_channel(b, viewport_vec4, 1);
- nir_ssa_def *viewport_offset = nir_channels(b, viewport_vec4, 0x8 | 0x4);
-
- /* XXX: From uniforms? */
- nir_ssa_def *depth_near = nir_imm_float(b, 0.0);
- nir_ssa_def *depth_far = nir_imm_float(b, 1.0);
-
- /* World space to normalised device coordinates */
-
- nir_ssa_def *w_recip = nir_frcp(b, nir_channel(b, input_point, 3));
- nir_ssa_def *ndc_point = nir_fmul(b, nir_channels(b, input_point, 0x7), w_recip);
-
- /* Normalised device coordinates to screen space */
-
- nir_ssa_def *viewport_multiplier = nir_vec2(b, viewport_width_2, viewport_height_2);
- nir_ssa_def *viewport_xy = nir_fadd(b, nir_fmul(b, nir_channels(b, ndc_point, 0x3), viewport_multiplier), viewport_offset);
-
- nir_ssa_def *depth_multiplier = nir_fmul(b, nir_fsub(b, depth_far, depth_near), nir_imm_float(b, 0.5f));
- nir_ssa_def *depth_offset = nir_fmul(b, nir_fadd(b, depth_far, depth_near), nir_imm_float(b, 0.5f));
- nir_ssa_def *screen_depth = nir_fadd(b, nir_fmul(b, nir_channel(b, ndc_point, 2), depth_multiplier), depth_offset);
-
- /* gl_Position will be written out in screenspace xyz, with w set to
- * the reciprocal we computed earlier. The transformed w component is
- * then used for perspective-correct varying interpolation */
-
- nir_ssa_def *screen_space = nir_vec4(b,
- nir_channel(b, viewport_xy, 0),
- nir_channel(b, viewport_xy, 1),
- screen_depth,
- nir_fabs(b, w_recip));
-
- /* Finally, write out the transformed values to the varying */
-
- nir_intrinsic_instr *store;
- store = nir_intrinsic_instr_create(b->shader, nir_intrinsic_store_output);
- store->num_components = 4;
- nir_intrinsic_set_base(store, 0);
- nir_intrinsic_set_write_mask(store, 0xf);
- store->src[0].ssa = screen_space;
- store->src[0].is_ssa = true;
- store->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
- nir_builder_instr_insert(b, &store->instr);
-}
-
-static void
-transform_position_writes(nir_shader *shader)
-{
- nir_foreach_function(func, shader) {
- nir_foreach_block(block, func->impl) {
- nir_foreach_instr_safe(instr, block) {
- if (instr->type != nir_instr_type_intrinsic) continue;
-
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- nir_variable *out = NULL;
-
- switch (intr->intrinsic) {
- case nir_intrinsic_store_output:
- /* already had i/o lowered.. lookup the matching output var: */
- nir_foreach_variable(var, &shader->outputs) {
- int drvloc = var->data.driver_location;
-
- if (nir_intrinsic_base(intr) == drvloc) {
- out = var;
- break;
- }
- }
-
- break;
-
- default:
- break;
- }
-
- if (!out) continue;
-
- if (out->data.mode != nir_var_shader_out)
- continue;
-
- if (out->data.location != VARYING_SLOT_POS)
- continue;
-
- nir_builder b;
- nir_builder_init(&b, func->impl);
- b.cursor = nir_before_instr(instr);
-
- write_transformed_position(&b, intr->src[0], UNIFORM_VIEWPORT);
- nir_instr_remove(instr);
- }
- }
- }
-}
-
static void
emit_fragment_epilogue(compiler_context *ctx)
{
},
.alu = {
.op = midgard_alu_op_fmul,
- .reg_mode = midgard_reg_mode_full,
+ .reg_mode = midgard_reg_mode_32,
.dest_override = midgard_dest_override_lower,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
},
.alu = {
.op = midgard_alu_op_f2u8,
- .reg_mode = midgard_reg_mode_half,
+ .reg_mode = midgard_reg_mode_16,
.dest_override = midgard_dest_override_lower,
.outmod = midgard_outmod_pos,
.mask = 0xF,
},
.alu = {
.op = midgard_alu_op_imov,
- .reg_mode = midgard_reg_mode_quarter,
+ .reg_mode = midgard_reg_mode_8,
.dest_override = midgard_dest_override_none,
.mask = 0xFF,
.src1 = vector_alu_srco_unsigned(blank_alu_src),
static midgard_block *
emit_block(compiler_context *ctx, nir_block *block)
{
- midgard_block *this_block = malloc(sizeof(midgard_block));
+ midgard_block *this_block = calloc(1, sizeof(midgard_block));
list_addtail(&this_block->link, &ctx->blocks);
this_block->is_scheduled = false;
ctx->texture_index[0] = -1;
ctx->texture_index[1] = -1;
+ /* Add us as a successor to the block we are following */
+ if (ctx->current_block)
+ midgard_block_add_successor(ctx->current_block, this_block);
+
/* Set up current block */
list_inithead(&this_block->instructions);
ctx->current_block = this_block;
actualise_ssa_to_alias(ctx);
midgard_emit_store(ctx, this_block);
- midgard_eliminate_orphan_moves(ctx, this_block);
midgard_pair_load_store(ctx, this_block);
+ midgard_imov_workaround(ctx, this_block);
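+ /* (The imov workaround runs last so it sees the block in its
+ * final pre-schedule form; nothing later can reintroduce an
+ * unconverted imov for the packer to trip over.) */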
/* Append fragment shader epilogue (value writeout) */
if (ctx->stage == MESA_SHADER_FRAGMENT) {
}
}
- /* Fallthrough save */
- this_block->next_fallthrough = ctx->previous_source_block;
-
if (block == nir_start_block(ctx->func->impl))
ctx->initial_block = this_block;
{
/* Conditional branches expect the condition in r31.w; emit a move for
* that in the _previous_ block (which is the current block). */
- emit_condition(ctx, &nif->condition, true);
+ emit_condition(ctx, &nif->condition, true, COMPONENT_X);
/* Speculatively emit the branch, but we can't fill it in until later */
EMIT(branch, true, true);
int else_idx = ctx->block_count;
int count_in = ctx->instruction_count;
midgard_block *else_block = emit_cf_list(ctx, &nif->else_list);
+ int after_else_idx = ctx->block_count;
/* Now that we have the subblocks emitted, fix up the branches */
assert(then_block);
assert(else_block);
-
if (ctx->instruction_count == count_in) {
/* The else block is empty, so don't emit an exit jump */
mir_remove_instruction(then_exit);
- then_branch->branch.target_block = else_idx + 1;
+ then_branch->branch.target_block = after_else_idx;
} else {
then_branch->branch.target_block = else_idx;
- then_exit->branch.target_block = else_idx + 1;
+ then_exit->branch.target_block = after_else_idx;
}
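+ /* Rough worked example (hypothetical block numbers): with the
+ * then-body as block 1, else_idx = 2 and after_else_idx = 3, the
+ * inverted conditional branch in block 0 jumps to 2 when the
+ * condition fails, while the exit branch ending block 1 jumps
+ * to 3, over the else. */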
}
/* Remember where we are */
midgard_block *start_block = ctx->current_block;
- /* Allocate a loop number for this. TODO: Nested loops. Instead of a
- * single current_loop variable, maybe we need a stack */
-
- int loop_idx = ++ctx->current_loop;
+ /* Allocate a loop number, growing the current inner loop depth */
+ int loop_idx = ++ctx->current_loop_depth;
/* Get index from before the body so we can loop back later */
int start_idx = ctx->block_count;
br_back.branch.target_block = start_idx;
emit_mir_instruction(ctx, br_back);
+ /* Mark down that branch in the graph. Note that we're really branching
+ * to the block *after* we started in. TODO: Why doesn't the branch
+ * itself have an off-by-one then...? */
+ midgard_block_add_successor(ctx->current_block, start_block->successors[0]);
+
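+ /* In graph terms, for "while (c) { body }", the back-edge runs
+ * from current_block (the last block of the body) to
+ * start_block->successors[0], the first block of the body. */
+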
/* Find the index of the block about to follow us (note: we don't add
* one; blocks are 0-indexed so we get a fencepost problem) */
int break_block_idx = ctx->block_count;
* now that we can allocate a block number for them */
list_for_each_entry_from(struct midgard_block, block, start_block, &ctx->blocks, link) {
- print_mir_block(block);
mir_foreach_instr_in_block(block, ins) {
if (ins->type != TAG_ALU_4) continue;
if (!ins->compact_branch) continue;
ins->branch.target_block = break_block_idx;
}
}
+
+ /* Now that we've finished emitting the loop, free up the depth again
+ * so we play nice with recursion amid nested loops */
+ --ctx->current_loop_depth;
}
static midgard_block *
return start_block;
}
+/* Due to lookahead, we need to report the first tag executed, both in the
+ * command stream and at branch targets. An initial block might be empty, so
+ * iterate until we find one that 'works' */
+
+static unsigned
+midgard_get_first_tag_from_block(compiler_context *ctx, unsigned block_idx)
+{
+ midgard_block *initial_block = mir_get_block(ctx, block_idx);
+
+ unsigned first_tag = 0;
+
+ do {
+ midgard_bundle *initial_bundle = util_dynarray_element(&initial_block->bundles, midgard_bundle, 0);
+
+ if (initial_bundle) {
+ first_tag = initial_bundle->tag;
+ break;
+ }
+
+ /* Initial block is empty, try the next block */
+ initial_block = list_first_entry(&(initial_block->link), midgard_block, link);
+ } while (initial_block != NULL);
+
+ assert(first_tag);
+ return first_tag;
+}
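+/* Besides program->first_tag, this feeds the dest_tag of each branch packed
+ * below: a branch into a block whose first bundle is a load/store group
+ * would get dest_tag == TAG_LOAD_STORE_4, for instance. */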
+
int
midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_blend)
{
struct util_dynarray *compiled = &program->compiled;
+ midgard_debug = debug_get_option_midgard_debug();
+
compiler_context ictx = {
.nir = nir,
.stage = nir->info.stage,
/* TODO: Decide this at runtime */
ctx->uniform_cutoff = 8;
- switch (ctx->stage) {
- case MESA_SHADER_VERTEX:
- ctx->special_uniforms = 1;
- break;
-
- default:
- ctx->special_uniforms = 0;
- break;
- }
-
- /* Append epilogue uniforms if necessary. The cmdstream depends on
- * these being at the -end-; see assign_var_locations. */
-
- if (ctx->stage == MESA_SHADER_VERTEX) {
- nir_variable_create(nir, nir_var_uniform, glsl_vec4_type(), "viewport");
- }
-
/* Assign var locations early, so the epilogue can use them if necessary */
nir_assign_var_locations(&nir->outputs, &nir->num_outputs, glsl_type_size);
ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL);
ctx->ssa_to_register = _mesa_hash_table_u64_create(NULL);
ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
+ ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL);
ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
- /* Assign actual uniform location, skipping over samplers */
-
- ctx->uniform_nir_to_mdg = _mesa_hash_table_u64_create(NULL);
-
- nir_foreach_variable(var, &nir->uniforms) {
- if (glsl_get_base_type(var->type) == GLSL_TYPE_SAMPLER) continue;
-
- unsigned length = glsl_get_aoa_size(var->type);
-
- if (!length) {
- length = glsl_get_length(var->type);
- }
-
- if (!length) {
- length = glsl_get_matrix_columns(var->type);
- }
-
- for (int col = 0; col < length; ++col) {
- int id = ctx->uniform_count++;
- _mesa_hash_table_u64_insert(ctx->uniform_nir_to_mdg, var->data.driver_location + col + 1, (void *) ((uintptr_t) (id + 1)));
- }
- }
- if (ctx->stage == MESA_SHADER_VERTEX) {
- ctx->varying_nir_to_mdg = _mesa_hash_table_u64_create(NULL);
-
- /* First, collect the special varyings */
- nir_foreach_variable(var, &nir->outputs) {
- if (var->data.location == VARYING_SLOT_POS) {
- /* Set position first, always. It takes up two
- * spots, the latter one is de facto unused (at
- * least from the shader's perspective), we
- * just need to skip over the spot */
-
- _mesa_hash_table_u64_insert(ctx->varying_nir_to_mdg, var->data.driver_location + 1, (void *) ((uintptr_t) (0 + 1)));
- ctx->varying_count = MAX2(ctx->varying_count, 2);
- } else if (var->data.location == VARYING_SLOT_PSIZ) {
- /* Set point size second (third, see above) */
- _mesa_hash_table_u64_insert(ctx->varying_nir_to_mdg, var->data.driver_location + 1, (void *) ((uintptr_t) (2 + 1)));
- ctx->varying_count = MAX2(ctx->varying_count, 3);
-
- program->writes_point_size = true;
- }
- }
- /* Now, collect normal varyings */
-
- nir_foreach_variable(var, &nir->outputs) {
- if (var->data.location == VARYING_SLOT_POS || var->data.location == VARYING_SLOT_PSIZ) continue;
- for (int col = 0; col < glsl_get_matrix_columns(var->type); ++col) {
- int id = ctx->varying_count++;
- _mesa_hash_table_u64_insert(ctx->varying_nir_to_mdg, var->data.driver_location + col + 1, (void *) ((uintptr_t) (id + 1)));
- }
+ /* Record the varying mapping for the command stream's bookkeeping */
+
+ struct exec_list *varyings =
+ ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs;
+
+ nir_foreach_variable(var, varyings) {
+ unsigned loc = var->data.driver_location;
+ unsigned sz = glsl_type_size(var->type, FALSE);
+
+ for (int c = 0; c < sz; ++c) {
+ program->varyings[loc + c] = var->data.location;
}
}
-
- /* Lower vars -- not I/O -- before epilogue */
+ /* Lower gl_Position pre-optimisation */
+ if (ctx->stage == MESA_SHADER_VERTEX)
+ NIR_PASS_V(nir, nir_lower_viewport_transform);
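+ /* (Lowering before optimise_nir lets the transform's own
+ * arithmetic fold with the rest of the shader, matching how the
+ * old hand-rolled pass was emitted pre-optimisation.) */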
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
NIR_PASS_V(nir, nir_lower_global_vars_to_local);
NIR_PASS_V(nir, nir_lower_var_copies);
NIR_PASS_V(nir, nir_lower_vars_to_ssa);
- NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
-
- /* Append vertex epilogue before optimisation, so the epilogue itself
- * is optimised */
- if (ctx->stage == MESA_SHADER_VERTEX)
- transform_position_writes(nir);
+ NIR_PASS_V(nir, nir_lower_io, nir_var_all, glsl_type_size, 0);
/* Optimisation passes */
optimise_nir(nir);
- nir_print_shader(nir, stdout);
+ if (midgard_debug & MIDGARD_DBG_SHADERS) {
+ nir_print_shader(nir, stdout);
+ }
+
- /* Assign counts, now that we're sure (post-optimisation) */
+ /* Assign sysvals and counts, now that we're sure
+ * (post-optimisation) */
+
+ midgard_nir_assign_sysvals(ctx, nir);
program->uniform_count = nir->num_uniforms;
+ program->sysval_count = ctx->sysval_count;
+ memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count);
program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0;
program->varying_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_outputs : ((ctx->stage == MESA_SHADER_FRAGMENT) ? nir->num_inputs : 0);
-
nir_foreach_function(func, nir) {
if (!func->impl)
continue;
util_dynarray_init(compiled, NULL);
+ /* MIR-level optimizations */
+
+ bool progress = false;
+
+ do {
+ progress = false;
+
+ mir_foreach_block(ctx, block) {
+ progress |= midgard_opt_copy_prop(ctx, block);
+ progress |= midgard_opt_copy_prop_tex(ctx, block);
+ progress |= midgard_opt_dead_code_eliminate(ctx, block);
+ }
+ } while (progress);
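+
+ /* Iterating to a fixed point matters here: copy propagation
+ * strands the source move, DCE then deletes it, and that in
+ * turn can expose further copies to propagate. */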
+
/* Schedule! */
schedule_program(ctx);
for (int c = 0; c < bundle->instruction_count; ++c) {
midgard_instruction *ins = &bundle->instructions[c];
- if (ins->unit != ALU_ENAB_BR_COMPACT) continue;
+ if (!midgard_is_branch_unit(ins->unit)) continue;
if (ins->prepacked_branch) continue;
- uint16_t compact;
+ /* Parse some basic branch info */
+ bool is_compact = ins->unit == ALU_ENAB_BR_COMPACT;
+ bool is_conditional = ins->branch.conditional;
+ bool is_inverted = ins->branch.invert_conditional;
+ bool is_discard = ins->branch.target_type == TARGET_DISCARD;
/* Determine the block we're jumping to */
int target_number = ins->branch.target_block;
- midgard_block *target = mir_get_block(ctx, target_number);
- assert(target);
-
- /* Determine the destination tag */
- midgard_bundle *first = util_dynarray_element(&target->bundles, midgard_bundle, 0);
- assert(first);
-
- int dest_tag = first->tag;
+ /* Report the destination tag. Discards don't need this */
+ int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number);
/* Count up the number of quadwords we're jumping over. That is, the number of quadwords in each of the blocks between (br_block_idx, target_number) */
int quadword_offset = 0;
- if (target_number > br_block_idx) {
+ if (is_discard) {
+ /* Jump to the end of the shader. We
+ * need to include not only the
+ * following blocks, but also the
+ * contents of our current block (since
+ * discard can come in the middle of
+ * the block) */
+
+ midgard_block *blk = mir_get_block(ctx, br_block_idx + 1);
+
+ for (midgard_bundle *bun = bundle + 1; bun < (midgard_bundle *)((char*) block->bundles.data + block->bundles.size); ++bun) {
+ quadword_offset += quadword_size(bun->tag);
+ }
+
+ mir_foreach_block_from(ctx, blk, b) {
+ quadword_offset += b->quadword_count;
+ }
+
+ } else if (target_number > br_block_idx) {
/* Jump forward */
for (int idx = br_block_idx + 1; idx < target_number; ++idx) {
}
}
- if (ins->branch.conditional) {
+ /* Unconditional extended branches (far jumps)
+ * have issues, so we always use a conditional
+ * branch, setting the condition to always for
+ * unconditional. For compact unconditional
+ * branches, cond isn't used so it doesn't
+ * matter what we pick. */
+
+ midgard_condition cond =
+ !is_conditional ? midgard_condition_always :
+ is_inverted ? midgard_condition_false :
+ midgard_condition_true;
+
+ midgard_jmp_writeout_op op =
+ is_discard ? midgard_jmp_writeout_op_discard :
+ (is_compact && !is_conditional) ? midgard_jmp_writeout_op_branch_uncond :
+ midgard_jmp_writeout_op_branch_cond;
+
+ if (!is_compact) {
+ midgard_branch_extended branch =
+ midgard_create_branch_extended(
+ cond, op,
+ dest_tag,
+ quadword_offset);
+
+ memcpy(&ins->branch_extended, &branch, sizeof(branch));
+ } else if (is_conditional || is_discard) {
midgard_branch_cond branch = {
- .op = midgard_jmp_writeout_op_branch_cond,
+ .op = op,
.dest_tag = dest_tag,
.offset = quadword_offset,
- .cond = ins->branch.invert_conditional ? midgard_condition_false : midgard_condition_true
+ .cond = cond
};
- memcpy(&compact, &branch, sizeof(branch));
+ assert(branch.offset == quadword_offset);
+
+ memcpy(&ins->br_compact, &branch, sizeof(branch));
} else {
+ assert(op == midgard_jmp_writeout_op_branch_uncond);
+
midgard_branch_uncond branch = {
- .op = midgard_jmp_writeout_op_branch_uncond,
+ .op = op,
.dest_tag = dest_tag,
.offset = quadword_offset,
.unknown = 1
};
- memcpy(&compact, &branch, sizeof(branch));
- }
- /* Swap in the generic branch for our actual branch */
- ins->unit = ALU_ENAB_BR_COMPACT;
- ins->br_compact = compact;
+ assert(branch.offset == quadword_offset);
+ memcpy(&ins->br_compact, &branch, sizeof(branch));
+ }
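+
+ /* The asserts above guard the narrow offset bitfields of the
+ * compact encodings: an out-of-range quadword_offset would
+ * otherwise be silently truncated by the struct assignment. */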
}
-
}
++br_block_idx;
free(source_order_bundles);
- /* Due to lookahead, we need to report in the command stream the first
- * tag executed. An initial block might be empty, so iterate until we
- * find one that 'works' */
-
- midgard_block *initial_block = list_first_entry(&ctx->blocks, midgard_block, link);
-
- program->first_tag = 0;
-
- do {
- midgard_bundle *initial_bundle = util_dynarray_element(&initial_block->bundles, midgard_bundle, 0);
-
- if (initial_bundle) {
- program->first_tag = initial_bundle->tag;
- break;
- }
-
- /* Initial block is empty, try the next block */
- initial_block = list_first_entry(&(initial_block->link), midgard_block, link);
- } while(initial_block != NULL);
-
- /* Make sure we actually set the tag */
- assert(program->first_tag);
+ /* Report the very first tag executed */
+ program->first_tag = midgard_get_first_tag_from_block(ctx, 0);
/* Deal with off-by-one related to the fencepost problem */
program->work_register_count = ctx->work_registers + 1;
program->blend_patch_offset = ctx->blend_constant_offset;
- disassemble_midgard(program->compiled.data, program->compiled.size);
+ if (midgard_debug & MIDGARD_DBG_SHADERS)
+ disassemble_midgard(program->compiled.data, program->compiled.size);
return 0;
}