X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fpanfrost%2Fmidgard%2Fmidgard_compile.c;h=1e08e349eee8e651419834dedd18d538512e87e0;hb=318e9933b13a98aa3392fc5fce2d2dc4314932f9;hp=9098727aa15d09a2485808697aa9d4b1deaeaed1;hpb=53d6e11393744423b7cbf29459b81bde80ba1516;p=mesa.git diff --git a/src/gallium/drivers/panfrost/midgard/midgard_compile.c b/src/gallium/drivers/panfrost/midgard/midgard_compile.c index 9098727aa15..1e08e349eee 100644 --- a/src/gallium/drivers/panfrost/midgard/midgard_compile.c +++ b/src/gallium/drivers/panfrost/midgard/midgard_compile.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2018 Alyssa Rosenzweig + * Copyright (C) 2018-2019 Alyssa Rosenzweig * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -36,7 +36,7 @@ #include "main/imports.h" #include "compiler/nir/nir_builder.h" #include "util/half_float.h" -#include "util/register_allocate.h" +#include "util/u_math.h" #include "util/u_debug.h" #include "util/u_dynarray.h" #include "util/list.h" @@ -45,7 +45,9 @@ #include "midgard.h" #include "midgard_nir.h" #include "midgard_compile.h" +#include "midgard_ops.h" #include "helpers.h" +#include "compiler.h" #include "disassemble.h" @@ -64,132 +66,12 @@ int midgard_debug = 0; fprintf(stderr, "%s:%d: "fmt, \ __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0) -/* Instruction arguments represented as block-local SSA indices, rather than - * registers. Negative values mean unused. */ - -typedef struct { - int src0; - int src1; - int dest; - - /* src1 is -not- SSA but instead a 16-bit inline constant to be smudged - * in. Only valid for ALU ops. */ - bool inline_constant; -} ssa_args; - -/* Forward declare so midgard_branch can reference */ -struct midgard_block; - -/* Target types. Defaults to TARGET_GOTO (the type corresponding directly to - * the hardware), hence why that must be zero. TARGET_DISCARD signals this - * instruction is actually a discard op. */ - -#define TARGET_GOTO 0 -#define TARGET_BREAK 1 -#define TARGET_CONTINUE 2 -#define TARGET_DISCARD 3 - -typedef struct midgard_branch { - /* If conditional, the condition is specified in r31.w */ - bool conditional; - - /* For conditionals, if this is true, we branch on FALSE. If false, we branch on TRUE. */ - bool invert_conditional; - - /* Branch targets: the start of a block, the start of a loop (continue), the end of a loop (break). Value is one of TARGET_ */ - unsigned target_type; - - /* The actual target */ - union { - int target_block; - int target_break; - int target_continue; - }; -} midgard_branch; - static bool midgard_is_branch_unit(unsigned unit) { return (unit == ALU_ENAB_BRANCH) || (unit == ALU_ENAB_BR_COMPACT); } -/* Generic in-memory data type repesenting a single logical instruction, rather - * than a single instruction group. This is the preferred form for code gen. - * Multiple midgard_insturctions will later be combined during scheduling, - * though this is not represented in this structure. Its format bridges - * the low-level binary representation with the higher level semantic meaning. - * - * Notably, it allows registers to be specified as block local SSA, for code - * emitted before the register allocation pass. - */ - -typedef struct midgard_instruction { - /* Must be first for casting */ - struct list_head link; - - unsigned type; /* ALU, load/store, texture */ - - /* If the register allocator has not run yet... 
*/ - ssa_args ssa_args; - - /* Special fields for an ALU instruction */ - midgard_reg_info registers; - - /* I.e. (1 << alu_bit) */ - int unit; - - bool has_constants; - float constants[4]; - uint16_t inline_constant; - bool has_blend_constant; - - bool compact_branch; - bool writeout; - bool prepacked_branch; - - union { - midgard_load_store_word load_store; - midgard_vector_alu alu; - midgard_texture_word texture; - midgard_branch_extended branch_extended; - uint16_t br_compact; - - /* General branch, rather than packed br_compact. Higher level - * than the other components */ - midgard_branch branch; - }; -} midgard_instruction; - -typedef struct midgard_block { - /* Link to next block. Must be first for mir_get_block */ - struct list_head link; - - /* List of midgard_instructions emitted for the current block */ - struct list_head instructions; - - bool is_scheduled; - - /* List of midgard_bundles emitted (after the scheduler has run) */ - struct util_dynarray bundles; - - /* Number of quadwords _actually_ emitted, as determined after scheduling */ - unsigned quadword_count; - - /* Successors: always one forward (the block after us), maybe - * one backwards (for a backward branch). No need for a second - * forward, since graph traversal would get there eventually - * anyway */ - struct midgard_block *successors[2]; - unsigned nr_successors; - - /* The successors pointer form a graph, and in the case of - * complex control flow, this graph has a cycles. To aid - * traversal during liveness analysis, we have a visited? - * boolean for passes to use as they see fit, provided they - * clean up later */ - bool visited; -} midgard_block; - static void midgard_block_add_successor(midgard_block *block, midgard_block *successor) { @@ -201,12 +83,12 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor) * driver seems to do it that way */ #define EMIT(op, ...) 
emit_mir_instruction(ctx, v_##op(__VA_ARGS__)); -#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W) #define M_LOAD_STORE(name, rname, uname) \ static midgard_instruction m_##name(unsigned ssa, unsigned address) { \ midgard_instruction i = { \ .type = TAG_LOAD_STORE_4, \ + .mask = 0xF, \ .ssa_args = { \ .rname = ssa, \ .uname = -1, \ @@ -214,7 +96,6 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor) }, \ .load_store = { \ .op = midgard_op_##name, \ - .mask = 0xF, \ .swizzle = SWIZZLE_XYZW, \ .address = address \ } \ @@ -226,58 +107,46 @@ midgard_block_add_successor(midgard_block *block, midgard_block *successor) #define M_LOAD(name) M_LOAD_STORE(name, dest, src0) #define M_STORE(name) M_LOAD_STORE(name, src0, dest) -const midgard_vector_alu_src blank_alu_src = { - .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), -}; - -const midgard_vector_alu_src blank_alu_src_xxxx = { - .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X), -}; - -const midgard_scalar_alu_src blank_scalar_alu_src = { - .full = true -}; - -/* Used for encoding the unused source of 1-op instructions */ -const midgard_vector_alu_src zero_alu_src = { 0 }; - -/* Coerce structs to integer */ - -static unsigned -vector_alu_srco_unsigned(midgard_vector_alu_src src) -{ - unsigned u; - memcpy(&u, &src, sizeof(src)); - return u; -} - -static midgard_vector_alu_src -vector_alu_from_unsigned(unsigned u) -{ - midgard_vector_alu_src s; - memcpy(&s, &u, sizeof(s)); - return s; -} - /* Inputs a NIR ALU source, with modifiers attached if necessary, and outputs * the corresponding Midgard source */ static midgard_vector_alu_src -vector_alu_modifiers(nir_alu_src *src, bool is_int) +vector_alu_modifiers(nir_alu_src *src, bool is_int, unsigned broadcast_count, + bool half, bool sext) { if (!src) return blank_alu_src; + /* Figure out how many components there are so we can adjust the + * swizzle. Specifically we want to broadcast the last channel so + * things like ball2/3 work + */ + + if (broadcast_count) { + uint8_t last_component = src->swizzle[broadcast_count - 1]; + + for (unsigned c = broadcast_count; c < NIR_MAX_VEC_COMPONENTS; ++c) { + src->swizzle[c] = last_component; + } + } + midgard_vector_alu_src alu_src = { .rep_low = 0, .rep_high = 0, - .half = 0, /* TODO */ + .half = half, .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle) }; if (is_int) { - /* TODO: sign-extend/zero-extend */ alu_src.mod = midgard_int_normal; + /* Sign/zero-extend if needed */ + + if (half) { + alu_src.mod = sext ? 
+ midgard_int_sign_extend + : midgard_int_zero_extend; + } + /* These should have been lowered away */ assert(!(src->abs || src->negate)); } else { @@ -287,61 +156,21 @@ vector_alu_modifiers(nir_alu_src *src, bool is_int) return alu_src; } -static bool -mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask) -{ - /* abs or neg */ - if (!is_int && src.mod) return true; - - /* swizzle */ - for (unsigned c = 0; c < 4; ++c) { - if (!(mask & (1 << c))) continue; - if (((src.swizzle >> (2*c)) & 3) != c) return true; - } - - return false; -} - -/* 'Intrinsic' move for misc aliasing uses independent of actual NIR ALU code */ - -static midgard_instruction -v_fmov(unsigned src, midgard_vector_alu_src mod, unsigned dest) -{ - midgard_instruction ins = { - .type = TAG_ALU_4, - .ssa_args = { - .src0 = SSA_UNUSED_1, - .src1 = src, - .dest = dest, - }, - .alu = { - .op = midgard_alu_op_fmov, - .reg_mode = midgard_reg_mode_full, - .dest_override = midgard_dest_override_none, - .mask = 0xFF, - .src1 = vector_alu_srco_unsigned(zero_alu_src), - .src2 = vector_alu_srco_unsigned(mod) - }, - }; - - return ins; -} - /* load/store instructions have both 32-bit and 16-bit variants, depending on * whether we are using vectors composed of highp or mediump. At the moment, we * don't support half-floats -- this requires changes in other parts of the * compiler -- therefore the 16-bit versions are commented out. */ -//M_LOAD(load_attr_16); -M_LOAD(load_attr_32); -//M_LOAD(load_vary_16); -M_LOAD(load_vary_32); -//M_LOAD(load_uniform_16); -M_LOAD(load_uniform_32); -M_LOAD(load_color_buffer_8); -//M_STORE(store_vary_16); -M_STORE(store_vary_32); -M_STORE(store_cubemap_coords); +//M_LOAD(ld_attr_16); +M_LOAD(ld_attr_32); +//M_LOAD(ld_vary_16); +M_LOAD(ld_vary_32); +//M_LOAD(ld_uniform_16); +M_LOAD(ld_uniform_32); +M_LOAD(ld_color_buffer_8); +//M_STORE(st_vary_16); +M_STORE(st_vary_32); +M_STORE(st_cubemap_coords); static midgard_instruction v_alu_br_compact_cond(midgard_jmp_writeout_op op, unsigned tag, signed offset, unsigned cond) @@ -413,278 +242,11 @@ midgard_create_branch_extended( midgard_condition cond, return branch; } -typedef struct midgard_bundle { - /* Tag for the overall bundle */ - int tag; - - /* Instructions contained by the bundle */ - int instruction_count; - midgard_instruction instructions[5]; - - /* Bundle-wide ALU configuration */ - int padding; - int control; - bool has_embedded_constants; - float constants[4]; - bool has_blend_constant; - - uint16_t register_words[8]; - int register_words_count; - - uint64_t body_words[8]; - size_t body_size[8]; - int body_words_count; -} midgard_bundle; - -typedef struct compiler_context { - nir_shader *nir; - gl_shader_stage stage; - - /* Is internally a blend shader? 
Depends on stage == FRAGMENT */ - bool is_blend; - - /* Tracking for blend constant patching */ - int blend_constant_number; - int blend_constant_offset; - - /* Current NIR function */ - nir_function *func; - - /* Unordered list of midgard_blocks */ - int block_count; - struct list_head blocks; - - midgard_block *initial_block; - midgard_block *previous_source_block; - midgard_block *final_block; - - /* List of midgard_instructions emitted for the current block */ - midgard_block *current_block; - - /* The current "depth" of the loop, for disambiguating breaks/continues - * when using nested loops */ - int current_loop_depth; - - /* Constants which have been loaded, for later inlining */ - struct hash_table_u64 *ssa_constants; - - /* SSA indices to be outputted to corresponding varying offset */ - struct hash_table_u64 *ssa_varyings; - - /* SSA values / registers which have been aliased. Naively, these - * demand a fmov output; instead, we alias them in a later pass to - * avoid the wasted op. - * - * A note on encoding: to avoid dynamic memory management here, rather - * than ampping to a pointer, we map to the source index; the key - * itself is just the destination index. */ - - struct hash_table_u64 *ssa_to_alias; - struct set *leftover_ssa_to_alias; - - /* Actual SSA-to-register for RA */ - struct hash_table_u64 *ssa_to_register; - - /* Mapping of hashes computed from NIR indices to the sequential temp indices ultimately used in MIR */ - struct hash_table_u64 *hash_to_temp; - int temp_count; - int max_hash; - - /* Just the count of the max register used. Higher count => higher - * register pressure */ - int work_registers; - - /* Used for cont/last hinting. Increase when a tex op is added. - * Decrease when a tex op is removed. */ - int texture_op_count; - - /* Mapping of texture register -> SSA index for unaliasing */ - int texture_index[2]; - - /* If any path hits a discard instruction */ - bool can_discard; - - /* The number of uniforms allowable for the fast path */ - int uniform_cutoff; - - /* Count of instructions emitted from NIR overall, across all blocks */ - int instruction_count; - - /* Alpha ref value passed in */ - float alpha_ref; - - /* The index corresponding to the fragment output */ - unsigned fragment_output; - - /* The mapping of sysvals to uniforms, the count, and the off-by-one inverse */ - unsigned sysvals[MAX_SYSVAL_COUNT]; - unsigned sysval_count; - struct hash_table_u64 *sysval_to_id; -} compiler_context; - -/* Append instruction to end of current block */ - -static midgard_instruction * -mir_upload_ins(struct midgard_instruction ins) -{ - midgard_instruction *heap = malloc(sizeof(ins)); - memcpy(heap, &ins, sizeof(ins)); - return heap; -} - -static void -emit_mir_instruction(struct compiler_context *ctx, struct midgard_instruction ins) -{ - list_addtail(&(mir_upload_ins(ins))->link, &ctx->current_block->instructions); -} - -static void -mir_insert_instruction_before(struct midgard_instruction *tag, struct midgard_instruction ins) -{ - list_addtail(&(mir_upload_ins(ins))->link, &tag->link); -} - -static void -mir_remove_instruction(struct midgard_instruction *ins) -{ - list_del(&ins->link); -} - -static midgard_instruction* -mir_prev_op(struct midgard_instruction *ins) -{ - return list_last_entry(&(ins->link), midgard_instruction, link); -} - -static midgard_instruction* -mir_next_op(struct midgard_instruction *ins) -{ - return list_first_entry(&(ins->link), midgard_instruction, link); -} - -#define mir_foreach_block(ctx, v) list_for_each_entry(struct 
midgard_block, v, &ctx->blocks, link) -#define mir_foreach_block_from(ctx, from, v) list_for_each_entry_from(struct midgard_block, v, from, &ctx->blocks, link) - -#define mir_foreach_instr(ctx, v) list_for_each_entry(struct midgard_instruction, v, &ctx->current_block->instructions, link) -#define mir_foreach_instr_safe(ctx, v) list_for_each_entry_safe(struct midgard_instruction, v, &ctx->current_block->instructions, link) -#define mir_foreach_instr_in_block(block, v) list_for_each_entry(struct midgard_instruction, v, &block->instructions, link) -#define mir_foreach_instr_in_block_safe(block, v) list_for_each_entry_safe(struct midgard_instruction, v, &block->instructions, link) -#define mir_foreach_instr_in_block_safe_rev(block, v) list_for_each_entry_safe_rev(struct midgard_instruction, v, &block->instructions, link) -#define mir_foreach_instr_in_block_from(block, v, from) list_for_each_entry_from(struct midgard_instruction, v, from, &block->instructions, link) -#define mir_foreach_instr_in_block_from_rev(block, v, from) list_for_each_entry_from_rev(struct midgard_instruction, v, from, &block->instructions, link) - - -static midgard_instruction * -mir_last_in_block(struct midgard_block *block) -{ - return list_last_entry(&block->instructions, struct midgard_instruction, link); -} - -static midgard_block * -mir_get_block(compiler_context *ctx, int idx) -{ - struct list_head *lst = &ctx->blocks; - - while ((idx--) + 1) - lst = lst->next; - - return (struct midgard_block *) lst; -} - -/* Pretty printer for internal Midgard IR */ - -static void -print_mir_source(int source) -{ - if (source >= SSA_FIXED_MINIMUM) { - /* Specific register */ - int reg = SSA_REG_FROM_FIXED(source); - - /* TODO: Moving threshold */ - if (reg > 16 && reg < 24) - printf("u%d", 23 - reg); - else - printf("r%d", reg); - } else { - printf("%d", source); - } -} - -static void -print_mir_instruction(midgard_instruction *ins) -{ - printf("\t"); - - switch (ins->type) { - case TAG_ALU_4: { - midgard_alu_op op = ins->alu.op; - const char *name = alu_opcode_props[op].name; - - if (ins->unit) - printf("%d.", ins->unit); - - printf("%s", name ? 
name : "??"); - break; - } - - case TAG_LOAD_STORE_4: { - midgard_load_store_op op = ins->load_store.op; - const char *name = load_store_opcode_names[op]; - - assert(name); - printf("%s", name); - break; - } - - case TAG_TEXTURE_4: { - printf("texture"); - break; - } - - default: - assert(0); - } - - ssa_args *args = &ins->ssa_args; - - printf(" %d, ", args->dest); - - print_mir_source(args->src0); - printf(", "); - - if (args->inline_constant) - printf("#%d", ins->inline_constant); - else - print_mir_source(args->src1); - - if (ins->has_constants) - printf(" <%f, %f, %f, %f>", ins->constants[0], ins->constants[1], ins->constants[2], ins->constants[3]); - - printf("\n"); -} - -static void -print_mir_block(midgard_block *block) -{ - printf("{\n"); - - mir_foreach_instr_in_block(block, ins) { - print_mir_instruction(ins); - } - - printf("}\n"); -} - static void attach_constants(compiler_context *ctx, midgard_instruction *ins, void *constants, int name) { ins->has_constants = true; memcpy(&ins->constants, constants, 16); - - /* If this is the special blend constant, mark this instruction */ - - if (ctx->is_blend && ctx->blend_constant_number == name) - ins->has_blend_constant = true; } static int @@ -728,16 +290,59 @@ midgard_nir_sysval_for_intrinsic(nir_intrinsic_instr *instr) } } -static void -midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr) +static unsigned +nir_dest_index(compiler_context *ctx, nir_dest *dst) +{ + if (dst->is_ssa) + return dst->ssa.index; + else { + assert(!dst->reg.indirect); + return ctx->func->impl->ssa_alloc + dst->reg.reg->index; + } +} + +static int sysval_for_instr(compiler_context *ctx, nir_instr *instr, + unsigned *dest) { + nir_intrinsic_instr *intr; + nir_dest *dst = NULL; + nir_tex_instr *tex; int sysval = -1; - if (instr->type == nir_instr_type_intrinsic) { - nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr); + switch (instr->type) { + case nir_instr_type_intrinsic: + intr = nir_instr_as_intrinsic(instr); sysval = midgard_nir_sysval_for_intrinsic(intr); + dst = &intr->dest; + break; + case nir_instr_type_tex: + tex = nir_instr_as_tex(instr); + if (tex->op != nir_texop_txs) + break; + + sysval = PAN_SYSVAL(TEXTURE_SIZE, + PAN_TXS_SYSVAL_ID(tex->texture_index, + nir_tex_instr_dest_size(tex) - + (tex->is_array ? 1 : 0), + tex->is_array)); + dst = &tex->dest; + break; + default: + break; } + if (dest && dst) + *dest = nir_dest_index(ctx, dst); + + return sysval; +} + +static void +midgard_nir_assign_sysval_body(compiler_context *ctx, nir_instr *instr) +{ + int sysval; + + sysval = sysval_for_instr(ctx, instr, NULL); if (sysval < 0) return; @@ -803,15 +408,26 @@ static void optimise_nir(nir_shader *nir) { bool progress; + unsigned lower_flrp = + (nir->options->lower_flrp16 ? 16 : 0) | + (nir->options->lower_flrp32 ? 32 : 0) | + (nir->options->lower_flrp64 ? 
64 : 0); NIR_PASS(progress, nir, nir_lower_regs_to_ssa); NIR_PASS(progress, nir, midgard_nir_lower_fdot2); + NIR_PASS(progress, nir, nir_lower_idiv); - nir_lower_tex_options lower_tex_options = { - .lower_rect = true + nir_lower_tex_options lower_tex_1st_pass_options = { + .lower_rect = true, + .lower_txp = ~0 }; - NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_options); + nir_lower_tex_options lower_tex_2nd_pass_options = { + .lower_txs_lod = true, + }; + + NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_1st_pass_options); + NIR_PASS(progress, nir, nir_lower_tex, &lower_tex_2nd_pass_options); do { progress = false; @@ -826,14 +442,34 @@ optimise_nir(nir_shader *nir) NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true); NIR_PASS(progress, nir, nir_opt_algebraic); NIR_PASS(progress, nir, nir_opt_constant_folding); + + if (lower_flrp != 0) { + bool lower_flrp_progress = false; + NIR_PASS(lower_flrp_progress, + nir, + nir_lower_flrp, + lower_flrp, + false /* always_precise */, + nir->options->lower_ffma); + if (lower_flrp_progress) { + NIR_PASS(progress, nir, + nir_opt_constant_folding); + progress = true; + } + + /* Nothing should rematerialize any flrps, so we only + * need to do this lowering once. + */ + lower_flrp = 0; + } + NIR_PASS(progress, nir, nir_opt_undef); NIR_PASS(progress, nir, nir_opt_loop_unroll, nir_var_shader_in | nir_var_shader_out | nir_var_function_temp); - /* TODO: Enable vectorize when merged upstream */ - // NIR_PASS(progress, nir, nir_opt_vectorize); + NIR_PASS(progress, nir, nir_opt_vectorize); } while (progress); /* Must be run at the end to prevent creation of fsin/fcos ops */ @@ -849,6 +485,11 @@ optimise_nir(nir_shader *nir) } while (progress); NIR_PASS(progress, nir, nir_opt_algebraic_late); + + /* We implement booleans as 32-bit 0/~0 */ + NIR_PASS(progress, nir, nir_lower_bool_to_int32); + + /* Now that booleans are lowered, we can run out late opts */ NIR_PASS(progress, nir, midgard_nir_lower_algebraic_late); /* Lower mods for float ops only. Integer ops don't support modifiers @@ -859,9 +500,6 @@ optimise_nir(nir_shader *nir) NIR_PASS(progress, nir, nir_copy_prop); NIR_PASS(progress, nir, nir_opt_dce); - /* We implement booleans as 32-bit 0/~0 */ - NIR_PASS(progress, nir, nir_lower_bool_to_int32); - /* Take us out of SSA */ NIR_PASS(progress, nir, nir_lower_locals_to_regs); NIR_PASS(progress, nir, nir_convert_from_ssa, true); @@ -893,18 +531,6 @@ unalias_ssa(compiler_context *ctx, int dest) /* TODO: Remove from leftover or no? 
*/ } -static void -midgard_pin_output(compiler_context *ctx, int index, int reg) -{ - _mesa_hash_table_u64_insert(ctx->ssa_to_register, index + 1, (void *) ((uintptr_t) reg + 1)); -} - -static bool -midgard_is_pinned(compiler_context *ctx, int index) -{ - return _mesa_hash_table_u64_search(ctx->ssa_to_register, index + 1) != NULL; -} - /* Do not actually emit a load; instead, cache the constant for inlining */ static void @@ -912,117 +538,97 @@ emit_load_const(compiler_context *ctx, nir_load_const_instr *instr) { nir_ssa_def def = instr->def; - float *v = ralloc_array(NULL, float, 4); + float *v = rzalloc_array(NULL, float, 4); nir_const_load_to_arr(v, instr, f32); _mesa_hash_table_u64_insert(ctx->ssa_constants, def.index + 1, v); } -/* Duplicate bits to convert sane 4-bit writemask to obscure 8-bit format (or - * do the inverse) */ - -static unsigned -expand_writemask(unsigned mask) -{ - unsigned o = 0; - - for (int i = 0; i < 4; ++i) - if (mask & (1 << i)) - o |= (3 << (2 * i)); - - return o; -} - static unsigned -squeeze_writemask(unsigned mask) +nir_src_index(compiler_context *ctx, nir_src *src) { - unsigned o = 0; - - for (int i = 0; i < 4; ++i) - if (mask & (3 << (2 * i))) - o |= (1 << i); - - return o; - + if (src->is_ssa) + return src->ssa->index; + else { + assert(!src->reg.indirect); + return ctx->func->impl->ssa_alloc + src->reg.reg->index; + } } -/* Determines effective writemask, taking quirks and expansion into account */ static unsigned -effective_writemask(midgard_vector_alu *alu) +nir_alu_src_index(compiler_context *ctx, nir_alu_src *src) { - /* Channel count is off-by-one to fit in two-bits (0 channel makes no - * sense) */ - - unsigned channel_count = GET_CHANNEL_COUNT(alu_opcode_props[alu->op].props); + return nir_src_index(ctx, &src->src); +} - /* If there is a fixed channel count, construct the appropriate mask */ +static bool +nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components) +{ + unsigned comp = src->swizzle[0]; - if (channel_count) - return (1 << channel_count) - 1; + for (unsigned c = 1; c < nr_components; ++c) { + if (src->swizzle[c] != comp) + return true; + } - /* Otherwise, just squeeze the existing mask */ - return squeeze_writemask(alu->mask); + return false; } -static unsigned -find_or_allocate_temp(compiler_context *ctx, unsigned hash) +/* Midgard puts scalar conditionals in r31.w; move an arbitrary source (the + * output of a conditional test) into that register */ + +static void +emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component) { - if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM)) - return hash; + int condition = nir_src_index(ctx, src); - unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(ctx->hash_to_temp, hash + 1); + /* Source to swizzle the desired component into w */ - if (temp) - return temp - 1; + const midgard_vector_alu_src alu_src = { + .swizzle = SWIZZLE(component, component, component, component), + }; - /* If no temp is find, allocate one */ - temp = ctx->temp_count++; - ctx->max_hash = MAX2(ctx->max_hash, hash); + /* There is no boolean move instruction. Instead, we simulate a move by + * ANDing the condition with itself to get it into r31.w */ - _mesa_hash_table_u64_insert(ctx->hash_to_temp, hash + 1, (void *) ((uintptr_t) temp + 1)); + midgard_instruction ins = { + .type = TAG_ALU_4, - return temp; -} + /* We need to set the conditional as close as possible */ + .precede_break = true, + .unit = for_branch ? 
UNIT_SMUL : UNIT_SADD, + .mask = 1 << COMPONENT_W, -static unsigned -nir_src_index(compiler_context *ctx, nir_src *src) -{ - if (src->is_ssa) - return src->ssa->index; - else { - assert(!src->reg.indirect); - return ctx->func->impl->ssa_alloc + src->reg.reg->index; - } -} + .ssa_args = { + .src0 = condition, + .src1 = condition, + .dest = SSA_FIXED_REGISTER(31), + }, -static unsigned -nir_dest_index(compiler_context *ctx, nir_dest *dst) -{ - if (dst->is_ssa) - return dst->ssa.index; - else { - assert(!dst->reg.indirect); - return ctx->func->impl->ssa_alloc + dst->reg.reg->index; - } -} + .alu = { + .op = midgard_alu_op_iand, + .outmod = midgard_outmod_int_wrap, + .reg_mode = midgard_reg_mode_32, + .dest_override = midgard_dest_override_none, + .src1 = vector_alu_srco_unsigned(alu_src), + .src2 = vector_alu_srco_unsigned(alu_src) + }, + }; -static unsigned -nir_alu_src_index(compiler_context *ctx, nir_alu_src *src) -{ - return nir_src_index(ctx, &src->src); + emit_mir_instruction(ctx, ins); } -/* Midgard puts conditionals in r31.w; move an arbitrary source (the output of - * a conditional test) into that register */ +/* Or, for mixed conditions (with csel_v), here's a vector version using all of + * r31 instead */ static void -emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned component) +emit_condition_mixed(compiler_context *ctx, nir_alu_src *src, unsigned nr_comp) { - int condition = nir_src_index(ctx, src); + int condition = nir_src_index(ctx, &src->src); /* Source to swizzle the desired component into w */ const midgard_vector_alu_src alu_src = { - .swizzle = SWIZZLE(component, component, component, component), + .swizzle = SWIZZLE_FROM_ARRAY(src->swizzle), }; /* There is no boolean move instruction. Instead, we simulate a move by @@ -1030,7 +636,8 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co midgard_instruction ins = { .type = TAG_ALU_4, - .unit = for_branch ? UNIT_SMUL : UNIT_SADD, /* TODO: DEDUCE THIS */ + .precede_break = true, + .mask = mask_of(nr_comp), .ssa_args = { .src0 = condition, .src1 = condition, @@ -1038,9 +645,9 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co }, .alu = { .op = midgard_alu_op_iand, - .reg_mode = midgard_reg_mode_full, + .outmod = midgard_outmod_int_wrap, + .reg_mode = midgard_reg_mode_32, .dest_override = midgard_dest_override_none, - .mask = (0x3 << 6), /* w */ .src1 = vector_alu_srco_unsigned(alu_src), .src2 = vector_alu_srco_unsigned(alu_src) }, @@ -1049,6 +656,8 @@ emit_condition(compiler_context *ctx, nir_src *src, bool for_branch, unsigned co emit_mir_instruction(ctx, ins); } + + /* Likewise, indirect offsets are put in r27.w. 
TODO: Allow componentwise * pinning to eliminate this move in all known cases */ @@ -1059,6 +668,7 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src) midgard_instruction ins = { .type = TAG_ALU_4, + .mask = 1 << COMPONENT_W, .ssa_args = { .src0 = SSA_UNUSED_1, .src1 = offset, @@ -1066,9 +676,9 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src) }, .alu = { .op = midgard_alu_op_imov, - .reg_mode = midgard_reg_mode_full, + .outmod = midgard_outmod_int_wrap, + .reg_mode = midgard_reg_mode_32, .dest_override = midgard_dest_override_none, - .mask = (0x3 << 6), /* w */ .src1 = vector_alu_srco_unsigned(zero_alu_src), .src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx) }, @@ -1080,8 +690,15 @@ emit_indirect_offset(compiler_context *ctx, nir_src *src) #define ALU_CASE(nir, _op) \ case nir_op_##nir: \ op = midgard_alu_op_##_op; \ + assert(src_bitsize == dst_bitsize); \ break; +#define ALU_CASE_BCAST(nir, _op, count) \ + case nir_op_##nir: \ + op = midgard_alu_op_##_op; \ + broadcast_swizzle = count; \ + assert(src_bitsize == dst_bitsize); \ + break; static bool nir_is_fzero_constant(nir_src src) { @@ -1096,13 +713,35 @@ nir_is_fzero_constant(nir_src src) return true; } +/* Analyze the sizes of the inputs to determine which reg mode. Ops needed + * special treatment override this anyway. */ + +static midgard_reg_mode +reg_mode_for_nir(nir_alu_instr *instr) +{ + unsigned src_bitsize = nir_src_bit_size(instr->src[0].src); + + switch (src_bitsize) { + case 8: + return midgard_reg_mode_8; + case 16: + return midgard_reg_mode_16; + case 32: + return midgard_reg_mode_32; + case 64: + return midgard_reg_mode_64; + default: + unreachable("Invalid bit size"); + } +} + static void emit_alu(compiler_context *ctx, nir_alu_instr *instr) { bool is_ssa = instr->dest.dest.is_ssa; unsigned dest = nir_dest_index(ctx, &instr->dest.dest); - unsigned nr_components = is_ssa ? instr->dest.dest.ssa.num_components : instr->dest.dest.reg.reg->num_components; + unsigned nr_components = nir_dest_num_components(instr->dest.dest); unsigned nr_inputs = nir_op_infos[instr->op].num_inputs; /* Most Midgard ALU ops have a 1:1 correspondance to NIR ops; these are @@ -1114,6 +753,31 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) unsigned op; + /* Number of components valid to check for the instruction (the rest + * will be forced to the last), or 0 to use as-is. Relevant as + * ball-type instructions have a channel count in NIR but are all vec4 + * in Midgard */ + + unsigned broadcast_swizzle = 0; + + /* What register mode should we operate in? */ + midgard_reg_mode reg_mode = + reg_mode_for_nir(instr); + + /* Do we need a destination override? Used for inline + * type conversion */ + + midgard_dest_override dest_override = + midgard_dest_override_none; + + /* Should we use a smaller respective source and sign-extend? 
*/ + + bool half_1 = false, sext_1 = false; + bool half_2 = false, sext_2 = false; + + unsigned src_bitsize = nir_src_bit_size(instr->src[0].src); + unsigned dst_bitsize = nir_dest_bit_size(instr->dest.dest); + switch (instr->op) { ALU_CASE(fadd, fadd); ALU_CASE(fmul, fmul); @@ -1123,7 +787,6 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) ALU_CASE(imax, imax); ALU_CASE(umin, umin); ALU_CASE(umax, umax); - ALU_CASE(fmov, fmov); ALU_CASE(ffloor, ffloor); ALU_CASE(fround_even, froundeven); ALU_CASE(ftrunc, ftrunc); @@ -1133,13 +796,11 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) ALU_CASE(iadd, iadd); ALU_CASE(isub, isub); ALU_CASE(imul, imul); - ALU_CASE(iabs, iabs); - /* XXX: Use fmov, not imov for now, since NIR does not - * differentiate well (it'll happily emits imov for floats, - * which the hardware rather dislikes and breaks e.g - * -bjellyfish */ - ALU_CASE(imov, fmov); + /* Zero shoved as second-arg */ + ALU_CASE(iabs, iabsdiff); + + ALU_CASE(mov, imov); ALU_CASE(feq32, feq); ALU_CASE(fne32, fne); @@ -1175,38 +836,100 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) ALU_CASE(fexp2, fexp2); ALU_CASE(flog2, flog2); - ALU_CASE(f2i32, f2i); - ALU_CASE(f2u32, f2u); - ALU_CASE(i2f32, i2f); - ALU_CASE(u2f32, u2f); + ALU_CASE(f2i32, f2i_rtz); + ALU_CASE(f2u32, f2u_rtz); + ALU_CASE(i2f32, i2f_rtz); + ALU_CASE(u2f32, u2f_rtz); + + ALU_CASE(f2i16, f2i_rtz); + ALU_CASE(f2u16, f2u_rtz); + ALU_CASE(i2f16, i2f_rtz); + ALU_CASE(u2f16, u2f_rtz); ALU_CASE(fsin, fsin); ALU_CASE(fcos, fcos); + /* Second op implicit #0 */ + ALU_CASE(inot, inor); ALU_CASE(iand, iand); ALU_CASE(ior, ior); ALU_CASE(ixor, ixor); - ALU_CASE(inot, inand); ALU_CASE(ishl, ishl); ALU_CASE(ishr, iasr); ALU_CASE(ushr, ilsr); - ALU_CASE(b32all_fequal2, fball_eq); - ALU_CASE(b32all_fequal3, fball_eq); + ALU_CASE_BCAST(b32all_fequal2, fball_eq, 2); + ALU_CASE_BCAST(b32all_fequal3, fball_eq, 3); ALU_CASE(b32all_fequal4, fball_eq); - ALU_CASE(b32any_fnequal2, fbany_neq); - ALU_CASE(b32any_fnequal3, fbany_neq); + ALU_CASE_BCAST(b32any_fnequal2, fbany_neq, 2); + ALU_CASE_BCAST(b32any_fnequal3, fbany_neq, 3); ALU_CASE(b32any_fnequal4, fbany_neq); - ALU_CASE(b32all_iequal2, iball_eq); - ALU_CASE(b32all_iequal3, iball_eq); + ALU_CASE_BCAST(b32all_iequal2, iball_eq, 2); + ALU_CASE_BCAST(b32all_iequal3, iball_eq, 3); ALU_CASE(b32all_iequal4, iball_eq); - ALU_CASE(b32any_inequal2, ibany_neq); - ALU_CASE(b32any_inequal3, ibany_neq); + ALU_CASE_BCAST(b32any_inequal2, ibany_neq, 2); + ALU_CASE_BCAST(b32any_inequal3, ibany_neq, 3); ALU_CASE(b32any_inequal4, ibany_neq); + /* Source mods will be shoved in later */ + ALU_CASE(fabs, fmov); + ALU_CASE(fneg, fmov); + ALU_CASE(fsat, fmov); + + /* For size conversion, we use a move. Ideally though we would squash + * these ops together; maybe that has to happen after in NIR as part of + * propagation...? An earlier algebraic pass ensured we step down by + * only / exactly one size. 
If stepping down, we use a dest override to + * reduce the size; if stepping up, we use a larger-sized move with a + * half source and a sign/zero-extension modifier */ + + case nir_op_i2i8: + case nir_op_i2i16: + case nir_op_i2i32: + /* If we end up upscale, we'll need a sign-extend on the + * operand (the second argument) */ + + sext_2 = true; + case nir_op_u2u8: + case nir_op_u2u16: + case nir_op_u2u32: { + op = midgard_alu_op_imov; + + if (dst_bitsize == (src_bitsize * 2)) { + /* Converting up */ + half_2 = true; + + /* Use a greater register mode */ + reg_mode++; + } else if (src_bitsize == (dst_bitsize * 2)) { + /* Converting down */ + dest_override = midgard_dest_override_lower; + } + + break; + } + + case nir_op_f2f16: { + assert(src_bitsize == 32); + + op = midgard_alu_op_fmov; + dest_override = midgard_dest_override_lower; + break; + } + + case nir_op_f2f32: { + assert(src_bitsize == 16); + + op = midgard_alu_op_fmov; + half_2 = true; + reg_mode++; + break; + } + + /* For greater-or-equal, we lower to less-or-equal and flip the * arguments */ @@ -1229,44 +952,34 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) break; } - /* For a few special csel cases not handled by NIR, we can opt to - * bitwise. Otherwise, we emit the condition and do a real csel */ - case nir_op_b32csel: { - if (nir_is_fzero_constant(instr->src[2].src)) { - /* (b ? v : 0) = (b & v) */ - op = midgard_alu_op_iand; - nr_inputs = 2; - } else if (nir_is_fzero_constant(instr->src[1].src)) { - /* (b ? 0 : v) = (!b ? v : 0) = (~b & v) = (v & ~b) */ - op = midgard_alu_op_iandnot; - nr_inputs = 2; - instr->src[1] = instr->src[0]; - instr->src[0] = instr->src[2]; - } else { - op = midgard_alu_op_fcsel; + /* Midgard features both fcsel and icsel, depending on + * the type of the arguments/output. However, as long + * as we're careful we can _always_ use icsel and + * _never_ need fcsel, since the latter does additional + * floating-point-specific processing whereas the + * former just moves bits on the wire. It's not obvious + * why these are separate opcodes, save for the ability + * to do things like sat/pos/abs/neg for free */ - /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */ - nr_inputs = 2; + bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components); + op = mixed ? 
midgard_alu_op_icsel_v : midgard_alu_op_icsel; - /* Figure out which component the condition is in */ + /* csel works as a two-arg in Midgard, since the condition is hardcoded in r31.w */ + nr_inputs = 2; - unsigned comp = instr->src[0].swizzle[0]; + /* Emit the condition into r31 */ - /* Make sure NIR isn't throwing a mixed condition at us */ - - for (unsigned c = 1; c < nr_components; ++c) - assert(instr->src[0].swizzle[c] == comp); - - /* Emit the condition into r31.w */ - emit_condition(ctx, &instr->src[0].src, false, comp); + if (mixed) + emit_condition_mixed(ctx, &instr->src[0], nr_components); + else + emit_condition(ctx, &instr->src[0].src, false, instr->src[0].swizzle[0]); - /* The condition is the first argument; move the other - * arguments up one to be a binary instruction for - * Midgard */ + /* The condition is the first argument; move the other + * arguments up one to be a binary instruction for + * Midgard */ - memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src)); - } + memmove(instr->src, instr->src + 1, 2 * sizeof(nir_alu_src)); break; } @@ -1276,9 +989,15 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) return; } - /* Midgard can perform certain modifiers on output ofa n ALU op */ - midgard_outmod outmod = - instr->dest.saturate ? midgard_outmod_sat : midgard_outmod_none; + /* Midgard can perform certain modifiers on output of an ALU op */ + unsigned outmod; + + if (midgard_is_integer_out_op(op)) { + outmod = midgard_outmod_int_wrap; + } else { + bool sat = instr->dest.saturate || instr->op == nir_op_fsat; + outmod = sat ? midgard_outmod_sat : midgard_outmod_none; + } /* fmax(a, 0.0) can turn into a .pos modifier as an optimization */ @@ -1329,25 +1048,36 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) assert(0); } + /* These were lowered to a move, so apply the corresponding mod */ + + if (instr->op == nir_op_fneg || instr->op == nir_op_fabs) { + nir_alu_src *s = nirmods[quirk_flipped_r24]; + + if (instr->op == nir_op_fneg) + s->negate = !s->negate; + + if (instr->op == nir_op_fabs) + s->abs = !s->abs; + } + bool is_int = midgard_is_integer_op(op); + ins.mask = mask_of(nr_components); + midgard_vector_alu alu = { .op = op, - .reg_mode = midgard_reg_mode_full, - .dest_override = midgard_dest_override_none, + .reg_mode = reg_mode, + .dest_override = dest_override, .outmod = outmod, - /* Writemask only valid for non-SSA NIR */ - .mask = expand_writemask((1 << nr_components) - 1), - - .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)), - .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int)), + .src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, sext_1)), + .src2 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[1], is_int, broadcast_swizzle, half_2, sext_2)), }; /* Apply writemask if non-SSA, keeping in mind that we can't write to components that don't exist */ if (!is_ssa) - alu.mask &= expand_writemask(instr->dest.write_mask); + ins.mask &= instr->dest.write_mask; ins.alu = alu; @@ -1372,7 +1102,8 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) } ins.alu.src2 = vector_alu_srco_unsigned(blank_alu_src_xxxx); - } else if (instr->op == nir_op_f2b32 || instr->op == nir_op_i2b32) { + } else if (nr_inputs == 1 && !quirk_flipped_r24) { + /* Lots of instructions need a 0 plonked in */ ins.ssa_args.inline_constant = false; ins.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT); ins.has_constants = true; @@ -1391,14 +1122,22 @@ 
emit_alu(compiler_context *ctx, nir_alu_instr *instr) uint8_t original_swizzle[4]; memcpy(original_swizzle, nirmods[0]->swizzle, sizeof(nirmods[0]->swizzle)); + unsigned orig_mask = ins.mask; for (int i = 0; i < nr_components; ++i) { - ins.alu.mask = (0x3) << (2 * i); /* Mask the associated component */ + /* Mask the associated component, dropping the + * instruction if needed */ + + ins.mask = 1 << i; + ins.mask &= orig_mask; + + if (!ins.mask) + continue; for (int j = 0; j < 4; ++j) nirmods[0]->swizzle[j] = original_swizzle[i]; /* Pull from the correct component */ - ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int)); + ins.alu.src1 = vector_alu_srco_unsigned(vector_alu_modifiers(nirmods[0], is_int, broadcast_swizzle, half_1, false)); emit_mir_instruction(ctx, ins); } } else { @@ -1408,12 +1147,20 @@ emit_alu(compiler_context *ctx, nir_alu_instr *instr) #undef ALU_CASE +/* Uniforms and UBOs use a shared code path, as uniforms are just (slightly + * optimized) versions of UBO #0 */ + static void -emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src *indirect_offset) +emit_ubo_read( + compiler_context *ctx, + unsigned dest, + unsigned offset, + nir_src *indirect_offset, + unsigned index) { /* TODO: half-floats */ - if (!indirect_offset && offset < ctx->uniform_cutoff) { + if (!indirect_offset && offset < ctx->uniform_cutoff && index == 0) { /* Fast path: For the first 16 uniforms, direct accesses are * 0-cycle, since they're just a register fetch in the usual * case. So, we alias the registers while we're still in @@ -1426,7 +1173,7 @@ emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src * higher-indexed uniforms, at a performance cost. More * generally, we're emitting a UBO read instruction. */ - midgard_instruction ins = m_load_uniform_32(dest, offset); + midgard_instruction ins = m_ld_uniform_32(dest, offset); /* TODO: Don't split */ ins.load_store.varying_parameters = (offset & 7) << 7; @@ -1434,36 +1181,89 @@ emit_uniform_read(compiler_context *ctx, unsigned dest, unsigned offset, nir_src if (indirect_offset) { emit_indirect_offset(ctx, indirect_offset); - ins.load_store.unknown = 0x8700; /* xxx: what is this? */ + ins.load_store.unknown = 0x8700 | index; /* xxx: what is this? */ } else { - ins.load_store.unknown = 0x1E00; /* xxx: what is this? */ + ins.load_store.unknown = 0x1E00 | index; /* xxx: what is this? */ } + /* TODO respect index */ + emit_mir_instruction(ctx, ins); } } static void -emit_sysval_read(compiler_context *ctx, nir_intrinsic_instr *instr) -{ - /* First, pull out the destination */ - unsigned dest = nir_dest_index(ctx, &instr->dest); +emit_varying_read( + compiler_context *ctx, + unsigned dest, unsigned offset, + unsigned nr_comp, unsigned component, + nir_src *indirect_offset, nir_alu_type type) +{ + /* XXX: Half-floats? */ + /* TODO: swizzle, mask */ + + midgard_instruction ins = m_ld_vary_32(dest, offset); + ins.mask = mask_of(nr_comp); + ins.load_store.swizzle = SWIZZLE_XYZW >> (2 * component); + + midgard_varying_parameter p = { + .is_varying = 1, + .interpolation = midgard_interp_default, + .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0 + }; + + unsigned u; + memcpy(&u, &p, sizeof(p)); + ins.load_store.varying_parameters = u; + + if (indirect_offset) { + /* We need to add in the dynamic index, moved to r27.w */ + emit_indirect_offset(ctx, indirect_offset); + ins.load_store.unknown = 0x79e; /* xxx: what is this? 
*/ + } else { + /* Just a direct load */ + ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */ + } + + /* Use the type appropriate load */ + switch (type) { + case nir_type_uint: + case nir_type_bool: + ins.load_store.op = midgard_op_ld_vary_32u; + break; + case nir_type_int: + ins.load_store.op = midgard_op_ld_vary_32i; + break; + case nir_type_float: + ins.load_store.op = midgard_op_ld_vary_32; + break; + default: + unreachable("Attempted to load unknown type"); + break; + } + + emit_mir_instruction(ctx, ins); +} - /* Now, figure out which uniform this is */ - int sysval = midgard_nir_sysval_for_intrinsic(instr); +static void +emit_sysval_read(compiler_context *ctx, nir_instr *instr) +{ + unsigned dest; + /* Figure out which uniform this is */ + int sysval = sysval_for_instr(ctx, instr, &dest); void *val = _mesa_hash_table_u64_search(ctx->sysval_to_id, sysval); /* Sysvals are prefix uniforms */ unsigned uniform = ((uintptr_t) val) - 1; /* Emit the read itself -- this is never indirect */ - emit_uniform_read(ctx, dest, uniform, NULL); + emit_ubo_read(ctx, dest, uniform, NULL, 0); } static void emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) { - unsigned offset, reg; + unsigned offset = 0, reg; switch (instr->intrinsic) { case nir_intrinsic_discard_if: @@ -1482,155 +1282,117 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) } case nir_intrinsic_load_uniform: - case nir_intrinsic_load_input: - offset = nir_intrinsic_base(instr); + case nir_intrinsic_load_ubo: + case nir_intrinsic_load_input: { + bool is_uniform = instr->intrinsic == nir_intrinsic_load_uniform; + bool is_ubo = instr->intrinsic == nir_intrinsic_load_ubo; + + /* Get the base type of the intrinsic */ + /* TODO: Infer type? Does it matter? */ + nir_alu_type t = + is_ubo ? nir_type_uint : nir_intrinsic_type(instr); + t = nir_alu_type_get_base_type(t); + + if (!is_ubo) { + offset = nir_intrinsic_base(instr); + } - bool direct = nir_src_is_const(instr->src[0]); + unsigned nr_comp = nir_intrinsic_dest_components(instr); - if (direct) { - offset += nir_src_as_uint(instr->src[0]); - } + nir_src *src_offset = nir_get_io_offset_src(instr); - reg = nir_dest_index(ctx, &instr->dest); + bool direct = nir_src_is_const(*src_offset); - if (instr->intrinsic == nir_intrinsic_load_uniform && !ctx->is_blend) { - emit_uniform_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL); - } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) { - /* XXX: Half-floats? */ - /* TODO: swizzle, mask */ + if (direct) + offset += nir_src_as_uint(*src_offset); - midgard_instruction ins = m_load_vary_32(reg, offset); + /* We may need to apply a fractional offset */ + int component = instr->intrinsic == nir_intrinsic_load_input ? + nir_intrinsic_component(instr) : 0; + reg = nir_dest_index(ctx, &instr->dest); - midgard_varying_parameter p = { - .is_varying = 1, - .interpolation = midgard_interp_default, - .flat = /*var->data.interpolation == INTERP_MODE_FLAT*/ 0 - }; + if (is_uniform && !ctx->is_blend) { + emit_ubo_read(ctx, reg, ctx->sysval_count + offset, !direct ? &instr->src[0] : NULL, 0); + } else if (is_ubo) { + nir_src index = instr->src[0]; - unsigned u; - memcpy(&u, &p, sizeof(p)); - ins.load_store.varying_parameters = u; + /* We don't yet support indirect UBOs. For indirect + * block numbers (if that's possible), we don't know + * enough about the hardware yet. 
For indirect sources, + * we know what we need but we need to add some NIR + * support for lowering correctly with respect to + * 128-bit reads */ - if (direct) { - /* We have the offset totally ready */ - ins.load_store.unknown = 0x1e9e; /* xxx: what is this? */ - } else { - /* We have it partially ready, but we need to - * add in the dynamic index, moved to r27.w */ - emit_indirect_offset(ctx, &instr->src[0]); - ins.load_store.unknown = 0x79e; /* xxx: what is this? */ - } + assert(nir_src_is_const(index)); + assert(nir_src_is_const(*src_offset)); - emit_mir_instruction(ctx, ins); - } else if (ctx->is_blend && instr->intrinsic == nir_intrinsic_load_uniform) { - /* Constant encoded as a pinned constant */ + /* TODO: Alignment */ + assert((offset & 0xF) == 0); - midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg); - ins.has_constants = true; - ins.has_blend_constant = true; - emit_mir_instruction(ctx, ins); + uint32_t uindex = nir_src_as_uint(index) + 1; + emit_ubo_read(ctx, reg, offset / 16, NULL, uindex); + } else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) { + emit_varying_read(ctx, reg, offset, nr_comp, component, !direct ? &instr->src[0] : NULL, t); } else if (ctx->is_blend) { - /* For blend shaders, a load might be - * translated various ways depending on what - * we're loading. Figure out how this is used */ - - nir_variable *out = NULL; + /* For blend shaders, load the input color, which is + * preloaded to r0 */ - nir_foreach_variable(var, &ctx->nir->inputs) { - int drvloc = var->data.driver_location; + midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0)); + emit_mir_instruction(ctx, move); + } else if (ctx->stage == MESA_SHADER_VERTEX) { + midgard_instruction ins = m_ld_attr_32(reg, offset); + ins.load_store.unknown = 0x1E1E; /* XXX: What is this? 
*/ + ins.mask = mask_of(nr_comp); - if (nir_intrinsic_base(instr) == drvloc) { - out = var; + /* Use the type appropriate load */ + switch (t) { + case nir_type_uint: + case nir_type_bool: + ins.load_store.op = midgard_op_ld_attr_32u; + break; + case nir_type_int: + ins.load_store.op = midgard_op_ld_attr_32i; + break; + case nir_type_float: + ins.load_store.op = midgard_op_ld_attr_32; + break; + default: + unreachable("Attempted to load unknown type"); break; - } } - assert(out); - - if (out->data.location == VARYING_SLOT_COL0) { - /* Source color preloaded to r0 */ + emit_mir_instruction(ctx, ins); + } else { + DBG("Unknown load\n"); + assert(0); + } - midgard_pin_output(ctx, reg, 0); - } else if (out->data.location == VARYING_SLOT_COL1) { - /* Destination color must be read from framebuffer */ + break; + } - midgard_instruction ins = m_load_color_buffer_8(reg, 0); - ins.load_store.swizzle = 0; /* xxxx */ + /* Reads 128-bit value raw off the tilebuffer during blending, tasty */ - /* Read each component sequentially */ + case nir_intrinsic_load_raw_output_pan: + reg = nir_dest_index(ctx, &instr->dest); + assert(ctx->is_blend); - for (int c = 0; c < 4; ++c) { - ins.load_store.mask = (1 << c); - ins.load_store.unknown = c; - emit_mir_instruction(ctx, ins); - } + midgard_instruction ins = m_ld_color_buffer_8(reg, 0); + emit_mir_instruction(ctx, ins); + break; - /* vadd.u2f hr2, zext(hr2), #0 */ - - midgard_vector_alu_src alu_src = blank_alu_src; - alu_src.mod = midgard_int_zero_extend; - alu_src.half = true; - - midgard_instruction u2f = { - .type = TAG_ALU_4, - .ssa_args = { - .src0 = reg, - .src1 = SSA_UNUSED_0, - .dest = reg, - .inline_constant = true - }, - .alu = { - .op = midgard_alu_op_u2f, - .reg_mode = midgard_reg_mode_half, - .dest_override = midgard_dest_override_none, - .mask = 0xF, - .src1 = vector_alu_srco_unsigned(alu_src), - .src2 = vector_alu_srco_unsigned(blank_alu_src), - } - }; - - emit_mir_instruction(ctx, u2f); - - /* vmul.fmul.sat r1, hr2, #0.00392151 */ - - alu_src.mod = 0; - - midgard_instruction fmul = { - .type = TAG_ALU_4, - .inline_constant = _mesa_float_to_half(1.0 / 255.0), - .ssa_args = { - .src0 = reg, - .dest = reg, - .src1 = SSA_UNUSED_0, - .inline_constant = true - }, - .alu = { - .op = midgard_alu_op_fmul, - .reg_mode = midgard_reg_mode_full, - .dest_override = midgard_dest_override_none, - .outmod = midgard_outmod_sat, - .mask = 0xFF, - .src1 = vector_alu_srco_unsigned(alu_src), - .src2 = vector_alu_srco_unsigned(blank_alu_src), - } - }; + case nir_intrinsic_load_blend_const_color_rgba: { + assert(ctx->is_blend); + reg = nir_dest_index(ctx, &instr->dest); - emit_mir_instruction(ctx, fmul); - } else { - DBG("Unknown input in blend shader\n"); - assert(0); - } - } else if (ctx->stage == MESA_SHADER_VERTEX) { - midgard_instruction ins = m_load_attr_32(reg, offset); - ins.load_store.unknown = 0x1E1E; /* XXX: What is this? 
*/ - ins.load_store.mask = (1 << instr->num_components) - 1; - emit_mir_instruction(ctx, ins); - } else { - DBG("Unknown load\n"); - assert(0); - } + /* Blend constants are embedded directly in the shader and + * patched in, so we use some magic routing */ + midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, reg); + ins.has_constants = true; + ins.has_blend_constant = true; + emit_mir_instruction(ctx, ins); break; + } case nir_intrinsic_store_output: assert(nir_src_is_const(instr->src[1]) && "no indirect outputs"); @@ -1646,7 +1408,8 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) * framebuffer writeout dance. TODO: Defer * writes */ - midgard_pin_output(ctx, reg, 0); + midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0)); + emit_mir_instruction(ctx, move); /* Save the index we're writing to for later reference * in the epilogue */ @@ -1654,44 +1417,27 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) ctx->fragment_output = reg; } else if (ctx->stage == MESA_SHADER_VERTEX) { /* Varyings are written into one of two special - * varying register, r26 or r27. The register itself is selected as the register - * in the st_vary instruction, minus the base of 26. E.g. write into r27 and then call st_vary(1) - * - * Normally emitting fmov's is frowned upon, - * but due to unique constraints of - * REGISTER_VARYING, fmov emission + a - * dedicated cleanup pass is the only way to - * guarantee correctness when considering some - * (common) edge cases XXX: FIXME */ - - /* If this varying corresponds to a constant (why?!), - * emit that now since it won't get picked up by - * hoisting (since there is no corresponding move - * emitted otherwise) */ - - void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, reg + 1); - - if (constant_value) { - /* Special case: emit the varying write - * directly to r26 (looks funny in asm but it's - * fine) and emit the store _now_. Possibly - * slightly slower, but this is a really stupid - * special case anyway (why on earth would you - * have a constant varying? Your own fault for - * slightly worse perf :P) */ - - midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(26)); - attach_constants(ctx, &ins, constant_value, reg + 1); - emit_mir_instruction(ctx, ins); - - midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(0), offset); - st.load_store.unknown = 0x1E9E; /* XXX: What is this? */ - emit_mir_instruction(ctx, st); - } else { - /* Do not emit the varying yet -- instead, just mark down that we need to later */ + * varying register, r26 or r27. The register itself is + * selected as the register in the st_vary instruction, + * minus the base of 26. E.g. write into r27 and then + * call st_vary(1) */ - _mesa_hash_table_u64_insert(ctx->ssa_varyings, reg + 1, (void *) ((uintptr_t) (offset + 1))); - } + midgard_instruction ins = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(26)); + emit_mir_instruction(ctx, ins); + + /* We should have been vectorized, though we don't + * currently check that st_vary is emitted only once + * per slot (this is relevant, since there's not a mask + * parameter available on the store [set to 0 by the + * blob]). We do respect the component by adjusting the + * swizzle. */ + + unsigned component = nir_intrinsic_component(instr); + + midgard_instruction st = m_st_vary_32(SSA_FIXED_REGISTER(0), offset); + st.load_store.unknown = 0x1E9E; /* XXX: What is this? 
*/ + st.load_store.swizzle = SWIZZLE_XYZW << (2*component); + emit_mir_instruction(ctx, st); } else { DBG("Unknown store\n"); assert(0); @@ -1699,6 +1445,17 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) break; + /* Special case of store_output for lowered blend shaders */ + case nir_intrinsic_store_raw_output_pan: + assert (ctx->stage == MESA_SHADER_FRAGMENT); + reg = nir_src_index(ctx, &instr->src[0]); + + midgard_instruction move = v_mov(reg, blank_alu_src, SSA_FIXED_REGISTER(0)); + emit_mir_instruction(ctx, move); + ctx->fragment_output = reg; + + break; + case nir_intrinsic_load_alpha_ref_float: assert(instr->dest.is_ssa); @@ -1711,7 +1468,7 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr) case nir_intrinsic_load_viewport_scale: case nir_intrinsic_load_viewport_offset: - emit_sysval_read(ctx, instr); + emit_sysval_read(ctx, &instr->instr); break; default: @@ -1725,15 +1482,19 @@ static unsigned midgard_tex_format(enum glsl_sampler_dim dim) { switch (dim) { + case GLSL_SAMPLER_DIM_1D: + case GLSL_SAMPLER_DIM_BUF: + return MALI_TEX_1D; + case GLSL_SAMPLER_DIM_2D: case GLSL_SAMPLER_DIM_EXTERNAL: - return TEXTURE_2D; + return MALI_TEX_2D; case GLSL_SAMPLER_DIM_3D: - return TEXTURE_3D; + return MALI_TEX_3D; case GLSL_SAMPLER_DIM_CUBE: - return TEXTURE_CUBE; + return MALI_TEX_CUBE; default: DBG("Unknown sampler dim type\n"); @@ -1742,13 +1503,61 @@ midgard_tex_format(enum glsl_sampler_dim dim) } } +/* Tries to attach an explicit LOD / bias as a constant. Returns whether this + * was successful */ + +static bool +pan_attach_constant_bias( + compiler_context *ctx, + nir_src lod, + midgard_texture_word *word) +{ + /* To attach as constant, it has to *be* constant */ + + if (!nir_src_is_const(lod)) + return false; + + float f = nir_src_as_float(lod); + + /* Break into fixed-point */ + signed lod_int = f; + float lod_frac = f - lod_int; + + /* Carry over negative fractions */ + if (lod_frac < 0.0) { + lod_int--; + lod_frac += 1.0; + } + + /* Encode */ + word->bias = float_to_ubyte(lod_frac); + word->bias_int = lod_int; + + return true; +} + +static enum mali_sampler_type +midgard_sampler_type(nir_alu_type t) +{ + switch (nir_alu_type_get_base_type(t)) { + case nir_type_float: + return MALI_SAMPLER_FLOAT; + case nir_type_int: + return MALI_SAMPLER_SIGNED; + case nir_type_uint: + return MALI_SAMPLER_UNSIGNED; + default: + unreachable("Unknown sampler type"); + } +} + static void -emit_tex(compiler_context *ctx, nir_tex_instr *instr) +emit_texop_native(compiler_context *ctx, nir_tex_instr *instr, + unsigned midgard_texop) { /* TODO */ //assert (!instr->sampler); //assert (!instr->texture_array_size); - assert (instr->op == nir_texop_tex); /* Allocate registers via a round robin scheme to alternate between the two registers */ int reg = ctx->texture_op_count & 1; @@ -1762,1153 +1571,229 @@ emit_tex(compiler_context *ctx, nir_tex_instr *instr) int texture_index = instr->texture_index; int sampler_index = texture_index; - for (unsigned i = 0; i < instr->num_srcs; ++i) { - switch (instr->src[i].src_type) { - case nir_tex_src_coord: { - int index = nir_src_index(ctx, &instr->src[i].src); - - midgard_vector_alu_src alu_src = blank_alu_src; - - int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg); - - if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) { - /* For cubemaps, we need to load coords into - * special r27, and then use a special ld/st op - * to copy into the texture register */ - - alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, 
COMPONENT_X); - - midgard_instruction move = v_fmov(index, alu_src, SSA_FIXED_REGISTER(27)); - emit_mir_instruction(ctx, move); - - midgard_instruction st = m_store_cubemap_coords(reg, 0); - st.load_store.unknown = 0x24; /* XXX: What is this? */ - st.load_store.mask = 0x3; /* xy? */ - st.load_store.swizzle = alu_src.swizzle; - emit_mir_instruction(ctx, st); - - } else { - alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X); - - midgard_instruction ins = v_fmov(index, alu_src, reg); - emit_mir_instruction(ctx, ins); - } - - break; - } - - default: { - DBG("Unknown source type\n"); - //assert(0); - break; - } - } - } - /* No helper to build texture words -- we do it all here */ midgard_instruction ins = { .type = TAG_TEXTURE_4, + .mask = 0xF, .texture = { - .op = TEXTURE_OP_NORMAL, + .op = midgard_texop, .format = midgard_tex_format(instr->sampler_dim), .texture_handle = texture_index, .sampler_handle = sampler_index, - /* TODO: Don't force xyzw */ - .swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W), - .mask = 0xF, + /* TODO: Regalloc it in */ + .swizzle = SWIZZLE_XYZW, /* TODO: half */ - //.in_reg_full = 1, + .in_reg_full = 1, .out_full = 1, - .filter = 1, - - /* Always 1 */ - .unknown7 = 1, - - /* Assume we can continue; hint it out later */ - .cont = 1, + .sampler_type = midgard_sampler_type(instr->dest_type), } }; - /* Set registers to read and write from the same place */ - ins.texture.in_reg_select = in_reg; - ins.texture.out_reg_select = out_reg; - - /* TODO: Dynamic swizzle input selection, half-swizzles? */ - if (instr->sampler_dim == GLSL_SAMPLER_DIM_3D) { - ins.texture.in_reg_swizzle_right = COMPONENT_X; - ins.texture.in_reg_swizzle_left = COMPONENT_Y; - //ins.texture.in_reg_swizzle_third = COMPONENT_Z; - } else { - ins.texture.in_reg_swizzle_left = COMPONENT_X; - ins.texture.in_reg_swizzle_right = COMPONENT_Y; - //ins.texture.in_reg_swizzle_third = COMPONENT_X; - } - - emit_mir_instruction(ctx, ins); - - /* Simultaneously alias the destination and emit a move for it. 
The move will be eliminated if possible */ - - int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest); - alias_ssa(ctx, o_index, SSA_FIXED_REGISTER(o_reg)); - ctx->texture_index[reg] = o_index; - - midgard_instruction ins2 = v_fmov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index); - emit_mir_instruction(ctx, ins2); - - /* Used for .cont and .last hinting */ - ctx->texture_op_count++; -} - -static void -emit_jump(compiler_context *ctx, nir_jump_instr *instr) -{ - switch (instr->type) { - case nir_jump_break: { - /* Emit a branch out of the loop */ - struct midgard_instruction br = v_branch(false, false); - br.branch.target_type = TARGET_BREAK; - br.branch.target_break = ctx->current_loop_depth; - emit_mir_instruction(ctx, br); - - DBG("break..\n"); - break; - } - - default: - DBG("Unknown jump type %d\n", instr->type); - break; - } -} - -static void -emit_instr(compiler_context *ctx, struct nir_instr *instr) -{ - switch (instr->type) { - case nir_instr_type_load_const: - emit_load_const(ctx, nir_instr_as_load_const(instr)); - break; - - case nir_instr_type_intrinsic: - emit_intrinsic(ctx, nir_instr_as_intrinsic(instr)); - break; - - case nir_instr_type_alu: - emit_alu(ctx, nir_instr_as_alu(instr)); - break; - - case nir_instr_type_tex: - emit_tex(ctx, nir_instr_as_tex(instr)); - break; - - case nir_instr_type_jump: - emit_jump(ctx, nir_instr_as_jump(instr)); - break; - - case nir_instr_type_ssa_undef: - /* Spurious */ - break; - - default: - DBG("Unhandled instruction type\n"); - break; - } -} - -/* Determine the actual hardware from the index based on the RA results or special values */ - -static int -dealias_register(compiler_context *ctx, struct ra_graph *g, int reg, int maxreg) -{ - if (reg >= SSA_FIXED_MINIMUM) - return SSA_REG_FROM_FIXED(reg); - - if (reg >= 0) { - assert(reg < maxreg); - int r = ra_get_node_reg(g, reg); - ctx->work_registers = MAX2(ctx->work_registers, r); - return r; - } - - switch (reg) { - /* fmov style unused */ - case SSA_UNUSED_0: - return REGISTER_UNUSED; - - /* lut style unused */ - case SSA_UNUSED_1: - return REGISTER_UNUSED; - - default: - DBG("Unknown SSA register alias %d\n", reg); - assert(0); - return 31; - } -} - -static unsigned int -midgard_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data) -{ - /* Choose the first available register to minimise reported register pressure */ - - for (int i = 0; i < 16; ++i) { - if (BITSET_TEST(regs, i)) { - return i; - } - } - - assert(0); - return 0; -} - -static bool -midgard_is_live_in_instr(midgard_instruction *ins, int src) -{ - if (ins->ssa_args.src0 == src) return true; - if (ins->ssa_args.src1 == src) return true; - - return false; -} - -/* Determine if a variable is live in the successors of a block */ -static bool -is_live_after_successors(compiler_context *ctx, midgard_block *bl, int src) -{ - for (unsigned i = 0; i < bl->nr_successors; ++i) { - midgard_block *succ = bl->successors[i]; - - /* If we already visited, the value we're seeking - * isn't down this path (or we would have short - * circuited */ - - if (succ->visited) continue; - - /* Otherwise (it's visited *now*), check the block */ - - succ->visited = true; - - mir_foreach_instr_in_block(succ, ins) { - if (midgard_is_live_in_instr(ins, src)) - return true; - } - - /* ...and also, check *its* successors */ - if (is_live_after_successors(ctx, succ, src)) - return true; - - } - - /* Welp. We're really not live. 
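Every not-yet-visited successor has been scanned without finding a use.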
*/ - - return false; -} - -static bool -is_live_after(compiler_context *ctx, midgard_block *block, midgard_instruction *start, int src) -{ - /* Check the rest of the block for liveness */ - - mir_foreach_instr_in_block_from(block, ins, mir_next_op(start)) { - if (midgard_is_live_in_instr(ins, src)) - return true; - } - - /* Check the rest of the blocks for liveness recursively */ - - bool succ = is_live_after_successors(ctx, block, src); - - mir_foreach_block(ctx, block) { - block->visited = false; - } - - return succ; -} - -static void -allocate_registers(compiler_context *ctx) -{ - /* First, initialize the RA */ - struct ra_regs *regs = ra_alloc_reg_set(NULL, 32, true); - - /* Create a primary (general purpose) class, as well as special purpose - * pipeline register classes */ - - int primary_class = ra_alloc_reg_class(regs); - int varying_class = ra_alloc_reg_class(regs); - - /* Add the full set of work registers */ - int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0); - for (int i = 0; i < work_count; ++i) - ra_class_add_reg(regs, primary_class, i); - - /* Add special registers */ - ra_class_add_reg(regs, varying_class, REGISTER_VARYING_BASE); - ra_class_add_reg(regs, varying_class, REGISTER_VARYING_BASE + 1); - - /* We're done setting up */ - ra_set_finalize(regs, NULL); - - /* Transform the MIR into squeezed index form */ - mir_foreach_block(ctx, block) { - mir_foreach_instr_in_block(block, ins) { - if (ins->compact_branch) continue; - - ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0); - ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1); - ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest); - } - if (midgard_debug & MIDGARD_DBG_SHADERS) - print_mir_block(block); - } - - /* Let's actually do register allocation */ - int nodes = ctx->temp_count; - struct ra_graph *g = ra_alloc_interference_graph(regs, nodes); - - /* Set everything to the work register class, unless it has somewhere - * special to go */ - - mir_foreach_block(ctx, block) { - mir_foreach_instr_in_block(block, ins) { - if (ins->compact_branch) continue; - - if (ins->ssa_args.dest < 0) continue; - - if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue; - - int class = primary_class; - - ra_set_node_class(g, ins->ssa_args.dest, class); - } - } - - for (int index = 0; index <= ctx->max_hash; ++index) { - unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_register, index + 1); - - if (temp) { - unsigned reg = temp - 1; - int t = find_or_allocate_temp(ctx, index); - ra_set_node_reg(g, t, reg); - } - } - - /* Determine liveness */ - - int *live_start = malloc(nodes * sizeof(int)); - int *live_end = malloc(nodes * sizeof(int)); - - /* Initialize as non-existent */ - - for (int i = 0; i < nodes; ++i) { - live_start[i] = live_end[i] = -1; - } - - int d = 0; - - mir_foreach_block(ctx, block) { - mir_foreach_instr_in_block(block, ins) { - if (ins->compact_branch) continue; - - if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) { - /* If this destination is not yet live, it is now since we just wrote it */ - - int dest = ins->ssa_args.dest; - - if (live_start[dest] == -1) - live_start[dest] = d; - } - - /* Since we just used a source, the source might be - * dead now. 
Scan the rest of the block for - * invocations, and if there are none, the source dies - * */ - - int sources[2] = { ins->ssa_args.src0, ins->ssa_args.src1 }; - - for (int src = 0; src < 2; ++src) { - int s = sources[src]; - - if (s < 0) continue; - - if (s >= SSA_FIXED_MINIMUM) continue; - - if (!is_live_after(ctx, block, ins, s)) { - live_end[s] = d; - } - } - - ++d; - } - } - - /* If a node still hasn't been killed, kill it now */ - - for (int i = 0; i < nodes; ++i) { - /* live_start == -1 most likely indicates a pinned output */ - - if (live_end[i] == -1) - live_end[i] = d; - } - - /* Setup interference between nodes that are live at the same time */ - - for (int i = 0; i < nodes; ++i) { - for (int j = i + 1; j < nodes; ++j) { - if (!(live_start[i] >= live_end[j] || live_start[j] >= live_end[i])) - ra_add_node_interference(g, i, j); - } - } - - ra_set_select_reg_callback(g, midgard_ra_select_callback, NULL); - - if (!ra_allocate(g)) { - DBG("Error allocating registers\n"); - assert(0); - } - - /* Cleanup */ - free(live_start); - free(live_end); - - mir_foreach_block(ctx, block) { - mir_foreach_instr_in_block(block, ins) { - if (ins->compact_branch) continue; - - ssa_args args = ins->ssa_args; - - switch (ins->type) { - case TAG_ALU_4: - ins->registers.src1_reg = dealias_register(ctx, g, args.src0, nodes); - - ins->registers.src2_imm = args.inline_constant; - - if (args.inline_constant) { - /* Encode inline 16-bit constant as a vector by default */ - - ins->registers.src2_reg = ins->inline_constant >> 11; - - int lower_11 = ins->inline_constant & ((1 << 12) - 1); - - uint16_t imm = ((lower_11 >> 8) & 0x7) | ((lower_11 & 0xFF) << 3); - ins->alu.src2 = imm << 2; - } else { - ins->registers.src2_reg = dealias_register(ctx, g, args.src1, nodes); - } - - ins->registers.out_reg = dealias_register(ctx, g, args.dest, nodes); - - break; - - case TAG_LOAD_STORE_4: { - if (OP_IS_STORE_VARY(ins->load_store.op)) { - /* TODO: use ssa_args for store_vary */ - ins->load_store.reg = 0; - } else { - bool has_dest = args.dest >= 0; - int ssa_arg = has_dest ? args.dest : args.src0; - - ins->load_store.reg = dealias_register(ctx, g, ssa_arg, nodes); - } - - break; - } - - default: - break; - } - } - } -} - -/* Midgard IR only knows vector ALU types, but we sometimes need to actually - * use scalar ALU instructions, for functional or performance reasons. To do - * this, we just demote vector ALU payloads to scalar. 
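- * The scalar encoding addresses a single component per source and a single
- * output component, rather than a full 4-wide swizzle and write mask.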
*/ - -static int -component_from_mask(unsigned mask) -{ - for (int c = 0; c < 4; ++c) { - if (mask & (3 << (2 * c))) - return c; - } - - assert(0); - return 0; -} - -static bool -is_single_component_mask(unsigned mask) -{ - int components = 0; - - for (int c = 0; c < 4; ++c) - if (mask & (3 << (2 * c))) - components++; - - return components == 1; -} - -/* Create a mask of accessed components from a swizzle to figure out vector - * dependencies */ - -static unsigned -swizzle_to_access_mask(unsigned swizzle) -{ - unsigned component_mask = 0; - - for (int i = 0; i < 4; ++i) { - unsigned c = (swizzle >> (2 * i)) & 3; - component_mask |= (1 << c); - } - - return component_mask; -} - -static unsigned -vector_to_scalar_source(unsigned u, bool is_int) -{ - midgard_vector_alu_src v; - memcpy(&v, &u, sizeof(v)); - - /* TODO: Integers */ - - midgard_scalar_alu_src s = { - .full = !v.half, - .component = (v.swizzle & 3) << 1 - }; - - if (is_int) { - /* TODO */ - } else { - s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS; - s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG; - } - - unsigned o; - memcpy(&o, &s, sizeof(s)); - - return o & ((1 << 6) - 1); -} - -static midgard_scalar_alu -vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins) -{ - bool is_int = midgard_is_integer_op(v.op); - - /* The output component is from the mask */ - midgard_scalar_alu s = { - .op = v.op, - .src1 = vector_to_scalar_source(v.src1, is_int), - .src2 = vector_to_scalar_source(v.src2, is_int), - .unknown = 0, - .outmod = v.outmod, - .output_full = 1, /* TODO: Half */ - .output_component = component_from_mask(v.mask) << 1, - }; - - /* Inline constant is passed along rather than trying to extract it - * from v */ - - if (ins->ssa_args.inline_constant) { - uint16_t imm = 0; - int lower_11 = ins->inline_constant & ((1 << 12) - 1); - imm |= (lower_11 >> 9) & 3; - imm |= (lower_11 >> 6) & 4; - imm |= (lower_11 >> 2) & 0x38; - imm |= (lower_11 & 63) << 6; - - s.src2 = imm; - } - - return s; -} - -/* Midgard prefetches instruction types, so during emission we need to - * lookahead too. Unless this is the last instruction, in which we return 1. Or - * if this is the second to last and the last is an ALU, then it's also 1... */ - -#define IS_ALU(tag) (tag == TAG_ALU_4 || tag == TAG_ALU_8 || \ - tag == TAG_ALU_12 || tag == TAG_ALU_16) - -#define EMIT_AND_COUNT(type, val) util_dynarray_append(emission, type, val); \ - bytes_emitted += sizeof(type) - -static void -emit_binary_vector_instruction(midgard_instruction *ains, - uint16_t *register_words, int *register_words_count, - uint64_t *body_words, size_t *body_size, int *body_words_count, - size_t *bytes_emitted) -{ - memcpy(®ister_words[(*register_words_count)++], &ains->registers, sizeof(ains->registers)); - *bytes_emitted += sizeof(midgard_reg_info); - - body_size[*body_words_count] = sizeof(midgard_vector_alu); - memcpy(&body_words[(*body_words_count)++], &ains->alu, sizeof(ains->alu)); - *bytes_emitted += sizeof(midgard_vector_alu); -} - -/* Checks for an SSA data hazard between two adjacent instructions, keeping in - * mind that we are a vector architecture and we can write to different - * components simultaneously */ - -static bool -can_run_concurrent_ssa(midgard_instruction *first, midgard_instruction *second) -{ - /* Each instruction reads some registers and writes to a register. See - * where the first writes */ - - /* Figure out where exactly we wrote to */ - int source = first->ssa_args.dest; - int source_mask = first->type == TAG_ALU_4 ? 
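/* only ALU words carry a per-component write mask; other tags are treated conservatively as writing all of xyzw */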
squeeze_writemask(first->alu.mask) : 0xF; - - /* As long as the second doesn't read from the first, we're okay */ - if (second->ssa_args.src0 == source) { - if (first->type == TAG_ALU_4) { - /* Figure out which components we just read from */ - - int q = second->alu.src1; - midgard_vector_alu_src *m = (midgard_vector_alu_src *) &q; - - /* Check if there are components in common, and fail if so */ - if (swizzle_to_access_mask(m->swizzle) & source_mask) - return false; - } else - return false; - - } - - if (second->ssa_args.src1 == source) - return false; - - /* Otherwise, it's safe in that regard. Another data hazard is both - * writing to the same place, of course */ - - if (second->ssa_args.dest == source) { - /* ...but only if the components overlap */ - int dest_mask = second->type == TAG_ALU_4 ? squeeze_writemask(second->alu.mask) : 0xF; - - if (dest_mask & source_mask) - return false; - } - - /* ...That's it */ - return true; -} - -static bool -midgard_has_hazard( - midgard_instruction **segment, unsigned segment_size, - midgard_instruction *ains) -{ - for (int s = 0; s < segment_size; ++s) - if (!can_run_concurrent_ssa(segment[s], ains)) - return true; - - return false; - - -} - -/* Schedules, but does not emit, a single basic block. After scheduling, the - * final tag and size of the block are known, which are necessary for branching - * */ - -static midgard_bundle -schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction *ins, int *skip) -{ - int instructions_emitted = 0, instructions_consumed = -1; - midgard_bundle bundle = { 0 }; - - uint8_t tag = ins->type; - - /* Default to the instruction's tag */ - bundle.tag = tag; - - switch (ins->type) { - case TAG_ALU_4: { - uint32_t control = 0; - size_t bytes_emitted = sizeof(control); - - /* TODO: Constant combining */ - int index = 0, last_unit = 0; - - /* Previous instructions, for the purpose of parallelism */ - midgard_instruction *segment[4] = {0}; - int segment_size = 0; - - instructions_emitted = -1; - midgard_instruction *pins = ins; - - for (;;) { - midgard_instruction *ains = pins; - - /* Advance instruction pointer */ - if (index) { - ains = mir_next_op(pins); - pins = ains; - } - - /* Out-of-work condition */ - if ((struct list_head *) ains == &block->instructions) - break; - - /* Ensure that the chain can continue */ - if (ains->type != TAG_ALU_4) break; - - /* According to the presentation "The ARM - * Mali-T880 Mobile GPU" from HotChips 27, - * there are two pipeline stages. Branching - * position determined experimentally. Lines - * are executed in parallel: - * - * [ VMUL ] [ SADD ] - * [ VADD ] [ SMUL ] [ LUT ] [ BRANCH ] - * - * Verify that there are no ordering dependencies here. - * - * TODO: Allow for parallelism!!! 
- */ - - /* Pick a unit for it if it doesn't force a particular unit */ - - int unit = ains->unit; - - if (!unit) { - int op = ains->alu.op; - int units = alu_opcode_props[op].props; - - /* TODO: Promotion of scalars to vectors */ - int vector = ((!is_single_component_mask(ains->alu.mask)) || ((units & UNITS_SCALAR) == 0)) && (units & UNITS_ANY_VECTOR); - - if (!vector) - assert(units & UNITS_SCALAR); - - if (vector) { - if (last_unit >= UNIT_VADD) { - if (units & UNIT_VLUT) - unit = UNIT_VLUT; - else - break; - } else { - if ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) - unit = UNIT_VMUL; - else if ((units & UNIT_VADD) && !(control & UNIT_VADD)) - unit = UNIT_VADD; - else if (units & UNIT_VLUT) - unit = UNIT_VLUT; - else - break; - } - } else { - if (last_unit >= UNIT_VADD) { - if ((units & UNIT_SMUL) && !(control & UNIT_SMUL)) - unit = UNIT_SMUL; - else if (units & UNIT_VLUT) - unit = UNIT_VLUT; - else - break; - } else { - if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains)) - unit = UNIT_SADD; - else if (units & UNIT_SMUL) - unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL; - else if ((units & UNIT_VADD) && !(control & UNIT_VADD)) - unit = UNIT_VADD; - else - break; - } - } - - assert(unit & units); - } - - /* Late unit check, this time for encoding (not parallelism) */ - if (unit <= last_unit) break; - - /* Clear the segment */ - if (last_unit < UNIT_VADD && unit >= UNIT_VADD) - segment_size = 0; - - if (midgard_has_hazard(segment, segment_size, ains)) - break; - - /* We're good to go -- emit the instruction */ - ains->unit = unit; - - segment[segment_size++] = ains; - - /* Only one set of embedded constants per - * bundle possible; if we have more, we must - * break the chain early, unfortunately */ - - if (ains->has_constants) { - if (bundle.has_embedded_constants) { - /* ...but if there are already - * constants but these are the - * *same* constants, we let it - * through */ - - if (memcmp(bundle.constants, ains->constants, sizeof(bundle.constants))) - break; - } else { - bundle.has_embedded_constants = true; - memcpy(bundle.constants, ains->constants, sizeof(bundle.constants)); - - /* If this is a blend shader special constant, track it for patching */ - if (ains->has_blend_constant) - bundle.has_blend_constant = true; - } - } - - if (ains->unit & UNITS_ANY_VECTOR) { - emit_binary_vector_instruction(ains, bundle.register_words, - &bundle.register_words_count, bundle.body_words, - bundle.body_size, &bundle.body_words_count, &bytes_emitted); - } else if (ains->compact_branch) { - /* All of r0 has to be written out - * along with the branch writeout. - * (slow!) 
*/ - - if (ains->writeout) { - if (index == 0) { - midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0)); - ins.unit = UNIT_VMUL; - - control |= ins.unit; - - emit_binary_vector_instruction(&ins, bundle.register_words, - &bundle.register_words_count, bundle.body_words, - bundle.body_size, &bundle.body_words_count, &bytes_emitted); - } else { - /* Analyse the group to see if r0 is written in full, on-time, without hanging dependencies*/ - bool written_late = false; - bool components[4] = { 0 }; - uint16_t register_dep_mask = 0; - uint16_t written_mask = 0; - - midgard_instruction *qins = ins; - for (int t = 0; t < index; ++t) { - if (qins->registers.out_reg != 0) { - /* Mark down writes */ - - written_mask |= (1 << qins->registers.out_reg); - } else { - /* Mark down the register dependencies for errata check */ - - if (qins->registers.src1_reg < 16) - register_dep_mask |= (1 << qins->registers.src1_reg); - - if (qins->registers.src2_reg < 16) - register_dep_mask |= (1 << qins->registers.src2_reg); - - int mask = qins->alu.mask; - - for (int c = 0; c < 4; ++c) - if (mask & (0x3 << (2 * c))) - components[c] = true; - - /* ..but if the writeout is too late, we have to break up anyway... for some reason */ - - if (qins->unit == UNIT_VLUT) - written_late = true; - } - - /* Advance instruction pointer */ - qins = mir_next_op(qins); - } - - - /* ERRATA (?): In a bundle ending in a fragment writeout, the register dependencies of r0 cannot be written within this bundle (discovered in -bshading:shading=phong) */ - if (register_dep_mask & written_mask) { - DBG("ERRATA WORKAROUND: Breakup for writeout dependency masks %X vs %X (common %X)\n", register_dep_mask, written_mask, register_dep_mask & written_mask); - break; - } - - if (written_late) - break; - - /* If even a single component is not written, break it up (conservative check). 
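- * The writeout needs all of r0 populated within this bundle, so a partial
- * write forces us to fall back to the dummy r0 move in a fresh bundle.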
*/ - bool breakup = false; - - for (int c = 0; c < 4; ++c) - if (!components[c]) - breakup = true; - - if (breakup) - break; - - /* Otherwise, we're free to proceed */ - } - } - - if (ains->unit == ALU_ENAB_BRANCH) { - bundle.body_size[bundle.body_words_count] = sizeof(midgard_branch_extended); - memcpy(&bundle.body_words[bundle.body_words_count++], &ains->branch_extended, sizeof(midgard_branch_extended)); - bytes_emitted += sizeof(midgard_branch_extended); - } else { - bundle.body_size[bundle.body_words_count] = sizeof(ains->br_compact); - memcpy(&bundle.body_words[bundle.body_words_count++], &ains->br_compact, sizeof(ains->br_compact)); - bytes_emitted += sizeof(ains->br_compact); - } - } else { - memcpy(&bundle.register_words[bundle.register_words_count++], &ains->registers, sizeof(ains->registers)); - bytes_emitted += sizeof(midgard_reg_info); - - bundle.body_size[bundle.body_words_count] = sizeof(midgard_scalar_alu); - bundle.body_words_count++; - bytes_emitted += sizeof(midgard_scalar_alu); - } - - /* Defer marking until after writing to allow for break */ - control |= ains->unit; - last_unit = ains->unit; - ++instructions_emitted; - ++index; - } - - /* Bubble up the number of instructions for skipping */ - instructions_consumed = index - 1; - - int padding = 0; - - /* Pad ALU op to nearest word */ - - if (bytes_emitted & 15) { - padding = 16 - (bytes_emitted & 15); - bytes_emitted += padding; - } - - /* Constants must always be quadwords */ - if (bundle.has_embedded_constants) - bytes_emitted += 16; + for (unsigned i = 0; i < instr->num_srcs; ++i) { + int reg = SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + in_reg); + int index = nir_src_index(ctx, &instr->src[i].src); + int nr_comp = nir_src_num_components(instr->src[i].src); + midgard_vector_alu_src alu_src = blank_alu_src; - /* Size ALU instruction for tag */ - bundle.tag = (TAG_ALU_4) + (bytes_emitted / 16) - 1; - bundle.padding = padding; - bundle.control = bundle.tag | control; + switch (instr->src[i].src_type) { + case nir_tex_src_coord: { + if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) { + /* texelFetch is undefined on samplerCube */ + assert(midgard_texop != TEXTURE_OP_TEXEL_FETCH); - break; - } + /* For cubemaps, we need to load coords into + * special r27, and then use a special ld/st op + * to select the face and copy the xy into the + * texture register */ - case TAG_LOAD_STORE_4: { - /* Load store instructions have two words at once. If - * we only have one queued up, we need to NOP pad. - * Otherwise, we store both in succession to save space - * and cycles -- letting them go in parallel -- skip - * the next. The usefulness of this optimisation is - * greatly dependent on the quality of the instruction - * scheduler. - */ + alu_src.swizzle = SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X); - midgard_instruction *next_op = mir_next_op(ins); + midgard_instruction move = v_mov(index, alu_src, SSA_FIXED_REGISTER(27)); + emit_mir_instruction(ctx, move); - if ((struct list_head *) next_op != &block->instructions && next_op->type == TAG_LOAD_STORE_4) { - /* As the two operate concurrently, make sure - * they are not dependent */ + midgard_instruction st = m_st_cubemap_coords(reg, 0); + st.load_store.unknown = 0x24; /* XXX: What is this? 
*/ + st.mask = 0x3; /* xy */ + st.load_store.swizzle = alu_src.swizzle; + emit_mir_instruction(ctx, st); - if (can_run_concurrent_ssa(ins, next_op) || true) { - /* Skip ahead, since it's redundant with the pair */ - instructions_consumed = 1 + (instructions_emitted++); + ins.texture.in_reg_swizzle = swizzle_of(2); + } else { + ins.texture.in_reg_swizzle = alu_src.swizzle = swizzle_of(nr_comp); + + midgard_instruction mov = v_mov(index, alu_src, reg); + mov.mask = mask_of(nr_comp); + emit_mir_instruction(ctx, mov); + + if (midgard_texop == TEXTURE_OP_TEXEL_FETCH) { + /* Texel fetch opcodes care about the + * values of z and w, so we actually + * need to spill into a second register + * for a texel fetch with register bias + * (for non-2D). TODO: Implement that + */ + + assert(instr->sampler_dim == GLSL_SAMPLER_DIM_2D); + + midgard_instruction zero = v_mov(index, alu_src, reg); + zero.ssa_args.inline_constant = true; + zero.ssa_args.src1 = SSA_FIXED_REGISTER(REGISTER_CONSTANT); + zero.has_constants = true; + zero.mask = ~mov.mask; + emit_mir_instruction(ctx, zero); + + ins.texture.in_reg_swizzle = SWIZZLE_XYZZ; + } else { + /* Non-texel fetch doesn't need that + * nonsense. However we do use the Z + * for array indexing */ + bool is_3d = instr->sampler_dim == GLSL_SAMPLER_DIM_3D; + ins.texture.in_reg_swizzle = is_3d ? SWIZZLE_XYZZ : SWIZZLE_XYXZ; + } } + + break; } - break; - } + case nir_tex_src_bias: + case nir_tex_src_lod: { + /* Try as a constant if we can */ - default: - /* Texture ops default to single-op-per-bundle scheduling */ - break; - } + bool is_txf = midgard_texop == TEXTURE_OP_TEXEL_FETCH; + if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture)) + break; - /* Copy the instructions into the bundle */ - bundle.instruction_count = instructions_emitted + 1; + /* Otherwise we use a register. To keep RA simple, we + * put the bias/LOD into the w component of the input + * source, which is otherwise in xy */ - int used_idx = 0; + alu_src.swizzle = SWIZZLE_XXXX; - midgard_instruction *uins = ins; - for (int i = 0; used_idx < bundle.instruction_count; ++i) { - bundle.instructions[used_idx++] = *uins; - uins = mir_next_op(uins); - } + midgard_instruction mov = v_mov(index, alu_src, reg); + mov.mask = 1 << COMPONENT_W; + emit_mir_instruction(ctx, mov); - *skip = (instructions_consumed == -1) ? instructions_emitted : instructions_consumed; + ins.texture.lod_register = true; - return bundle; -} + midgard_tex_register_select sel = { + .select = in_reg, + .full = 1, -static int -quadword_size(int tag) -{ - switch (tag) { - case TAG_ALU_4: - return 1; + /* w */ + .component_lo = 1, + .component_hi = 1 + }; - case TAG_ALU_8: - return 2; + uint8_t packed; + memcpy(&packed, &sel, sizeof(packed)); + ins.texture.bias = packed; - case TAG_ALU_12: - return 3; + break; + }; - case TAG_ALU_16: - return 4; + default: + unreachable("Unknown texture source type\n"); + } + } - case TAG_LOAD_STORE_4: - return 1; + /* Set registers to read and write from the same place */ + ins.texture.in_reg_select = in_reg; + ins.texture.out_reg_select = out_reg; - case TAG_TEXTURE_4: - return 1; + emit_mir_instruction(ctx, ins); - default: - assert(0); - return 0; - } -} + int o_reg = REGISTER_TEXTURE_BASE + out_reg, o_index = nir_dest_index(ctx, &instr->dest); + midgard_instruction ins2 = v_mov(SSA_FIXED_REGISTER(o_reg), blank_alu_src, o_index); + emit_mir_instruction(ctx, ins2); -/* Schedule a single block by iterating its instruction to create bundles. 
- * While we go, tally about the bundle sizes to compute the block size. */ + /* Used for .cont and .last hinting */ + ctx->texture_op_count++; +} static void -schedule_block(compiler_context *ctx, midgard_block *block) +emit_tex(compiler_context *ctx, nir_tex_instr *instr) { - util_dynarray_init(&block->bundles, NULL); - - block->quadword_count = 0; + /* Fixup op, since only textureLod is permitted in VS but NIR can give + * generic tex in some cases (which confuses the hardware) */ - mir_foreach_instr_in_block(block, ins) { - int skip; - midgard_bundle bundle = schedule_bundle(ctx, block, ins, &skip); - util_dynarray_append(&block->bundles, midgard_bundle, bundle); + bool is_vertex = ctx->stage == MESA_SHADER_VERTEX; - if (bundle.has_blend_constant) { - /* TODO: Multiblock? */ - int quadwords_within_block = block->quadword_count + quadword_size(bundle.tag) - 1; - ctx->blend_constant_offset = quadwords_within_block * 0x10; - } - - while(skip--) - ins = mir_next_op(ins); + if (is_vertex && instr->op == nir_texop_tex) + instr->op = nir_texop_txl; - block->quadword_count += quadword_size(bundle.tag); + switch (instr->op) { + case nir_texop_tex: + case nir_texop_txb: + emit_texop_native(ctx, instr, TEXTURE_OP_NORMAL); + break; + case nir_texop_txl: + emit_texop_native(ctx, instr, TEXTURE_OP_LOD); + break; + case nir_texop_txf: + emit_texop_native(ctx, instr, TEXTURE_OP_TEXEL_FETCH); + break; + case nir_texop_txs: + emit_sysval_read(ctx, &instr->instr); + break; + default: + unreachable("Unhanlded texture op"); } - - block->is_scheduled = true; } static void -schedule_program(compiler_context *ctx) +emit_jump(compiler_context *ctx, nir_jump_instr *instr) { - allocate_registers(ctx); + switch (instr->type) { + case nir_jump_break: { + /* Emit a branch out of the loop */ + struct midgard_instruction br = v_branch(false, false); + br.branch.target_type = TARGET_BREAK; + br.branch.target_break = ctx->current_loop_depth; + emit_mir_instruction(ctx, br); - mir_foreach_block(ctx, block) { - schedule_block(ctx, block); + DBG("break..\n"); + break; + } + + default: + DBG("Unknown jump type %d\n", instr->type); + break; } } -/* After everything is scheduled, emit whole bundles at a time */ - static void -emit_binary_bundle(compiler_context *ctx, midgard_bundle *bundle, struct util_dynarray *emission, int next_tag) +emit_instr(compiler_context *ctx, struct nir_instr *instr) { - int lookahead = next_tag << 4; - - switch (bundle->tag) { - case TAG_ALU_4: - case TAG_ALU_8: - case TAG_ALU_12: - case TAG_ALU_16: { - /* Actually emit each component */ - util_dynarray_append(emission, uint32_t, bundle->control | lookahead); - - for (int i = 0; i < bundle->register_words_count; ++i) - util_dynarray_append(emission, uint16_t, bundle->register_words[i]); - - /* Emit body words based on the instructions bundled */ - for (int i = 0; i < bundle->instruction_count; ++i) { - midgard_instruction *ins = &bundle->instructions[i]; - - if (ins->unit & UNITS_ANY_VECTOR) { - memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins->alu, sizeof(midgard_vector_alu)); - } else if (ins->compact_branch) { - /* Dummy move, XXX DRY */ - if ((i == 0) && ins->writeout) { - midgard_instruction ins = v_fmov(0, blank_alu_src, SSA_FIXED_REGISTER(0)); - memcpy(util_dynarray_grow(emission, sizeof(midgard_vector_alu)), &ins.alu, sizeof(midgard_vector_alu)); - } - - if (ins->unit == ALU_ENAB_BR_COMPACT) { - memcpy(util_dynarray_grow(emission, sizeof(ins->br_compact)), &ins->br_compact, sizeof(ins->br_compact)); - } else { - 
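/* Not compact: this branch needs the full extended encoding, which does not fit in the 16-bit compact form */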
memcpy(util_dynarray_grow(emission, sizeof(ins->branch_extended)), &ins->branch_extended, sizeof(ins->branch_extended)); - } - } else { - /* Scalar */ - midgard_scalar_alu scalarised = vector_to_scalar_alu(ins->alu, ins); - memcpy(util_dynarray_grow(emission, sizeof(scalarised)), &scalarised, sizeof(scalarised)); - } - } - - /* Emit padding (all zero) */ - memset(util_dynarray_grow(emission, bundle->padding), 0, bundle->padding); - - /* Tack on constants */ - - if (bundle->has_embedded_constants) { - util_dynarray_append(emission, float, bundle->constants[0]); - util_dynarray_append(emission, float, bundle->constants[1]); - util_dynarray_append(emission, float, bundle->constants[2]); - util_dynarray_append(emission, float, bundle->constants[3]); - } - + switch (instr->type) { + case nir_instr_type_load_const: + emit_load_const(ctx, nir_instr_as_load_const(instr)); break; - } - - case TAG_LOAD_STORE_4: { - /* One or two composing instructions */ - - uint64_t current64, next64 = LDST_NOP; - - memcpy(¤t64, &bundle->instructions[0].load_store, sizeof(current64)); - - if (bundle->instruction_count == 2) - memcpy(&next64, &bundle->instructions[1].load_store, sizeof(next64)); - - midgard_load_store instruction = { - .type = bundle->tag, - .next_type = next_tag, - .word1 = current64, - .word2 = next64 - }; - - util_dynarray_append(emission, midgard_load_store, instruction); + case nir_instr_type_intrinsic: + emit_intrinsic(ctx, nir_instr_as_intrinsic(instr)); break; - } - case TAG_TEXTURE_4: { - /* Texture instructions are easy, since there is no - * pipelining nor VLIW to worry about. We may need to set the .last flag */ - - midgard_instruction *ins = &bundle->instructions[0]; - - ins->texture.type = TAG_TEXTURE_4; - ins->texture.next_type = next_tag; + case nir_instr_type_alu: + emit_alu(ctx, nir_instr_as_alu(instr)); + break; - ctx->texture_op_count--; + case nir_instr_type_tex: + emit_tex(ctx, nir_instr_as_tex(instr)); + break; - if (!ctx->texture_op_count) { - ins->texture.cont = 0; - ins->texture.last = 1; - } + case nir_instr_type_jump: + emit_jump(ctx, nir_instr_as_jump(instr)); + break; - util_dynarray_append(emission, midgard_texture_word, ins->texture); + case nir_instr_type_ssa_undef: + /* Spurious */ break; - } default: - DBG("Unknown midgard instruction type\n"); - assert(0); + DBG("Unhandled instruction type\n"); break; } } @@ -2958,7 +1843,7 @@ inline_alu_constants(compiler_context *ctx) unsigned scratch = alu->ssa_args.dest; if (entry) { - midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch); + midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, scratch); attach_constants(ctx, &ins, entry, alu->ssa_args.src1 + 1); /* Force a break XXX Defer r31 writes */ @@ -2990,6 +1875,13 @@ embedded_to_inline_constant(compiler_context *ctx) /* Blend constants must not be inlined by definition */ if (ins->has_blend_constant) continue; + /* We can inline 32-bit (sometimes) or 16-bit (usually) */ + bool is_16 = ins->alu.reg_mode == midgard_reg_mode_16; + bool is_32 = ins->alu.reg_mode == midgard_reg_mode_32; + + if (!(is_16 || is_32)) + continue; + /* src1 cannot be an inline constant due to encoding * restrictions. 
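* (the embedded 16-bit immediate can only occupy the second operand slot).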
So, if possible we try to flip the arguments * in that case */ @@ -3040,11 +1932,7 @@ embedded_to_inline_constant(compiler_context *ctx) /* Scale constant appropriately, if we can legally */ uint16_t scaled_constant = 0; - /* XXX: Check legality */ - if (midgard_is_integer_op(op)) { - /* TODO: Inline integer */ - continue; - + if (midgard_is_integer_op(op) || is_16) { unsigned int *iconstants = (unsigned int *) ins->constants; scaled_constant = (uint16_t) iconstants[component]; @@ -3052,7 +1940,20 @@ embedded_to_inline_constant(compiler_context *ctx) if (scaled_constant != iconstants[component]) continue; } else { - scaled_constant = _mesa_float_to_half((float) ins->constants[component]); + float original = (float) ins->constants[component]; + scaled_constant = _mesa_float_to_half(original); + + /* Check for loss of precision. If this is + * mediump, we don't care, but for a highp + * shader, we need to pay attention. NIR + * doesn't yet tell us which mode we're in! + * Practically this prevents most constants + * from being inlined, sadly. */ + + float fp32 = _mesa_half_to_float(scaled_constant); + + if (fp32 != original) + continue; } /* We don't know how to handle these with a constant */ @@ -3070,7 +1971,7 @@ embedded_to_inline_constant(compiler_context *ctx) uint32_t value = cons[component]; bool is_vector = false; - unsigned mask = effective_writemask(&ins->alu); + unsigned mask = effective_writemask(&ins->alu, ins->mask); for (int c = 1; c < 4; ++c) { /* We only care if this component is actually used */ @@ -3103,6 +2004,10 @@ embedded_to_inline_constant(compiler_context *ctx) static void map_ssa_to_alias(compiler_context *ctx, int *ref) { + /* Sign is used quite deliberately for unused */ + if (*ref < 0) + return; + unsigned int alias = (uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_to_alias, *ref + 1); if (alias) { @@ -3132,8 +2037,7 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block) if (ins->compact_branch) continue; if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue; - if (midgard_is_pinned(ctx, ins->ssa_args.dest)) continue; - if (is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue; + if (mir_is_live_after(ctx, block, ins, ins->ssa_args.dest)) continue; mir_remove_instruction(ins); progress = true; @@ -3142,6 +2046,83 @@ midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block) return progress; } +/* Dead code elimination for branches at the end of a block - only one branch + * per block is legal semantically */ + +static void +midgard_opt_cull_dead_branch(compiler_context *ctx, midgard_block *block) +{ + bool branched = false; + + mir_foreach_instr_in_block_safe(block, ins) { + if (!midgard_is_branch_unit(ins->unit)) continue; + + /* We ignore prepacked branches since the fragment epilogue is + * just generally special */ + if (ins->prepacked_branch) continue; + + /* Discards are similarly special and may not correspond to the + * end of a block */ + + if (ins->branch.target_type == TARGET_DISCARD) continue; + + if (branched) { + /* We already branched, so this is dead */ + mir_remove_instruction(ins); + } + + branched = true; + } +} + +static bool +mir_nontrivial_mod(midgard_vector_alu_src src, bool is_int, unsigned mask) +{ + /* abs or neg */ + if (!is_int && src.mod) return true; + + /* Other int mods don't matter in isolation */ + if (is_int && src.mod == midgard_int_shift) return true; + + /* size-conversion */ + if (src.half) return true; + + /* swizzle */ + for (unsigned c = 0; c < 4; ++c) { + if (!(mask & (1 
<< c))) continue; + if (((src.swizzle >> (2*c)) & 3) != c) return true; + } + + return false; +} + +static bool +mir_nontrivial_source2_mod(midgard_instruction *ins) +{ + bool is_int = midgard_is_integer_op(ins->alu.op); + + midgard_vector_alu_src src2 = + vector_alu_from_unsigned(ins->alu.src2); + + return mir_nontrivial_mod(src2, is_int, ins->mask); +} + +static bool +mir_nontrivial_outmod(midgard_instruction *ins) +{ + bool is_int = midgard_is_integer_op(ins->alu.op); + unsigned mod = ins->alu.outmod; + + /* Type conversion is a sort of outmod */ + if (ins->alu.dest_override != midgard_dest_override_none) + return true; + + if (is_int) + return mod != midgard_outmod_int_wrap; + else + return mod != midgard_outmod_none; +} + static bool midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block) { @@ -3161,76 +2142,79 @@ midgard_opt_copy_prop(compiler_context *ctx, midgard_block *block) if (to >= ctx->func->impl->ssa_alloc) continue; if (from >= ctx->func->impl->ssa_alloc) continue; - /* Also, if the move has side effects, we're helpless */ + /* Constant propagation is not handled here, either */ + if (ins->ssa_args.inline_constant) continue; + if (ins->has_constants) continue; - midgard_vector_alu_src src = - vector_alu_from_unsigned(ins->alu.src2); - unsigned mask = squeeze_writemask(ins->alu.mask); - bool is_int = midgard_is_integer_op(ins->alu.op); + if (mir_nontrivial_source2_mod(ins)) continue; + if (mir_nontrivial_outmod(ins)) continue; - if (mir_nontrivial_mod(src, is_int, mask)) continue; - if (ins->alu.outmod != midgard_outmod_none) continue; + /* We're clear -- rewrite */ + mir_rewrite_index_src(ctx, to, from); + mir_remove_instruction(ins); + progress |= true; + } - mir_foreach_instr_in_block_from(block, v, mir_next_op(ins)) { - if (v->ssa_args.src0 == to) { - v->ssa_args.src0 = from; - progress = true; - } + return progress; +} - if (v->ssa_args.src1 == to && !v->ssa_args.inline_constant) { - v->ssa_args.src1 = from; - progress = true; - } - } +/* fmov.pos is an idiom for fpos. Propoagate the .pos up to the source, so then + * the move can be propagated away entirely */ + +static bool +mir_compose_float_outmod(midgard_outmod_float *outmod, midgard_outmod_float comp) +{ + /* Nothing to do */ + if (comp == midgard_outmod_none) + return true; + + if (*outmod == midgard_outmod_none) { + *outmod = comp; + return true; } - return progress; + /* TODO: Compose rules */ + return false; } static bool -midgard_opt_copy_prop_tex(compiler_context *ctx, midgard_block *block) +midgard_opt_pos_propagate(compiler_context *ctx, midgard_block *block) { bool progress = false; mir_foreach_instr_in_block_safe(block, ins) { if (ins->type != TAG_ALU_4) continue; - if (!OP_IS_MOVE(ins->alu.op)) continue; - - unsigned from = ins->ssa_args.src1; - unsigned to = ins->ssa_args.dest; - - /* Make sure it's simple enough for us to handle */ + if (ins->alu.op != midgard_alu_op_fmov) continue; + if (ins->alu.outmod != midgard_outmod_pos) continue; - if (from >= SSA_FIXED_MINIMUM) continue; - if (from >= ctx->func->impl->ssa_alloc) continue; - if (to < SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE)) continue; - if (to > SSA_FIXED_REGISTER(REGISTER_TEXTURE_BASE + 1)) continue; + /* TODO: Registers? 
*/ + unsigned src = ins->ssa_args.src1; + if (src >= ctx->func->impl->ssa_alloc) continue; + assert(!mir_has_multiple_writes(ctx, src)); - bool eliminated = false; + /* There might be a source modifier, too */ + if (mir_nontrivial_source2_mod(ins)) continue; + /* Backpropagate the modifier */ mir_foreach_instr_in_block_from_rev(block, v, mir_prev_op(ins)) { - /* The texture registers are not SSA so be careful. - * Conservatively, just stop if we hit a texture op - * (even if it may not write) to where we are */ + if (v->type != TAG_ALU_4) continue; + if (v->ssa_args.dest != src) continue; - if (v->type != TAG_ALU_4) - break; + /* Can we even take a float outmod? */ + if (midgard_is_integer_out_op(v->alu.op)) continue; - if (v->ssa_args.dest == from) { - /* We don't want to track partial writes ... */ - if (v->alu.mask == 0xF) { - v->ssa_args.dest = to; - eliminated = true; - } + midgard_outmod_float temp = v->alu.outmod; + progress |= mir_compose_float_outmod(&temp, ins->alu.outmod); - break; - } - } + /* Throw in the towel.. */ + if (!progress) break; - if (eliminated) - mir_remove_instruction(ins); + /* Otherwise, transfer the modifier */ + v->alu.outmod = temp; + ins->alu.outmod = midgard_outmod_none; - progress |= eliminated; + break; + } } return progress; @@ -3287,40 +2271,6 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block) } } -/* Emit varying stores late */ - -static void -midgard_emit_store(compiler_context *ctx, midgard_block *block) { - /* Iterate in reverse to get the final write, rather than the first */ - - mir_foreach_instr_in_block_safe_rev(block, ins) { - /* Check if what we just wrote needs a store */ - int idx = ins->ssa_args.dest; - uintptr_t varying = ((uintptr_t) _mesa_hash_table_u64_search(ctx->ssa_varyings, idx + 1)); - - if (!varying) continue; - - varying -= 1; - - /* We need to store to the appropriate varying, so emit the - * move/store */ - - /* TODO: Integrate with special purpose RA (and scheduler?) */ - bool high_varying_register = false; - - midgard_instruction mov = v_fmov(idx, blank_alu_src, SSA_FIXED_REGISTER(REGISTER_VARYING_BASE + high_varying_register)); - - midgard_instruction st = m_store_vary_32(SSA_FIXED_REGISTER(high_varying_register), varying); - st.load_store.unknown = 0x1E9E; /* XXX: What is this? 
*/ - - mir_insert_instruction_before(mir_next_op(ins), st); - mir_insert_instruction_before(mir_next_op(ins), mov); - - /* We no longer need to store this varying */ - _mesa_hash_table_u64_remove(ctx->ssa_varyings, idx + 1); - } -} - /* If there are leftovers after the below pass, emit actual fmov * instructions for the slow-but-correct path */ @@ -3332,7 +2282,7 @@ emit_leftover_move(compiler_context *ctx) int mapped = base; map_ssa_to_alias(ctx, &mapped); - EMIT(fmov, mapped, blank_alu_src, base); + EMIT(mov, mapped, blank_alu_src, base); } } @@ -3356,7 +2306,7 @@ emit_fragment_epilogue(compiler_context *ctx) void *constant_value = _mesa_hash_table_u64_search(ctx->ssa_constants, ctx->fragment_output + 1); if (constant_value) { - midgard_instruction ins = v_fmov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0)); + midgard_instruction ins = v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), blank_alu_src, SSA_FIXED_REGISTER(0)); attach_constants(ctx, &ins, constant_value, ctx->fragment_output + 1); emit_mir_instruction(ctx, ins); } @@ -3369,91 +2319,6 @@ emit_fragment_epilogue(compiler_context *ctx) EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always); } -/* For the blend epilogue, we need to convert the blended fragment vec4 (stored - * in r0) to a RGBA8888 value by scaling and type converting. We then output it - * with the int8 analogue to the fragment epilogue */ - -static void -emit_blend_epilogue(compiler_context *ctx) -{ - /* vmul.fmul.none.fulllow hr48, r0, #255 */ - - midgard_instruction scale = { - .type = TAG_ALU_4, - .unit = UNIT_VMUL, - .inline_constant = _mesa_float_to_half(255.0), - .ssa_args = { - .src0 = SSA_FIXED_REGISTER(0), - .src1 = SSA_UNUSED_0, - .dest = SSA_FIXED_REGISTER(24), - .inline_constant = true - }, - .alu = { - .op = midgard_alu_op_fmul, - .reg_mode = midgard_reg_mode_full, - .dest_override = midgard_dest_override_lower, - .mask = 0xFF, - .src1 = vector_alu_srco_unsigned(blank_alu_src), - .src2 = vector_alu_srco_unsigned(blank_alu_src), - } - }; - - emit_mir_instruction(ctx, scale); - - /* vadd.f2u8.pos.low hr0, hr48, #0 */ - - midgard_vector_alu_src alu_src = blank_alu_src; - alu_src.half = true; - - midgard_instruction f2u8 = { - .type = TAG_ALU_4, - .ssa_args = { - .src0 = SSA_FIXED_REGISTER(24), - .src1 = SSA_UNUSED_0, - .dest = SSA_FIXED_REGISTER(0), - .inline_constant = true - }, - .alu = { - .op = midgard_alu_op_f2u8, - .reg_mode = midgard_reg_mode_half, - .dest_override = midgard_dest_override_lower, - .outmod = midgard_outmod_pos, - .mask = 0xF, - .src1 = vector_alu_srco_unsigned(alu_src), - .src2 = vector_alu_srco_unsigned(blank_alu_src), - } - }; - - emit_mir_instruction(ctx, f2u8); - - /* vmul.imov.quarter r0, r0, r0 */ - - midgard_instruction imov_8 = { - .type = TAG_ALU_4, - .ssa_args = { - .src0 = SSA_UNUSED_1, - .src1 = SSA_FIXED_REGISTER(0), - .dest = SSA_FIXED_REGISTER(0), - }, - .alu = { - .op = midgard_alu_op_imov, - .reg_mode = midgard_reg_mode_quarter, - .dest_override = midgard_dest_override_none, - .mask = 0xFF, - .src1 = vector_alu_srco_unsigned(blank_alu_src), - .src2 = vector_alu_srco_unsigned(blank_alu_src), - } - }; - - /* Emit branch epilogue with the 8-bit move as the source */ - - emit_mir_instruction(ctx, imov_8); - EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, 0, midgard_condition_always); - - emit_mir_instruction(ctx, imov_8); - EMIT(alu_br_compact_cond, midgard_jmp_writeout_op_writeout, TAG_ALU_4, -1, midgard_condition_always); 
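- /* Roughly, per channel, the ops above compute (a sketch only; the exact
-  * rounding and saturation of f2u8 and the half-precision intermediate are
-  * hardware details):
-  *
-  *     out8[c] = (uint8_t) MAX2(r0[c] * 255.0f, 0.0f);
-  *
-  * i.e. scale to 0..255, clamp negatives via the .pos output modifier, then
-  * narrow to unsigned 8-bit before the writeout branch. */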
-} - static midgard_block * emit_block(compiler_context *ctx, nir_block *block) { @@ -3485,16 +2350,12 @@ emit_block(compiler_context *ctx, nir_block *block) /* Perform heavylifting for aliasing */ actualise_ssa_to_alias(ctx); - midgard_emit_store(ctx, this_block); midgard_pair_load_store(ctx, this_block); /* Append fragment shader epilogue (value writeout) */ if (ctx->stage == MESA_SHADER_FRAGMENT) { if (block == nir_impl_last_block(ctx->func->impl)) { - if (ctx->is_blend) - emit_blend_epilogue(ctx); - else - emit_fragment_epilogue(ctx); + emit_fragment_epilogue(ctx); } } @@ -3685,7 +2546,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl .stage = nir->info.stage, .is_blend = is_blend, - .blend_constant_offset = -1, + .blend_constant_offset = 0, .alpha_ref = program->alpha_ref }; @@ -3695,18 +2556,10 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl /* TODO: Decide this at runtime */ ctx->uniform_cutoff = 8; - /* Assign var locations early, so the epilogue can use them if necessary */ - - nir_assign_var_locations(&nir->outputs, &nir->num_outputs, glsl_type_size); - nir_assign_var_locations(&nir->inputs, &nir->num_inputs, glsl_type_size); - nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, glsl_type_size); - /* Initialize at a global (not block) level hash tables */ ctx->ssa_constants = _mesa_hash_table_u64_create(NULL); - ctx->ssa_varyings = _mesa_hash_table_u64_create(NULL); ctx->ssa_to_alias = _mesa_hash_table_u64_create(NULL); - ctx->ssa_to_register = _mesa_hash_table_u64_create(NULL); ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL); ctx->sysval_to_id = _mesa_hash_table_u64_create(NULL); ctx->leftover_ssa_to_alias = _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal); @@ -3716,16 +2569,22 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl struct exec_list *varyings = ctx->stage == MESA_SHADER_VERTEX ? &nir->outputs : &nir->inputs; + unsigned max_varying = 0; nir_foreach_variable(var, varyings) { unsigned loc = var->data.driver_location; unsigned sz = glsl_type_size(var->type, FALSE); for (int c = 0; c < sz; ++c) { - program->varyings[loc + c] = var->data.location; + program->varyings[loc + c] = var->data.location + c; + max_varying = MAX2(max_varying, loc + c); } } - /* Lower gl_Position pre-optimisation */ + /* Lower gl_Position pre-optimisation, but after lowering vars to ssa + * (so we don't accidentally duplicate the epilogue since mesa/st has + * messed with our I/O quite a bit already) */ + + NIR_PASS_V(nir, nir_lower_vars_to_ssa); if (ctx->stage == MESA_SHADER_VERTEX) NIR_PASS_V(nir, nir_lower_viewport_transform); @@ -3758,7 +2617,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl memcpy(program->sysvals, ctx->sysvals, sizeof(ctx->sysvals[0]) * ctx->sysval_count); program->attribute_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_inputs : 0; - program->varying_count = (ctx->stage == MESA_SHADER_VERTEX) ? nir->num_outputs : ((ctx->stage == MESA_SHADER_FRAGMENT) ? 
nir->num_inputs : 0); + program->varying_count = max_varying + 1; /* Fencepost off-by-one */ nir_foreach_function(func, nir) { if (!func->impl) @@ -3784,12 +2643,19 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl progress = false; mir_foreach_block(ctx, block) { + progress |= midgard_opt_pos_propagate(ctx, block); progress |= midgard_opt_copy_prop(ctx, block); - progress |= midgard_opt_copy_prop_tex(ctx, block); progress |= midgard_opt_dead_code_eliminate(ctx, block); } } while (progress); + /* Nested control-flow can result in dead branches at the end of the + * block. This messes with our analysis and is just dead code, so cull + * them */ + mir_foreach_block(ctx, block) { + midgard_opt_cull_dead_branch(ctx, block); + } + /* Schedule! */ schedule_program(ctx); @@ -3801,7 +2667,7 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl mir_foreach_block(ctx, block) { util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) { for (int c = 0; c < bundle->instruction_count; ++c) { - midgard_instruction *ins = &bundle->instructions[c]; + midgard_instruction *ins = bundle->instructions[c]; if (!midgard_is_branch_unit(ins->unit)) continue; @@ -3816,10 +2682,13 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl /* Determine the block we're jumping to */ int target_number = ins->branch.target_block; - /* Report the destination tag. Discards don't need this */ + /* Report the destination tag */ int dest_tag = is_discard ? 0 : midgard_get_first_tag_from_block(ctx, target_number); - /* Count up the number of quadwords we're jumping over. That is, the number of quadwords in each of the blocks between (br_block_idx, target_number) */ + /* Count up the number of quadwords we're + * jumping over = number of quadwords until + * (br_block_idx, target_number) */ + int quadword_offset = 0; if (is_discard) { @@ -3936,8 +2805,13 @@ midgard_compile_shader_nir(nir_shader *nir, midgard_program *program, bool is_bl int current_bundle = 0; + /* Midgard prefetches instruction types, so during emission we + * need to lookahead. Unless this is the last instruction, in + * which we return 1. Or if this is the second to last and the + * last is an ALU, then it's also 1... */ + mir_foreach_block(ctx, block) { - util_dynarray_foreach(&block->bundles, midgard_bundle, bundle) { + mir_foreach_bundle_in_block(block, bundle) { int lookahead = 1; if (current_bundle + 1 < bundle_count) {