X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_program.c;h=2d0a52bb5fb88a515f3081b1299b0e936d75c86a;hb=0bf667984b074105be62116fa76be42b2a422e28;hp=598fb0bd3baf4e49bf0be073d06594f89d91c895;hpb=226bd9294541f65c91cad44924ef68b6da18f2a2;p=mesa.git diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c index 598fb0bd3ba..2d0a52bb5fb 100644 --- a/src/gallium/drivers/vc4/vc4_program.c +++ b/src/gallium/drivers/vc4/vc4_program.c @@ -24,7 +24,7 @@ #include #include "util/u_format.h" -#include "util/u_hash.h" +#include "util/crc32.h" #include "util/u_math.h" #include "util/u_memory.h" #include "util/ralloc.h" @@ -33,16 +33,29 @@ #include "tgsi/tgsi_parse.h" #include "compiler/nir/nir.h" #include "compiler/nir/nir_builder.h" +#include "compiler/nir_types.h" #include "nir/tgsi_to_nir.h" #include "vc4_context.h" #include "vc4_qpu.h" #include "vc4_qir.h" -#ifdef USE_VC4_SIMULATOR -#include "simpenrose/simpenrose.h" -#endif +#include "mesa/state_tracker/st_glsl_types.h" static struct qreg ntq_get_src(struct vc4_compile *c, nir_src src, int i); +static void +ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list); + +static int +type_size(const struct glsl_type *type) +{ + return glsl_count_attribute_slots(type, false); +} + +static int +uniforms_type_size(const struct glsl_type *type) +{ + return st_glsl_storage_type_size(type, false); +} static void resize_qreg_array(struct vc4_compile *c, @@ -65,11 +78,28 @@ resize_qreg_array(struct vc4_compile *c, (*regs)[i] = c->undef; } +static void +ntq_emit_thrsw(struct vc4_compile *c) +{ + if (!c->fs_threaded) + return; + + /* Always thread switch after each texture operation for now. + * + * We could do better by batching a bunch of texture fetches up and + * then doing one thread switch and collecting all their results + * afterward. + */ + qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef, + c->undef, c->undef)); + c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL); +} + static struct qreg indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr) { struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0); - uint32_t offset = intr->const_index[0]; + uint32_t offset = nir_intrinsic_base(intr); struct vc4_compiler_ubo_range *range = NULL; unsigned i; for (i = 0; i < c->num_uniform_ranges; i++) { @@ -99,27 +129,45 @@ indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr) /* Clamp to [0, array size). Note that MIN/MAX are signed. 
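 * As with the MSAA texel fetches in ntq_emit_txf(), the clamp keeps the
 * address handed to the TMU inside the declared range so that the
 * kernel's shader validation accepts the direct lookup.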
*/ indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0)); - indirect_offset = qir_MIN(c, indirect_offset, - qir_uniform_ui(c, (range->dst_offset + - range->size - 4))); + indirect_offset = qir_MIN_NOIMM(c, indirect_offset, + qir_uniform_ui(c, (range->dst_offset + + range->size - 4))); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + indirect_offset, + qir_uniform(c, QUNIFORM_UBO_ADDR, 0)); - qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0)); c->num_texture_samples++; + + ntq_emit_thrsw(c); + return qir_TEX_RESULT(c); } -nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b, - enum quniform_contents contents) +static struct qreg +vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr) { - nir_intrinsic_instr *intr = - nir_intrinsic_instr_create(b->shader, - nir_intrinsic_load_uniform); - intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4; - intr->num_components = 1; - intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0)); - nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL); - nir_builder_instr_insert(b, &intr->instr); - return &intr->dest.ssa; + nir_const_value *buffer_index = + nir_src_as_const_value(intr->src[0]); + assert(buffer_index->u32[0] == 1); + assert(c->stage == QSTAGE_FRAG); + + struct qreg offset = ntq_get_src(c, intr->src[1], 0); + + /* Clamp to [0, array size). Note that MIN/MAX are signed. */ + offset = qir_MAX(c, offset, qir_uniform_ui(c, 0)); + offset = qir_MIN_NOIMM(c, offset, + qir_uniform_ui(c, c->fs_key->ubo_1_size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + offset, + qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0])); + + c->num_texture_samples++; + + ntq_emit_thrsw(c); + + return qir_TEX_RESULT(c); } nir_ssa_def * @@ -151,6 +199,86 @@ ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def) return qregs; } +/** + * This function is responsible for getting QIR results into the associated + * storage for a NIR instruction. + * + * If it's a NIR SSA def, then we just set the associated hash table entry to + * the new result. + * + * If it's a NIR reg, then we need to update the existing qreg assigned to the + * NIR destination with the incoming value. To do that without introducing + * new MOVs, we require that the incoming qreg either be a uniform, or be + * SSA-defined by the previous QIR instruction in the block and rewritable by + * this function. 
That lets us sneak ahead and insert the SF flag beforehand + * (knowing that the previous instruction doesn't depend on flags) and rewrite + * its destination to be the NIR reg's destination + */ +static void +ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan, + struct qreg result) +{ + struct qinst *last_inst = NULL; + if (!list_empty(&c->cur_block->instructions)) + last_inst = (struct qinst *)c->cur_block->instructions.prev; + + assert(result.file == QFILE_UNIF || + (result.file == QFILE_TEMP && + last_inst && last_inst == c->defs[result.index])); + + if (dest->is_ssa) { + assert(chan < dest->ssa.num_components); + + struct qreg *qregs; + struct hash_entry *entry = + _mesa_hash_table_search(c->def_ht, &dest->ssa); + + if (entry) + qregs = entry->data; + else + qregs = ntq_init_ssa_def(c, &dest->ssa); + + qregs[chan] = result; + } else { + nir_register *reg = dest->reg.reg; + assert(dest->reg.base_offset == 0); + assert(reg->num_array_elems == 0); + struct hash_entry *entry = + _mesa_hash_table_search(c->def_ht, reg); + struct qreg *qregs = entry->data; + + /* Insert a MOV if the source wasn't an SSA def in the + * previous instruction. + */ + if (result.file == QFILE_UNIF) { + result = qir_MOV(c, result); + last_inst = c->defs[result.index]; + } + + /* We know they're both temps, so just rewrite index. */ + c->defs[last_inst->dst.index] = NULL; + last_inst->dst.index = qregs[chan].index; + + /* If we're in control flow, then make this update of the reg + * conditional on the execution mask. + */ + if (c->execute.file != QFILE_NULL) { + last_inst->dst.index = qregs[chan].index; + + /* Set the flags to the current exec mask. To insert + * the SF, we temporarily remove our SSA instruction. + */ + list_del(&last_inst->link); + qir_SF(c, c->execute); + list_addtail(&last_inst->link, + &c->cur_block->instructions); + + last_inst->cond = QPU_COND_ZS; + last_inst->cond_is_exec_mask = true; + } + } +} + static struct qreg * ntq_get_dest(struct vc4_compile *c, nir_dest *dest) { @@ -192,7 +320,7 @@ static struct qreg ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr, unsigned src) { - assert(util_is_power_of_two(instr->dest.write_mask)); + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); unsigned chan = ffs(instr->dest.write_mask) - 1; struct qreg r = ntq_get_src(c, instr->src[src].src, instr->src[src].swizzle[chan]); @@ -293,22 +421,25 @@ ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr) /* Perform the clamping required by kernel validation. 
*/ addr = qir_MAX(c, addr, qir_uniform_ui(c, 0)); - addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4)); + addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); - qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); + ntq_emit_thrsw(c); struct qreg tex = qir_TEX_RESULT(c); c->num_texture_samples++; - struct qreg *dest = ntq_get_dest(c, &instr->dest); enum pipe_format format = c->key->tex[unit].format; if (util_format_is_depth_or_stencil(format)) { struct qreg scaled = ntq_scale_depth_texture(c, tex); for (int i = 0; i < 4; i++) - dest[i] = scaled; + ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled)); } else { for (int i = 0; i < 4; i++) - dest[i] = qir_UNPACK_8_F(c, tex, i); + ntq_store_dest(c, &instr->dest, i, + qir_UNPACK_8_F(c, tex, i)); } } @@ -343,7 +474,7 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) lod = ntq_get_src(c, instr->src[i].src, 0); is_txl = true; break; - case nir_tex_src_comparitor: + case nir_tex_src_comparator: compare = ntq_get_src(c, instr->src[i].src, 0); break; default: @@ -351,6 +482,22 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) } } + if (c->stage != QSTAGE_FRAG && !is_txl) { + /* From the GLSL 1.20 spec: + * + * "If it is mip-mapped and running on the vertex shader, + * then the base texture is used." + */ + is_txl = true; + lod = qir_uniform_ui(c, 0); + } + + if (c->key->tex[unit].force_first_level) { + lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit); + is_txl = true; + is_txb = false; + } + struct qreg texture_u[] = { qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit), qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit), @@ -375,14 +522,20 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) unit | (is_txl << 16)); } + struct qinst *tmu; if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) { - qir_TEX_R(c, r, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) { - qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit), - texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), + qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, + unit)); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) { @@ -393,14 +546,23 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) t = qir_SAT(c, t); } - qir_TEX_T(c, t, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; - if (is_txl || is_txb) - qir_TEX_B(c, lod, texture_u[next_texture_u++]); + if (is_txl || is_txb) { + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; + } - qir_TEX_S(c, s, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s); + tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++]; c->num_texture_samples++; + + ntq_emit_thrsw(c); + struct qreg tex = qir_TEX_RESULT(c); enum pipe_format format = c->key->tex[unit].format; @@ -413,6 +575,15 @@ ntq_emit_tex(struct 
vc4_compile *c, nir_tex_instr *instr) struct qreg u0 = qir_uniform_f(c, 0.0f); struct qreg u1 = qir_uniform_f(c, 1.0f); if (c->key->tex[unit].compare_mode) { + /* From the GL_ARB_shadow spec: + * + * "Let Dt (D subscript t) be the depth texture + * value, in the range [0, 1]. Let R be the + * interpolated texture coordinate clamped to the + * range [0, 1]." + */ + compare = qir_SAT(c, compare); + switch (c->key->tex[unit].compare_func) { case PIPE_FUNC_NEVER: depth_output = qir_uniform_f(c, 0.0f); @@ -467,8 +638,11 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); struct qreg diff = qir_FSUB(c, src, trunc); qir_SF(c, diff); - return qir_SEL(c, QPU_COND_NS, - qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff); + + qir_FADD_dest(c, diff, + diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, diff); } /** @@ -478,15 +652,18 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) static struct qreg ntq_ffloor(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was < 0 in the first place. */ - qir_SF(c, qir_FSUB(c, src, trunc)); + qir_SF(c, qir_FSUB(c, src, result)); + + struct qinst *sub = qir_FSUB_dest(c, result, + result, qir_uniform_f(c, 1.0)); + sub->cond = QPU_COND_NS; - return qir_SEL(c, QPU_COND_NS, - qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc); + return qir_MOV(c, result); } /** @@ -496,36 +673,58 @@ ntq_ffloor(struct vc4_compile *c, struct qreg src) static struct qreg ntq_fceil(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was > 0 in the first place. */ - qir_SF(c, qir_FSUB(c, trunc, src)); + qir_SF(c, qir_FSUB(c, result, src)); + + qir_FADD_dest(c, result, + result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, result); +} + +static struct qreg +ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x) +{ + /* Since we're using a Taylor approximation, we want to have a small + * number of coefficients and take advantage of sin/cos repeating + * every 2pi. We keep our x as close to 0 as we can, since the series + * will be less accurate as |x| increases. (Also, be careful of + * shifting the input x value to be tricky with sin/cos relations, + * because getting accurate values for x==0 is very important for SDL + * rendering) + */ + struct qreg scaled_x = + qir_FMUL(c, x, + qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); + /* Note: FTOI truncates toward 0. 
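 * Because the truncation is toward zero rather than toward negative
 * infinity, x_frac below lands in [0, 1) for positive inputs but in
 * (-1, 0] for negative ones; the two conditional folds that follow
 * handle both signs and leave x_frac within [-0.5, 0.5].  For example,
 * src = 5.0 gives scaled_x ~= 0.7958 and x_frac ~= 0.7958, which the
 * first fold maps to ~= -0.2042, and sin(2*pi * -0.2042) ~= -0.959
 * matches sin(5.0).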
*/ + struct qreg x_frac = qir_FSUB(c, scaled_x, + qir_ITOF(c, qir_FTOI(c, scaled_x))); + /* Map [0.5, 1] to [-0.5, 0] */ + qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC; + /* Map [-1, -0.5] to [0, 0.5] */ + qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; - return qir_SEL(c, QPU_COND_NS, - qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc); + return x_frac; } static struct qreg ntq_fsin(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -2.0 * M_PI, - pow(2.0 * M_PI, 3) / (3 * 2 * 1), - -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 2.0 * M_PI, + -pow(2.0 * M_PI, 3) / (3 * 2 * 1), + pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, - src, - qir_uniform_f(c, 1.0 / (M_PI * 2.0))); - - struct qreg x = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); + struct qreg x = ntq_shrink_sincos_input_range(c, src); struct qreg x2 = qir_FMUL(c, x, x); struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0])); for (int i = 1; i < ARRAY_SIZE(coeff); i++) { @@ -543,21 +742,15 @@ static struct qreg ntq_fcos(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -1.0f, - pow(2.0 * M_PI, 2) / (2 * 1), - -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), - pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 1.0f, + -pow(2.0 * M_PI, 2) / (2 * 1), + pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, src, - qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); - struct qreg x_frac = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); - + struct qreg x_frac = ntq_shrink_sincos_input_range(c, src); struct qreg sum = qir_uniform_f(c, coeff[0]); struct qreg x2 = qir_FMUL(c, x_frac, x_frac); struct qreg x = x2; /* Current x^2, x^4, or x^6 */ @@ -565,13 +758,10 @@ ntq_fcos(struct vc4_compile *c, struct qreg src) if (i != 1) x = qir_FMUL(c, x, x2); - struct qreg mul = qir_FMUL(c, + sum = qir_FADD(c, qir_FMUL(c, x, - qir_uniform_f(c, coeff[i])); - if (i == 0) - sum = mul; - else - sum = qir_FADD(c, sum, mul); + qir_uniform_f(c, coeff[i])), + sum); } return sum; } @@ -585,7 +775,7 @@ ntq_fsign(struct vc4_compile *c, struct qreg src) qir_MOV_dest(c, t, qir_uniform_f(c, 0.0)); qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC; qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS; - return t; + return qir_MOV(c, t); } static void @@ -731,10 +921,10 @@ ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr) if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] && instr->src[0].swizzle[0] == instr->src[0].swizzle[2] && instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) { - struct qreg *dest = ntq_get_dest(c, &instr->dest.dest); - *dest = qir_PACK_8888_F(c, - ntq_get_src(c, instr->src[0].src, - instr->src[0].swizzle[0])); + struct qreg rep = ntq_get_src(c, + instr->src[0].src, + instr->src[0].swizzle[0]); + ntq_store_dest(c, 
&instr->dest.dest, 0, qir_PACK_8888_F(c, rep)); return; } @@ -764,8 +954,7 @@ ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr) qir_PACK_8_F(c, result, src, i); } - struct qreg *dest = ntq_get_dest(c, &instr->dest.dest); - *dest = result; + ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result)); } /** Handles sign-extended bitfield extracts for 16 bits. */ @@ -815,24 +1004,24 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, enum qpu_cond cond; switch (compare_instr->op) { - case nir_op_feq: - case nir_op_ieq: + case nir_op_feq32: + case nir_op_ieq32: case nir_op_seq: cond = QPU_COND_ZS; break; - case nir_op_fne: - case nir_op_ine: + case nir_op_fne32: + case nir_op_ine32: case nir_op_sne: cond = QPU_COND_ZC; break; - case nir_op_fge: - case nir_op_ige: - case nir_op_uge: + case nir_op_fge32: + case nir_op_ige32: + case nir_op_uge32: case nir_op_sge: cond = QPU_COND_NC; break; - case nir_op_flt: - case nir_op_ilt: + case nir_op_flt32: + case nir_op_ilt32: case nir_op_slt: cond = QPU_COND_NS; break; @@ -859,7 +1048,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0)); break; - case nir_op_bcsel: + case nir_op_b32csel: *dest = qir_SEL(c, cond, ntq_get_alu_src(c, sel_instr, 1), ntq_get_alu_src(c, sel_instr, 2)); @@ -871,6 +1060,9 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, break; } + /* Make the temporary for nir_store_dest(). */ + *dest = qir_MOV(c, *dest); + return true; } @@ -884,6 +1076,8 @@ static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr, { if (!instr->src[0].src.is_ssa) goto out; + if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu) + goto out; nir_alu_instr *compare = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr); if (!compare) @@ -895,12 +1089,55 @@ static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr, out: qir_SF(c, src[0]); - return qir_SEL(c, QPU_COND_NS, src[1], src[2]); + return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2])); +} + +static struct qreg +ntq_fddx(struct vc4_compile *c, struct qreg src) +{ + /* Make sure that we have a bare temp to use for MUL rotation, so it + * can be allocated to an accumulator. + */ + if (src.pack || src.file != QFILE_TEMP) + src = qir_MOV(c, src); + + struct qreg from_left = qir_ROT_MUL(c, src, 1); + struct qreg from_right = qir_ROT_MUL(c, src, 15); + + /* Distinguish left/right pixels of the quad. */ + qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0), + qir_uniform_ui(c, 1))); + + return qir_MOV(c, qir_SEL(c, QPU_COND_ZS, + qir_FSUB(c, from_right, src), + qir_FSUB(c, src, from_left))); +} + +static struct qreg +ntq_fddy(struct vc4_compile *c, struct qreg src) +{ + if (src.pack || src.file != QFILE_TEMP) + src = qir_MOV(c, src); + + struct qreg from_bottom = qir_ROT_MUL(c, src, 2); + struct qreg from_top = qir_ROT_MUL(c, src, 14); + + /* Distinguish top/bottom pixels of the quad. */ + qir_SF(c, qir_AND(c, + qir_reg(QFILE_QPU_ELEMENT, 0), + qir_uniform_ui(c, 2))); + + return qir_MOV(c, qir_SEL(c, QPU_COND_ZS, + qir_FSUB(c, from_top, src), + qir_FSUB(c, src, from_bottom))); } static void ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) { + /* This should always be lowered to ALU operations for VC4. */ + assert(!instr->dest.saturate); + /* Vectors are special in that they have non-scalarized writemasks, * and just take the first swizzle channel for each argument in order * into each writemask channel. 
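/*
 * A minimal host-side model of what the new ntq_fddx()/ntq_fddy() helpers
 * compute, assuming the 2x2-quad layout the code relies on (QPU element
 * bit 0 = left/right pixel, bit 1 = top/bottom pixel).  The name
 * quad_ddx_model() and the host-side form are illustrative only, not part
 * of the driver.  Every pixel of a quad receives the same forward
 * difference: the SEL between (from_right - src) and (src - from_left)
 * yields right - left for both pixels of a row, and dFdy is the analogous
 * difference between the quad's two rows.
 */
static float
quad_ddx_model(const float quad[4], int y)
{
        /* quad[] holds the 2x2 values indexed by (y << 1) | x. */
        const float left  = quad[(y << 1) | 0];
        const float right = quad[(y << 1) | 1];

        return right - left;
}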
@@ -912,9 +1149,9 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) srcs[i] = ntq_get_src(c, instr->src[i].src, instr->src[i].swizzle[0]); - struct qreg *dest = ntq_get_dest(c, &instr->dest.dest); for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) - dest[i] = srcs[i]; + ntq_store_dest(c, &instr->dest.dest, i, + qir_MOV(c, srcs[i])); return; } @@ -926,10 +1163,10 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) if (instr->op == nir_op_unpack_unorm_4x8) { struct qreg src = ntq_get_src(c, instr->src[0].src, instr->src[0].swizzle[0]); - struct qreg *dest = ntq_get_dest(c, &instr->dest.dest); for (int i = 0; i < 4; i++) { if (instr->dest.write_mask & (1 << i)) - dest[i] = qir_UNPACK_8_F(c, src, i); + ntq_store_dest(c, &instr->dest.dest, i, + qir_UNPACK_8_F(c, src, i)); } return; } @@ -940,190 +1177,198 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) src[i] = ntq_get_alu_src(c, instr, i); } - /* Pick the channel to store the output in. */ - assert(!instr->dest.saturate); - struct qreg *dest = ntq_get_dest(c, &instr->dest.dest); - assert(util_is_power_of_two(instr->dest.write_mask)); - dest += ffs(instr->dest.write_mask) - 1; + struct qreg result; switch (instr->op) { case nir_op_fmov: case nir_op_imov: - *dest = qir_MOV(c, src[0]); + result = qir_MOV(c, src[0]); break; case nir_op_fmul: - *dest = qir_FMUL(c, src[0], src[1]); + result = qir_FMUL(c, src[0], src[1]); break; case nir_op_fadd: - *dest = qir_FADD(c, src[0], src[1]); + result = qir_FADD(c, src[0], src[1]); break; case nir_op_fsub: - *dest = qir_FSUB(c, src[0], src[1]); + result = qir_FSUB(c, src[0], src[1]); break; case nir_op_fmin: - *dest = qir_FMIN(c, src[0], src[1]); + result = qir_FMIN(c, src[0], src[1]); break; case nir_op_fmax: - *dest = qir_FMAX(c, src[0], src[1]); + result = qir_FMAX(c, src[0], src[1]); break; - case nir_op_f2i: - case nir_op_f2u: - *dest = qir_FTOI(c, src[0]); + case nir_op_f2i32: + case nir_op_f2u32: + result = qir_FTOI(c, src[0]); break; - case nir_op_i2f: - case nir_op_u2f: - *dest = qir_ITOF(c, src[0]); + case nir_op_i2f32: + case nir_op_u2f32: + result = qir_ITOF(c, src[0]); break; - case nir_op_b2f: - *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0)); + case nir_op_b2f32: + result = qir_AND(c, src[0], qir_uniform_f(c, 1.0)); break; - case nir_op_b2i: - *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1)); + case nir_op_b2i32: + result = qir_AND(c, src[0], qir_uniform_ui(c, 1)); break; - case nir_op_i2b: - case nir_op_f2b: + case nir_op_i2b32: + case nir_op_f2b32: qir_SF(c, src[0]); - *dest = qir_SEL(c, QPU_COND_ZC, - qir_uniform_ui(c, ~0), - qir_uniform_ui(c, 0)); + result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, + qir_uniform_ui(c, ~0), + qir_uniform_ui(c, 0))); break; case nir_op_iadd: - *dest = qir_ADD(c, src[0], src[1]); + result = qir_ADD(c, src[0], src[1]); break; case nir_op_ushr: - *dest = qir_SHR(c, src[0], src[1]); + result = qir_SHR(c, src[0], src[1]); break; case nir_op_isub: - *dest = qir_SUB(c, src[0], src[1]); + result = qir_SUB(c, src[0], src[1]); break; case nir_op_ishr: - *dest = qir_ASR(c, src[0], src[1]); + result = qir_ASR(c, src[0], src[1]); break; case nir_op_ishl: - *dest = qir_SHL(c, src[0], src[1]); + result = qir_SHL(c, src[0], src[1]); break; case nir_op_imin: - *dest = qir_MIN(c, src[0], src[1]); + result = qir_MIN(c, src[0], src[1]); break; case nir_op_imax: - *dest = qir_MAX(c, src[0], src[1]); + result = qir_MAX(c, src[0], src[1]); break; case nir_op_iand: - *dest = 
qir_AND(c, src[0], src[1]); + result = qir_AND(c, src[0], src[1]); break; case nir_op_ior: - *dest = qir_OR(c, src[0], src[1]); + result = qir_OR(c, src[0], src[1]); break; case nir_op_ixor: - *dest = qir_XOR(c, src[0], src[1]); + result = qir_XOR(c, src[0], src[1]); break; case nir_op_inot: - *dest = qir_NOT(c, src[0]); + result = qir_NOT(c, src[0]); break; case nir_op_imul: - *dest = ntq_umul(c, src[0], src[1]); + result = ntq_umul(c, src[0], src[1]); break; case nir_op_seq: case nir_op_sne: case nir_op_sge: case nir_op_slt: - case nir_op_feq: - case nir_op_fne: - case nir_op_fge: - case nir_op_flt: - case nir_op_ieq: - case nir_op_ine: - case nir_op_ige: - case nir_op_uge: - case nir_op_ilt: - if (!ntq_emit_comparison(c, dest, instr, instr)) { + case nir_op_feq32: + case nir_op_fne32: + case nir_op_fge32: + case nir_op_flt32: + case nir_op_ieq32: + case nir_op_ine32: + case nir_op_ige32: + case nir_op_uge32: + case nir_op_ilt32: + if (!ntq_emit_comparison(c, &result, instr, instr)) { fprintf(stderr, "Bad comparison instruction\n"); } break; - case nir_op_bcsel: - *dest = ntq_emit_bcsel(c, instr, src); + case nir_op_b32csel: + result = ntq_emit_bcsel(c, instr, src); break; case nir_op_fcsel: qir_SF(c, src[0]); - *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]); + result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2])); break; case nir_op_frcp: - *dest = ntq_rcp(c, src[0]); + result = ntq_rcp(c, src[0]); break; case nir_op_frsq: - *dest = ntq_rsq(c, src[0]); + result = ntq_rsq(c, src[0]); break; case nir_op_fexp2: - *dest = qir_EXP2(c, src[0]); + result = qir_EXP2(c, src[0]); break; case nir_op_flog2: - *dest = qir_LOG2(c, src[0]); + result = qir_LOG2(c, src[0]); break; case nir_op_ftrunc: - *dest = qir_ITOF(c, qir_FTOI(c, src[0])); + result = qir_ITOF(c, qir_FTOI(c, src[0])); break; case nir_op_fceil: - *dest = ntq_fceil(c, src[0]); + result = ntq_fceil(c, src[0]); break; case nir_op_ffract: - *dest = ntq_ffract(c, src[0]); + result = ntq_ffract(c, src[0]); break; case nir_op_ffloor: - *dest = ntq_ffloor(c, src[0]); + result = ntq_ffloor(c, src[0]); break; case nir_op_fsin: - *dest = ntq_fsin(c, src[0]); + result = ntq_fsin(c, src[0]); break; case nir_op_fcos: - *dest = ntq_fcos(c, src[0]); + result = ntq_fcos(c, src[0]); break; case nir_op_fsign: - *dest = ntq_fsign(c, src[0]); + result = ntq_fsign(c, src[0]); break; case nir_op_fabs: - *dest = qir_FMAXABS(c, src[0], src[0]); + result = qir_FMAXABS(c, src[0], src[0]); break; case nir_op_iabs: - *dest = qir_MAX(c, src[0], + result = qir_MAX(c, src[0], qir_SUB(c, qir_uniform_ui(c, 0), src[0])); break; case nir_op_ibitfield_extract: - *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]); + result = ntq_emit_ibfe(c, src[0], src[1], src[2]); break; case nir_op_ubitfield_extract: - *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]); + result = ntq_emit_ubfe(c, src[0], src[1], src[2]); break; case nir_op_usadd_4x8: - *dest = qir_V8ADDS(c, src[0], src[1]); + result = qir_V8ADDS(c, src[0], src[1]); break; case nir_op_ussub_4x8: - *dest = qir_V8SUBS(c, src[0], src[1]); + result = qir_V8SUBS(c, src[0], src[1]); break; case nir_op_umin_4x8: - *dest = qir_V8MIN(c, src[0], src[1]); + result = qir_V8MIN(c, src[0], src[1]); break; case nir_op_umax_4x8: - *dest = qir_V8MAX(c, src[0], src[1]); + result = qir_V8MAX(c, src[0], src[1]); break; case nir_op_umul_unorm_4x8: - *dest = qir_V8MULD(c, src[0], src[1]); + result = qir_V8MULD(c, src[0], src[1]); + break; + + case nir_op_fddx: + case nir_op_fddx_coarse: + case nir_op_fddx_fine: + result = ntq_fddx(c, 
src[0]); + break; + + case nir_op_fddy: + case nir_op_fddy_coarse: + case nir_op_fddy_fine: + result = ntq_fddy(c, src[0]); break; default: @@ -1132,6 +1377,13 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) fprintf(stderr, "\n"); abort(); } + + /* We have a scalar result, so the instruction should only have a + * single channel written to. + */ + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); + ntq_store_dest(c, &instr->dest.dest, + ffs(instr->dest.write_mask) - 1, result); } static void @@ -1145,7 +1397,7 @@ emit_frag_end(struct vc4_compile *c) } uint32_t discard_cond = QPU_COND_ALWAYS; - if (c->discard.file != QFILE_NULL) { + if (c->s->info.fs.uses_discard) { qir_SF(c, c->discard); discard_cond = QPU_COND_ZS; } @@ -1171,7 +1423,7 @@ emit_frag_end(struct vc4_compile *c) if (c->output_position_index != -1) { qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0), qir_FMUL(c, - c->outputs[c->output_position_index + 2], + c->outputs[c->output_position_index], qir_uniform_f(c, 0xffffff)))->cond = discard_cond; } else { qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0), @@ -1274,7 +1526,7 @@ emit_vert_end(struct vc4_compile *c, struct vc4_varying_slot *fs_inputs, uint32_t num_fs_inputs) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1309,7 +1561,7 @@ emit_vert_end(struct vc4_compile *c, static void emit_coord_end(struct vc4_compile *c) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1332,16 +1584,21 @@ vc4_optimize_nir(struct nir_shader *s) progress = false; NIR_PASS_V(s, nir_lower_vars_to_ssa); - NIR_PASS_V(s, nir_lower_alu_to_scalar); - NIR_PASS_V(s, nir_lower_phis_to_scalar); - + NIR_PASS(progress, s, nir_lower_alu_to_scalar); + NIR_PASS(progress, s, nir_lower_phis_to_scalar); NIR_PASS(progress, s, nir_copy_prop); + NIR_PASS(progress, s, nir_opt_remove_phis); NIR_PASS(progress, s, nir_opt_dce); + NIR_PASS(progress, s, nir_opt_dead_cf); NIR_PASS(progress, s, nir_opt_cse); - NIR_PASS(progress, s, nir_opt_peephole_select); + NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true); NIR_PASS(progress, s, nir_opt_algebraic); NIR_PASS(progress, s, nir_opt_constant_folding); NIR_PASS(progress, s, nir_opt_undef); + NIR_PASS(progress, s, nir_opt_loop_unroll, + nir_var_shader_in | + nir_var_shader_out | + nir_var_function_temp); } while (progress); } @@ -1387,14 +1644,11 @@ ntq_setup_inputs(struct vc4_compile *c) if (c->stage == QSTAGE_FRAG) { if (var->data.location == VARYING_SLOT_POS) { emit_fragcoord_input(c, loc); - } else if (var->data.location == VARYING_SLOT_FACE) { - c->inputs[loc * 4 + 0] = - qir_ITOF(c, qir_reg(QFILE_FRAG_REV_FLAG, - 0)); - } else if (var->data.location >= VARYING_SLOT_VAR0 && - (c->fs_key->point_sprite_mask & - (1 << (var->data.location - - VARYING_SLOT_VAR0)))) { + } else if (var->data.location == VARYING_SLOT_PNTC || + (var->data.location >= VARYING_SLOT_VAR0 && + (c->fs_key->point_sprite_mask & + (1 << (var->data.location - + VARYING_SLOT_VAR0))))) { c->inputs[loc * 4 + 0] = c->point_x; c->inputs[loc * 4 + 1] = c->point_y; } else { @@ -1449,11 +1703,11 @@ static void ntq_setup_uniforms(struct vc4_compile *c) { nir_foreach_variable(var, &c->s->uniforms) { - unsigned array_len = MAX2(glsl_get_length(var->type), 1); - unsigned array_elem_size = 4 * sizeof(float); + uint32_t vec4_count = 
uniforms_type_size(var->type); + unsigned vec4_size = 4 * sizeof(float); - declare_uniform_range(c, var->data.driver_location * array_elem_size, - array_len * array_elem_size); + declare_uniform_range(c, var->data.driver_location * vec4_size, + vec4_count * vec4_size); } } @@ -1475,7 +1729,7 @@ ntq_setup_registers(struct vc4_compile *c, struct exec_list *list) _mesa_hash_table_insert(c->def_ht, nir_reg, qregs); for (int i = 0; i < array_len * nir_reg->num_components; i++) - qregs[i] = qir_uniform_ui(c, 0); + qregs[i] = qir_get_temp(c); } } @@ -1501,79 +1755,135 @@ ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr) qregs[i] = qir_uniform_ui(c, 0); } +static void +ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(nir_src_as_const_value(instr->src[0])->u32[0] == 0); + + /* Reads of the per-sample color need to be done in + * order. + */ + int sample_index = (nir_intrinsic_base(instr) - + VC4_NIR_TLB_COLOR_READ_INPUT); + for (int i = 0; i <= sample_index; i++) { + if (c->color_reads[i].file == QFILE_NULL) { + c->color_reads[i] = + qir_TLB_COLOR_READ(c); + } + } + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->color_reads[sample_index])); +} + +static void +ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(instr->num_components == 1); + + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + assert(const_offset && "vc4 doesn't support indirect inputs"); + + if (c->stage == QSTAGE_FRAG && + nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) { + ntq_emit_color_read(c, instr); + return; + } + + uint32_t offset = nir_intrinsic_base(instr) + const_offset->u32[0]; + int comp = nir_intrinsic_component(instr); + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->inputs[offset * 4 + comp])); +} + static void ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) { - const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic]; nir_const_value *const_offset; unsigned offset; - struct qreg *dest = NULL; - - if (info->has_dest) { - dest = ntq_get_dest(c, &instr->dest); - } switch (instr->intrinsic) { case nir_intrinsic_load_uniform: assert(instr->num_components == 1); const_offset = nir_src_as_const_value(instr->src[0]); if (const_offset) { - offset = instr->const_index[0] + const_offset->u32[0]; + offset = nir_intrinsic_base(instr) + const_offset->u32[0]; assert(offset % 4 == 0); /* We need dwords */ offset = offset / 4; - if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) { - *dest = qir_uniform(c, QUNIFORM_UNIFORM, - offset); - } else { - *dest = qir_uniform(c, offset - - VC4_NIR_STATE_UNIFORM_OFFSET, - 0); - } + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_UNIFORM, + offset)); } else { - *dest = indirect_uniform_load(c, instr); + ntq_store_dest(c, &instr->dest, 0, + indirect_uniform_load(c, instr)); } break; + case nir_intrinsic_load_ubo: + assert(instr->num_components == 1); + ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr)); + break; + case nir_intrinsic_load_user_clip_plane: for (int i = 0; i < instr->num_components; i++) { - dest[i] = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE, - instr->const_index[0] * 4 + i); + ntq_store_dest(c, &instr->dest, i, + qir_uniform(c, QUNIFORM_USER_CLIP_PLANE, + nir_intrinsic_ucp_id(instr) * + 4 + i)); } break; + case nir_intrinsic_load_blend_const_color_r_float: + case nir_intrinsic_load_blend_const_color_g_float: + case nir_intrinsic_load_blend_const_color_b_float: + case 
nir_intrinsic_load_blend_const_color_a_float: + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X + + (instr->intrinsic - + nir_intrinsic_load_blend_const_color_r_float), + 0)); + break; + + case nir_intrinsic_load_blend_const_color_rgba8888_unorm: + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA, + 0)); + break; + + case nir_intrinsic_load_blend_const_color_aaaa8888_unorm: + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA, + 0)); + break; + + case nir_intrinsic_load_alpha_ref_float: + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_ALPHA_REF, 0)); + break; + case nir_intrinsic_load_sample_mask_in: - *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0); + ntq_store_dest(c, &instr->dest, 0, + qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0)); + break; + + case nir_intrinsic_load_front_face: + /* The register contains 0 (front) or 1 (back), and we need to + * turn it into a NIR bool where true means front. + */ + ntq_store_dest(c, &instr->dest, 0, + qir_ADD(c, + qir_uniform_ui(c, -1), + qir_reg(QFILE_FRAG_REV_FLAG, 0))); break; case nir_intrinsic_load_input: - assert(instr->num_components == 1); - const_offset = nir_src_as_const_value(instr->src[0]); - assert(const_offset && "vc4 doesn't support indirect inputs"); - if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) { - assert(const_offset->u32[0] == 0); - /* Reads of the per-sample color need to be done in - * order. - */ - int sample_index = (instr->const_index[0] - - VC4_NIR_TLB_COLOR_READ_INPUT); - for (int i = 0; i <= sample_index; i++) { - if (c->color_reads[i].file == QFILE_NULL) { - c->color_reads[i] = - qir_TLB_COLOR_READ(c); - } - } - *dest = c->color_reads[sample_index]; - } else { - offset = instr->const_index[0] + const_offset->u32[0]; - *dest = c->inputs[offset]; - } + ntq_emit_load_input(c, instr); break; case nir_intrinsic_store_output: const_offset = nir_src_as_const_value(instr->src[1]); assert(const_offset && "vc4 doesn't support indirect outputs"); - offset = instr->const_index[0] + const_offset->u32[0]; + offset = nir_intrinsic_base(instr) + const_offset->u32[0]; /* MSAA color outputs are the only case where we have an * output that's not lowered to being a store of a single 32 @@ -1587,6 +1897,7 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) i)); } } else { + offset = offset * 4 + nir_intrinsic_component(instr); assert(instr->num_components == 1); c->outputs[offset] = qir_MOV(c, ntq_get_src(c, instr->src[0], 0)); @@ -1595,15 +1906,33 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_discard: - c->discard = qir_uniform_ui(c, ~0); + if (c->execute.file != QFILE_NULL) { + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->discard, + qir_uniform_ui(c, ~0)); + } else { + qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0)); + } break; - case nir_intrinsic_discard_if: - if (c->discard.file == QFILE_NULL) - c->discard = qir_uniform_ui(c, 0); - c->discard = qir_OR(c, c->discard, + case nir_intrinsic_discard_if: { + /* true (~0) if we're discarding */ + struct qreg cond = ntq_get_src(c, instr->src[0], 0); + + if (c->execute.file != QFILE_NULL) { + /* execute == 0 means the channel is active. Invert + * the condition so that we can use zero as "executing + * and discarding." 
+ */ + qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond))); + qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond); + } else { + qir_OR_dest(c, c->discard, c->discard, ntq_get_src(c, instr->src[0], 0)); + } + break; + } default: fprintf(stderr, "Unknown intrinsic: "); @@ -1613,10 +1942,125 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) } } +/* Clears (activates) the execute flags for any channels whose jump target + * matches this block. + */ +static void +ntq_activate_execute_for_block(struct vc4_compile *c) +{ + qir_SF(c, qir_SUB(c, + c->execute, + qir_uniform_ui(c, c->cur_block->index))); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0)); +} + static void ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt) { - fprintf(stderr, "general IF statements not handled.\n"); + if (!c->vc4->screen->has_control_flow) { + fprintf(stderr, + "IF statement support requires updated kernel.\n"); + return; + } + + nir_block *nir_else_block = nir_if_first_else_block(if_stmt); + bool empty_else_block = + (nir_else_block == nir_if_last_else_block(if_stmt) && + exec_list_is_empty(&nir_else_block->instr_list)); + + struct qblock *then_block = qir_new_block(c); + struct qblock *after_block = qir_new_block(c); + struct qblock *else_block; + if (empty_else_block) + else_block = after_block; + else + else_block = qir_new_block(c); + + bool was_top_level = false; + if (c->execute.file == QFILE_NULL) { + c->execute = qir_MOV(c, qir_uniform_ui(c, 0)); + was_top_level = true; + } + + /* Set ZS for executing (execute == 0) and jumping (if->condition == + * 0) channels, and then update execute flags for those to point to + * the ELSE block. + */ + qir_SF(c, qir_OR(c, + c->execute, + ntq_get_src(c, if_stmt->condition, 0))); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, + qir_uniform_ui(c, else_block->index)); + + /* Jump to ELSE if nothing is active for THEN, otherwise fall + * through. + */ + qir_SF(c, c->execute); + qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC); + qir_link_blocks(c->cur_block, else_block); + qir_link_blocks(c->cur_block, then_block); + + /* Process the THEN block. */ + qir_set_emit_block(c, then_block); + ntq_emit_cf_list(c, &if_stmt->then_list); + + if (!empty_else_block) { + /* Handle the end of the THEN block. First, all currently + * active channels update their execute flags to point to + * ENDIF + */ + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, + qir_uniform_ui(c, after_block->index)); + + /* If everything points at ENDIF, then jump there immediately. 
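 * (The SF below subtracts the ENDIF block's index from each channel's
 * execute value, and QPU branch conditions are evaluated across all 16
 * channels, so BRANCH_ALL_ZS only takes the jump when no channel still
 * needs to run the ELSE block.)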
*/ + qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index))); + qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS); + qir_link_blocks(c->cur_block, after_block); + qir_link_blocks(c->cur_block, else_block); + + qir_set_emit_block(c, else_block); + ntq_activate_execute_for_block(c); + ntq_emit_cf_list(c, &if_stmt->else_list); + } + + qir_link_blocks(c->cur_block, after_block); + + qir_set_emit_block(c, after_block); + if (was_top_level) { + c->execute = c->undef; + c->last_top_block = c->cur_block; + } else { + ntq_activate_execute_for_block(c); + } +} + +static void +ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump) +{ + struct qblock *jump_block; + switch (jump->type) { + case nir_jump_break: + jump_block = c->loop_break_block; + break; + case nir_jump_continue: + jump_block = c->loop_cont_block; + break; + default: + unreachable("Unsupported jump type\n"); + } + + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, + qir_uniform_ui(c, jump_block->index)); + + /* Jump to the destination block if everyone has taken the jump. */ + qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index))); + qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS); + struct qblock *new_block = qir_new_block(c); + qir_link_blocks(c->cur_block, jump_block); + qir_link_blocks(c->cur_block, new_block); + qir_set_emit_block(c, new_block); } static void @@ -1643,6 +2087,10 @@ ntq_emit_instr(struct vc4_compile *c, nir_instr *instr) ntq_emit_tex(c, nir_instr_as_tex(instr)); break; + case nir_instr_type_jump: + ntq_emit_jump(c, nir_instr_as_jump(instr)); + break; + default: fprintf(stderr, "Unknown NIR instr type: "); nir_print_instr(instr, stderr); @@ -1662,10 +2110,61 @@ ntq_emit_block(struct vc4_compile *c, nir_block *block) static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list); static void -ntq_emit_loop(struct vc4_compile *c, nir_loop *nloop) +ntq_emit_loop(struct vc4_compile *c, nir_loop *loop) { - fprintf(stderr, "LOOPS not fully handled. Rendering errors likely.\n"); - ntq_emit_cf_list(c, &nloop->body); + if (!c->vc4->screen->has_control_flow) { + fprintf(stderr, + "loop support requires updated kernel.\n"); + ntq_emit_cf_list(c, &loop->body); + return; + } + + bool was_top_level = false; + if (c->execute.file == QFILE_NULL) { + c->execute = qir_MOV(c, qir_uniform_ui(c, 0)); + was_top_level = true; + } + + struct qblock *save_loop_cont_block = c->loop_cont_block; + struct qblock *save_loop_break_block = c->loop_break_block; + + c->loop_cont_block = qir_new_block(c); + c->loop_break_block = qir_new_block(c); + + qir_link_blocks(c->cur_block, c->loop_cont_block); + qir_set_emit_block(c, c->loop_cont_block); + ntq_activate_execute_for_block(c); + + ntq_emit_cf_list(c, &loop->body); + + /* If anything had explicitly continued, or is here at the end of the + * loop, then we need to loop again. SF updates are masked by the + * instruction's condition, so we can do the OR of the two conditions + * within SF. 
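 * Concretely: the first SF sets Z for channels whose execute is 0 (they
 * reached the end of the body), the conditional SUB then also sets Z for
 * channels parked on the continue block's index, and BRANCH_ANY_ZS loops
 * again if any channel still wants another iteration.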
+ */ + qir_SF(c, c->execute); + struct qinst *cont_check = + qir_SUB_dest(c, + c->undef, + c->execute, + qir_uniform_ui(c, c->loop_cont_block->index)); + cont_check->cond = QPU_COND_ZC; + cont_check->sf = true; + + qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS); + qir_link_blocks(c->cur_block, c->loop_cont_block); + qir_link_blocks(c->cur_block, c->loop_break_block); + + qir_set_emit_block(c, c->loop_break_block); + if (was_top_level) { + c->execute = c->undef; + c->last_top_block = c->cur_block; + } else { + ntq_activate_execute_for_block(c); + } + + c->loop_break_block = save_loop_break_block; + c->loop_cont_block = save_loop_cont_block; } static void @@ -1713,6 +2212,9 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl) static void nir_to_qir(struct vc4_compile *c) { + if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard) + c->discard = qir_MOV(c, qir_uniform_ui(c, 0)); + ntq_setup_inputs(c); ntq_setup_outputs(c); ntq_setup_uniforms(c); @@ -1727,24 +2229,27 @@ nir_to_qir(struct vc4_compile *c) } static const nir_shader_compiler_options nir_options = { + .lower_all_io_to_temps = true, .lower_extract_byte = true, .lower_extract_word = true, + .lower_fdiv = true, .lower_ffma = true, .lower_flrp32 = true, .lower_fpow = true, .lower_fsat = true, .lower_fsqrt = true, + .lower_ldexp = true, .lower_negate = true, + .native_integers = true, + .max_unroll_iterations = 32, }; -static bool -count_nir_instrs_in_block(nir_block *block, void *state) +const void * +vc4_screen_get_compiler_options(struct pipe_screen *pscreen, + enum pipe_shader_ir ir, + enum pipe_shader_type shader) { - int *count = (int *) state; - nir_foreach_instr(instr, block) { - *count = *count + 1; - } - return true; + return &nir_options; } static int @@ -1754,21 +2259,27 @@ count_nir_instrs(nir_shader *nir) nir_foreach_function(function, nir) { if (!function->impl) continue; - nir_foreach_block_call(function->impl, count_nir_instrs_in_block, &count); + nir_foreach_block(block, function->impl) { + nir_foreach_instr(instr, block) + count++; + } } return count; } static struct vc4_compile * vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, - struct vc4_key *key) + struct vc4_key *key, bool fs_threaded) { struct vc4_compile *c = qir_compile_init(); + c->vc4 = vc4; c->stage = stage; c->shader_state = &key->shader_state->base; c->program_id = key->shader_state->program_id; - c->variant_id = key->shader_state->compiled_variant_count++; + c->variant_id = + p_atomic_inc_return(&key->shader_state->compiled_variant_count); + c->fs_threaded = fs_threaded; c->key = key; switch (stage) { @@ -1789,21 +2300,17 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, break; } - const struct tgsi_token *tokens = key->shader_state->base.tokens; - - if (vc4_debug & VC4_DEBUG_TGSI) { - fprintf(stderr, "%s prog %d/%d TGSI:\n", - qir_get_stage_name(c->stage), - c->program_id, c->variant_id); - tgsi_dump(tokens, 0); - } - - c->s = tgsi_to_nir(tokens, &nir_options); - NIR_PASS_V(c->s, nir_opt_global_to_local); - NIR_PASS_V(c->s, nir_convert_to_ssa); + c->s = nir_shader_clone(c, key->shader_state->base.ir.nir); - if (stage == QSTAGE_FRAG) + if (stage == QSTAGE_FRAG) { + if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) { + NIR_PASS_V(c->s, nir_lower_alpha_test, + c->fs_key->alpha_test_func, + c->fs_key->sample_alpha_to_one && + c->fs_key->msaa); + } NIR_PASS_V(c->s, vc4_nir_lower_blend, c); + } struct nir_lower_tex_options tex_options = { /* We would need to implement txs, but we don't want the @@ -1844,25 +2351,42 @@ 
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, tex_options.lower_srgb |= (1 << i); } - NIR_PASS_V(c->s, nir_normalize_cubemap_coords); NIR_PASS_V(c->s, nir_lower_tex, &tex_options); if (c->fs_key && c->fs_key->light_twoside) NIR_PASS_V(c->s, nir_lower_two_sided_color); - if (stage == QSTAGE_FRAG) - NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables); + if (c->vs_key && c->vs_key->clamp_color) + NIR_PASS_V(c->s, nir_lower_clamp_color_outputs); + + if (c->key->ucp_enables) { + if (stage == QSTAGE_FRAG) { + NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables); + } else { + NIR_PASS_V(c->s, nir_lower_clip_vs, + c->key->ucp_enables, false); + NIR_PASS_V(c->s, nir_lower_io_to_scalar, + nir_var_shader_out); + } + } + + /* FS input scalarizing must happen after nir_lower_two_sided_color, + * which only handles a vec4 at a time. Similarly, VS output + * scalarizing must happen after nir_lower_clip_vs. + */ + if (c->stage == QSTAGE_FRAG) + NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in); else - NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables); + NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out); NIR_PASS_V(c->s, vc4_nir_lower_io, c); NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c); NIR_PASS_V(c->s, nir_lower_idiv); - NIR_PASS_V(c->s, nir_lower_load_const_to_scalar); vc4_optimize_nir(c->s); - NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_local); + NIR_PASS_V(c->s, nir_lower_bool_to_int32); + NIR_PASS_V(c->s, nir_convert_from_ssa, true); if (vc4_debug & VC4_DEBUG_SHADERDB) { @@ -1883,12 +2407,23 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, switch (stage) { case QSTAGE_FRAG: + /* FS threading requires that the thread execute + * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating + * (with no other THRSW afterwards, obviously). If we didn't + * fetch a texture at a top level block, this wouldn't be + * true. + */ + if (c->fs_threaded && !c->last_thrsw_at_top_level) { + c->failed = true; + return c; + } + emit_frag_end(c); break; case QSTAGE_VERT: emit_vert_end(c, - vc4->prog.fs->input_slots, - vc4->prog.fs->num_inputs); + c->vs_key->fs_inputs->input_slots, + c->vs_key->fs_inputs->num_inputs); break; case QSTAGE_COORD: emit_coord_end(c); @@ -1907,6 +2442,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, qir_lower_uniforms(c); qir_schedule_instructions(c); + qir_emit_uniform_stream_resets(c); if (vc4_debug & VC4_DEBUG_QIR) { fprintf(stderr, "%s prog %d/%d QIR:\n", @@ -1944,9 +2480,59 @@ vc4_shader_state_create(struct pipe_context *pctx, if (!so) return NULL; - so->base.tokens = tgsi_dup_tokens(cso->tokens); so->program_id = vc4->next_uncompiled_program_id++; + nir_shader *s; + + if (cso->type == PIPE_SHADER_IR_NIR) { + /* The backend takes ownership of the NIR shader on state + * creation. 
+ */ + s = cso->ir.nir; + + NIR_PASS_V(s, nir_lower_io, nir_var_uniform, + uniforms_type_size, + (nir_lower_io_options)0); + } else { + assert(cso->type == PIPE_SHADER_IR_TGSI); + + if (vc4_debug & VC4_DEBUG_TGSI) { + fprintf(stderr, "prog %d TGSI:\n", + so->program_id); + tgsi_dump(cso->tokens, 0); + fprintf(stderr, "\n"); + } + s = tgsi_to_nir(cso->tokens, &nir_options); + } + + NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform, + type_size, + (nir_lower_io_options)0); + + NIR_PASS_V(s, nir_opt_global_to_local); + NIR_PASS_V(s, nir_lower_regs_to_ssa); + NIR_PASS_V(s, nir_normalize_cubemap_coords); + + NIR_PASS_V(s, nir_lower_load_const_to_scalar); + + vc4_optimize_nir(s); + + NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp); + + /* Garbage collect dead instructions */ + nir_sweep(s); + + so->base.type = PIPE_SHADER_IR_NIR; + so->base.ir.nir = s; + + if (vc4_debug & VC4_DEBUG_NIR) { + fprintf(stderr, "%s prog %d NIR:\n", + gl_shader_stage_name(s->info.stage), + so->program_id); + nir_print_shader(s, stderr); + fprintf(stderr, "\n"); + } + return so; } @@ -1969,18 +2555,87 @@ copy_uniform_state_to_shader(struct vc4_compiled_shader *shader, vc4_set_shader_uniform_dirty_flags(shader); } +static void +vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c, + struct vc4_compiled_shader *shader) +{ + struct vc4_fs_inputs inputs; + + memset(&inputs, 0, sizeof(inputs)); + inputs.input_slots = ralloc_array(shader, + struct vc4_varying_slot, + c->num_input_slots); + + bool input_live[c->num_input_slots]; + + memset(input_live, 0, sizeof(input_live)); + qir_for_each_inst_inorder(inst, c) { + for (int i = 0; i < qir_get_nsrc(inst); i++) { + if (inst->src[i].file == QFILE_VARY) + input_live[inst->src[i].index] = true; + } + } + + for (int i = 0; i < c->num_input_slots; i++) { + struct vc4_varying_slot *slot = &c->input_slots[i]; + + if (!input_live[i]) + continue; + + /* Skip non-VS-output inputs. */ + if (slot->slot == (uint8_t)~0) + continue; + + if (slot->slot == VARYING_SLOT_COL0 || + slot->slot == VARYING_SLOT_COL1 || + slot->slot == VARYING_SLOT_BFC0 || + slot->slot == VARYING_SLOT_BFC1) { + shader->color_inputs |= (1 << inputs.num_inputs); + } + + inputs.input_slots[inputs.num_inputs] = *slot; + inputs.num_inputs++; + } + shader->num_inputs = inputs.num_inputs; + + /* Add our set of inputs to the set of all inputs seen. This way, we + * can have a single pointer that identifies an FS inputs set, + * allowing VS to avoid recompiling when the FS is recompiled (or a + * new one is bound using separate shader objects) but the inputs + * don't change. 
+ */ + struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs); + if (entry) { + shader->fs_inputs = entry->key; + ralloc_free(inputs.input_slots); + } else { + struct vc4_fs_inputs *alloc_inputs; + + alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs); + memcpy(alloc_inputs, &inputs, sizeof(inputs)); + ralloc_steal(alloc_inputs, inputs.input_slots); + _mesa_set_add(vc4->fs_inputs_set, alloc_inputs); + + shader->fs_inputs = alloc_inputs; + } +} + static struct vc4_compiled_shader * vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, struct vc4_key *key) { struct hash_table *ht; uint32_t key_size; + bool try_threading; + if (stage == QSTAGE_FRAG) { ht = vc4->fs_cache; key_size = sizeof(struct vc4_fs_key); + try_threading = vc4->screen->has_threaded_fs; } else { ht = vc4->vs_cache; key_size = sizeof(struct vc4_vs_key); + try_threading = false; } struct vc4_compiled_shader *shader; @@ -1988,45 +2643,23 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, if (entry) return entry->data; - struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key); + struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading); + /* If the FS failed to compile threaded, fall back to single threaded. */ + if (try_threading && c->failed) { + qir_compile_destroy(c); + c = vc4_shader_ntq(vc4, stage, key, false); + } + shader = rzalloc(NULL, struct vc4_compiled_shader); shader->program_id = vc4->next_compiled_program_id++; if (stage == QSTAGE_FRAG) { - bool input_live[c->num_input_slots]; + vc4_setup_compiled_fs_inputs(vc4, c, shader); - memset(input_live, 0, sizeof(input_live)); - list_for_each_entry(struct qinst, inst, &c->instructions, link) { - for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { - if (inst->src[i].file == QFILE_VARY) - input_live[inst->src[i].index] = true; - } - } - - shader->input_slots = ralloc_array(shader, - struct vc4_varying_slot, - c->num_input_slots); - - for (int i = 0; i < c->num_input_slots; i++) { - struct vc4_varying_slot *slot = &c->input_slots[i]; - - if (!input_live[i]) - continue; - - /* Skip non-VS-output inputs. */ - if (slot->slot == (uint8_t)~0) - continue; - - if (slot->slot == VARYING_SLOT_COL0 || - slot->slot == VARYING_SLOT_COL1 || - slot->slot == VARYING_SLOT_BFC0 || - slot->slot == VARYING_SLOT_BFC1) { - shader->color_inputs |= (1 << shader->num_inputs); - } - - shader->input_slots[shader->num_inputs] = *slot; - shader->num_inputs++; - } + /* Note: the temporary clone in c->s has been freed. */ + nir_shader *orig_shader = key->shader_state->base.ir.nir; + if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH)) + shader->disable_early_z = true; } else { shader->num_inputs = c->num_inputs; @@ -2040,9 +2673,17 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, } } - copy_uniform_state_to_shader(shader, c); - shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, - c->qpu_inst_count * sizeof(uint64_t)); + shader->failed = c->failed; + if (c->failed) { + shader->failed = true; + } else { + copy_uniform_state_to_shader(shader, c); + shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, + c->qpu_inst_count * + sizeof(uint64_t)); + } + + shader->fs_threaded = c->fs_threaded; /* Copy the compiler UBO range state to the compiled shader, dropping * out arrays that were never referenced by an indirect load. 
@@ -2077,10 +2718,17 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, } } + if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) { + fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n", + qir_get_stage_name(c->stage), + c->program_id, c->variant_id, + 1 + shader->fs_threaded); + } + qir_compile_destroy(c); struct vc4_key *dup_key; - dup_key = ralloc_size(shader, key_size); + dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */ memcpy(dup_key, key, key_size); _mesa_hash_table_insert(ht, dup_key, shader); @@ -2093,6 +2741,7 @@ vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key, { for (int i = 0; i < texstate->num_textures; i++) { struct pipe_sampler_view *sampler = texstate->textures[i]; + struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler); struct pipe_sampler_state *sampler_state = texstate->samplers[i]; @@ -2113,6 +2762,8 @@ vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key, key->tex[i].compare_func = sampler_state->compare_func; key->tex[i].wrap_s = sampler_state->wrap_s; key->tex[i].wrap_t = sampler_state->wrap_t; + key->tex[i].force_first_level = + vc4_sampler->force_first_level; } } @@ -2122,6 +2773,7 @@ vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key, static void vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) { + struct vc4_job *job = vc4->job; struct vc4_fs_key local_key; struct vc4_fs_key *key = &local_key; @@ -2132,8 +2784,8 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) VC4_DIRTY_RASTERIZER | VC4_DIRTY_SAMPLE_MASK | VC4_DIRTY_FRAGTEX | - VC4_DIRTY_TEXSTATE | - VC4_DIRTY_UNCOMPILED_FS))) { + VC4_DIRTY_UNCOMPILED_FS | + VC4_DIRTY_UBO_1_SIZE))) { return; } @@ -2149,10 +2801,9 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) } else { key->logicop_func = PIPE_LOGICOP_COPY; } - if (vc4->msaa) { + if (job->msaa) { key->msaa = vc4->rasterizer->base.multisample; - key->sample_coverage = (vc4->rasterizer->base.multisample && - vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); + key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage; key->sample_alpha_to_one = vc4->blend->alpha_to_one; } @@ -2165,10 +2816,10 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0; key->depth_enabled = (vc4->zsa->base.depth.enabled || key->stencil_enabled); - if (vc4->zsa->base.alpha.enabled) { - key->alpha_test = true; + if (vc4->zsa->base.alpha.enabled) key->alpha_test_func = vc4->zsa->base.alpha.func; - } + else + key->alpha_test_func = COMPARE_FUNC_ALWAYS; if (key->is_points) { key->point_sprite_mask = @@ -2178,6 +2829,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) PIPE_SPRITE_COORD_UPPER_LEFT); } + key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size; key->light_twoside = vc4->rasterizer->base.light_twoside; struct vc4_compiled_shader *old_fs = vc4->prog.fs; @@ -2186,10 +2838,14 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) return; vc4->dirty |= VC4_DIRTY_COMPILED_FS; + if (vc4->rasterizer->base.flatshade && - old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) { + (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) { vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS; } + + if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs) + vc4->dirty |= VC4_DIRTY_FS_INPUTS; } static void @@ 
-2201,17 +2857,17 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode) if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE | VC4_DIRTY_RASTERIZER | VC4_DIRTY_VERTTEX | - VC4_DIRTY_TEXSTATE | VC4_DIRTY_VTXSTATE | VC4_DIRTY_UNCOMPILED_VS | - VC4_DIRTY_COMPILED_FS))) { + VC4_DIRTY_FS_INPUTS))) { return; } memset(key, 0, sizeof(*key)); vc4_setup_shared_key(vc4, &key->base, &vc4->verttex); key->base.shader_state = vc4->prog.bind_vs; - key->compiled_fs_id = vc4->prog.fs->program_id; + key->fs_inputs = vc4->prog.fs->fs_inputs; + key->clamp_color = vc4->rasterizer->base.clamp_vertex_color; for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++) key->attr_formats[i] = vc4->vtx->pipe[i].src_format; @@ -2228,6 +2884,8 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode) } key->is_coord = true; + /* Coord shaders don't care what the FS inputs are. */ + key->fs_inputs = NULL; struct vc4_compiled_shader *cs = vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base); if (cs != vc4->prog.cs) { @@ -2236,11 +2894,15 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode) } } -void +bool vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode) { vc4_update_compiled_fs(vc4, prim_mode); vc4_update_compiled_vs(vc4, prim_mode); + + return !(vc4->prog.cs->failed || + vc4->prog.vs->failed || + vc4->prog.fs->failed); } static uint32_t @@ -2267,8 +2929,32 @@ vs_cache_compare(const void *key1, const void *key2) return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0; } +static uint32_t +fs_inputs_hash(const void *key) +{ + const struct vc4_fs_inputs *inputs = key; + + return _mesa_hash_data(inputs->input_slots, + sizeof(*inputs->input_slots) * + inputs->num_inputs); +} + +static bool +fs_inputs_compare(const void *key1, const void *key2) +{ + const struct vc4_fs_inputs *inputs1 = key1; + const struct vc4_fs_inputs *inputs2 = key2; + + return (inputs1->num_inputs == inputs2->num_inputs && + memcmp(inputs1->input_slots, + inputs2->input_slots, + sizeof(*inputs1->input_slots) * + inputs1->num_inputs) == 0); +} + static void delete_from_cache_if_matches(struct hash_table *ht, + struct vc4_compiled_shader **last_compile, struct hash_entry *entry, struct vc4_uncompiled_shader *so) { @@ -2278,6 +2964,10 @@ delete_from_cache_if_matches(struct hash_table *ht, struct vc4_compiled_shader *shader = entry->data; _mesa_hash_table_remove(ht, entry); vc4_bo_unreference(&shader->bo); + + if (shader == *last_compile) + *last_compile = NULL; + ralloc_free(shader); } } @@ -2288,13 +2978,16 @@ vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso) struct vc4_context *vc4 = vc4_context(pctx); struct vc4_uncompiled_shader *so = hwcso; - struct hash_entry *entry; - hash_table_foreach(vc4->fs_cache, entry) - delete_from_cache_if_matches(vc4->fs_cache, entry, so); - hash_table_foreach(vc4->vs_cache, entry) - delete_from_cache_if_matches(vc4->vs_cache, entry, so); + hash_table_foreach(vc4->fs_cache, entry) { + delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs, + entry, so); + } + hash_table_foreach(vc4->vs_cache, entry) { + delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs, + entry, so); + } - free((void *)so->base.tokens); + ralloc_free(so->base.ir.nir); free(so); } @@ -2332,6 +3025,8 @@ vc4_program_init(struct pipe_context *pctx) fs_cache_compare); vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash, vs_cache_compare); + vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash, + fs_inputs_compare); } void @@ -2339,7 +3034,6 @@ vc4_program_fini(struct 
pipe_context *pctx) { struct vc4_context *vc4 = vc4_context(pctx); - struct hash_entry *entry; hash_table_foreach(vc4->fs_cache, entry) { struct vc4_compiled_shader *shader = entry->data; vc4_bo_unreference(&shader->bo);
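/*
 * The shader-variant caches above hash and compare their fixed-size key
 * structs byte-for-byte (see vs_cache_compare()'s memcmp() and the
 * analogous FS path), which is why the keys are cleared before being
 * filled: the memset(key, 0, sizeof(*key)) calls and the rzalloc_size()
 * used for the stored copy keep padding bytes deterministic, since stray
 * padding can make two logically equal keys hash or compare differently.
 * A minimal sketch of that pattern with hypothetical names, independent
 * of the driver:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct example_key {
        uint8_t stage;          /* padding bytes usually follow this member */
        uint32_t flags;
};

static void
example_key_init(struct example_key *key, uint8_t stage, uint32_t flags)
{
        memset(key, 0, sizeof(*key));   /* zero padding before memcmp use */
        key->stage = stage;
        key->flags = flags;
}

static bool
example_key_equal(const struct example_key *a, const struct example_key *b)
{
        return memcmp(a, b, sizeof(*a)) == 0;
}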