X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_program.c;h=2d0a52bb5fb88a515f3081b1299b0e936d75c86a;hb=0bf667984b074105be62116fa76be42b2a422e28;hp=1e8542fc27fb23b7c5d033617bccc525a0adf109;hpb=f4d143f0d9736f4413bcb9e778bf92b97684e6f9;p=mesa.git diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c index 1e8542fc27f..2d0a52bb5fb 100644 --- a/src/gallium/drivers/vc4/vc4_program.c +++ b/src/gallium/drivers/vc4/vc4_program.c @@ -24,7 +24,7 @@ #include #include "util/u_format.h" -#include "util/u_hash.h" +#include "util/crc32.h" #include "util/u_math.h" #include "util/u_memory.h" #include "util/ralloc.h" @@ -33,20 +33,30 @@ #include "tgsi/tgsi_parse.h" #include "compiler/nir/nir.h" #include "compiler/nir/nir_builder.h" +#include "compiler/nir_types.h" #include "nir/tgsi_to_nir.h" #include "vc4_context.h" #include "vc4_qpu.h" #include "vc4_qir.h" #include "mesa/state_tracker/st_glsl_types.h" -#ifdef USE_VC4_SIMULATOR -#include "simpenrose/simpenrose.h" -#endif static struct qreg ntq_get_src(struct vc4_compile *c, nir_src src, int i); static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list); +static int +type_size(const struct glsl_type *type) +{ + return glsl_count_attribute_slots(type, false); +} + +static int +uniforms_type_size(const struct glsl_type *type) +{ + return st_glsl_storage_type_size(type, false); +} + static void resize_qreg_array(struct vc4_compile *c, struct qreg **regs, @@ -68,6 +78,23 @@ resize_qreg_array(struct vc4_compile *c, (*regs)[i] = c->undef; } +static void +ntq_emit_thrsw(struct vc4_compile *c) +{ + if (!c->fs_threaded) + return; + + /* Always thread switch after each texture operation for now. + * + * We could do better by batching a bunch of texture fetches up and + * then doing one thread switch and collecting all their results + * afterward. + */ + qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef, + c->undef, c->undef)); + c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL); +} + static struct qreg indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr) { @@ -102,12 +129,44 @@ indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr) /* Clamp to [0, array size). Note that MIN/MAX are signed. */ indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0)); - indirect_offset = qir_MIN(c, indirect_offset, - qir_uniform_ui(c, (range->dst_offset + - range->size - 4))); + indirect_offset = qir_MIN_NOIMM(c, indirect_offset, + qir_uniform_ui(c, (range->dst_offset + + range->size - 4))); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + indirect_offset, + qir_uniform(c, QUNIFORM_UBO_ADDR, 0)); - qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0)); c->num_texture_samples++; + + ntq_emit_thrsw(c); + + return qir_TEX_RESULT(c); +} + +static struct qreg +vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr) +{ + nir_const_value *buffer_index = + nir_src_as_const_value(intr->src[0]); + assert(buffer_index->u32[0] == 1); + assert(c->stage == QSTAGE_FRAG); + + struct qreg offset = ntq_get_src(c, intr->src[1], 0); + + /* Clamp to [0, array size). Note that MIN/MAX are signed. 
*/ + offset = qir_MAX(c, offset, qir_uniform_ui(c, 0)); + offset = qir_MIN_NOIMM(c, offset, + qir_uniform_ui(c, c->fs_key->ubo_1_size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + offset, + qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0])); + + c->num_texture_samples++; + + ntq_emit_thrsw(c); + return qir_TEX_RESULT(c); } @@ -140,10 +199,33 @@ ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def) return qregs; } +/** + * This function is responsible for getting QIR results into the associated + * storage for a NIR instruction. + * + * If it's a NIR SSA def, then we just set the associated hash table entry to + * the new result. + * + * If it's a NIR reg, then we need to update the existing qreg assigned to the + * NIR destination with the incoming value. To do that without introducing + * new MOVs, we require that the incoming qreg either be a uniform, or be + * SSA-defined by the previous QIR instruction in the block and rewritable by + * this function. That lets us sneak ahead and insert the SF flag beforehand + * (knowing that the previous instruction doesn't depend on flags) and rewrite + * its destination to be the NIR reg's destination + */ static void ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan, struct qreg result) { + struct qinst *last_inst = NULL; + if (!list_empty(&c->cur_block->instructions)) + last_inst = (struct qinst *)c->cur_block->instructions.prev; + + assert(result.file == QFILE_UNIF || + (result.file == QFILE_TEMP && + last_inst && last_inst == c->defs[result.index])); + if (dest->is_ssa) { assert(chan < dest->ssa.num_components); @@ -165,14 +247,34 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan, _mesa_hash_table_search(c->def_ht, reg); struct qreg *qregs = entry->data; - /* Conditionally move the result to the destination if the - * channel is active. + /* Insert a MOV if the source wasn't an SSA def in the + * previous instruction. + */ + if (result.file == QFILE_UNIF) { + result = qir_MOV(c, result); + last_inst = c->defs[result.index]; + } + + /* We know they're both temps, so just rewrite index. */ + c->defs[last_inst->dst.index] = NULL; + last_inst->dst.index = qregs[chan].index; + + /* If we're in control flow, then make this update of the reg + * conditional on the execution mask. */ if (c->execute.file != QFILE_NULL) { + last_inst->dst.index = qregs[chan].index; + + /* Set the flags to the current exec mask. To insert + * the SF, we temporarily remove our SSA instruction. + */ + list_del(&last_inst->link); qir_SF(c, c->execute); - qir_MOV_cond(c, QPU_COND_ZS, qregs[chan], result); - } else { - qir_MOV_dest(c, qregs[chan], result); + list_addtail(&last_inst->link, + &c->cur_block->instructions); + + last_inst->cond = QPU_COND_ZS; + last_inst->cond_is_exec_mask = true; } } } @@ -218,7 +320,7 @@ static struct qreg ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr, unsigned src) { - assert(util_is_power_of_two(instr->dest.write_mask)); + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); unsigned chan = ffs(instr->dest.write_mask) - 1; struct qreg r = ntq_get_src(c, instr->src[src].src, instr->src[src].swizzle[chan]); @@ -319,26 +421,26 @@ ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr) /* Perform the clamping required by kernel validation. 
*/ addr = qir_MAX(c, addr, qir_uniform_ui(c, 0)); - addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4)); + addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); - qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); + ntq_emit_thrsw(c); struct qreg tex = qir_TEX_RESULT(c); c->num_texture_samples++; - struct qreg dest[4]; enum pipe_format format = c->key->tex[unit].format; if (util_format_is_depth_or_stencil(format)) { struct qreg scaled = ntq_scale_depth_texture(c, tex); for (int i = 0; i < 4; i++) - dest[i] = scaled; + ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled)); } else { for (int i = 0; i < 4; i++) - dest[i] = qir_UNPACK_8_F(c, tex, i); + ntq_store_dest(c, &instr->dest, i, + qir_UNPACK_8_F(c, tex, i)); } - - for (int i = 0; i < 4; i++) - ntq_store_dest(c, &instr->dest, i, dest[i]); } static void @@ -372,7 +474,7 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) lod = ntq_get_src(c, instr->src[i].src, 0); is_txl = true; break; - case nir_tex_src_comparitor: + case nir_tex_src_comparator: compare = ntq_get_src(c, instr->src[i].src, 0); break; default: @@ -380,6 +482,16 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) } } + if (c->stage != QSTAGE_FRAG && !is_txl) { + /* From the GLSL 1.20 spec: + * + * "If it is mip-mapped and running on the vertex shader, + * then the base texture is used." + */ + is_txl = true; + lod = qir_uniform_ui(c, 0); + } + if (c->key->tex[unit].force_first_level) { lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit); is_txl = true; @@ -410,14 +522,20 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) unit | (is_txl << 16)); } + struct qinst *tmu; if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) { - qir_TEX_R(c, r, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) { - qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit), - texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), + qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, + unit)); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) { @@ -428,14 +546,23 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) t = qir_SAT(c, t); } - qir_TEX_T(c, t, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; - if (is_txl || is_txb) - qir_TEX_B(c, lod, texture_u[next_texture_u++]); + if (is_txl || is_txb) { + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; + } - qir_TEX_S(c, s, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s); + tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++]; c->num_texture_samples++; + + ntq_emit_thrsw(c); + struct qreg tex = qir_TEX_RESULT(c); enum pipe_format format = c->key->tex[unit].format; @@ -448,6 +575,15 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) struct qreg u0 = qir_uniform_f(c, 0.0f); struct qreg u1 = 
qir_uniform_f(c, 1.0f); if (c->key->tex[unit].compare_mode) { + /* From the GL_ARB_shadow spec: + * + * "Let Dt (D subscript t) be the depth texture + * value, in the range [0, 1]. Let R be the + * interpolated texture coordinate clamped to the + * range [0, 1]." + */ + compare = qir_SAT(c, compare); + switch (c->key->tex[unit].compare_func) { case PIPE_FUNC_NEVER: depth_output = qir_uniform_f(c, 0.0f); @@ -502,8 +638,11 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); struct qreg diff = qir_FSUB(c, src, trunc); qir_SF(c, diff); - return qir_SEL(c, QPU_COND_NS, - qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff); + + qir_FADD_dest(c, diff, + diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, diff); } /** @@ -513,15 +652,18 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) static struct qreg ntq_ffloor(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was < 0 in the first place. */ - qir_SF(c, qir_FSUB(c, src, trunc)); + qir_SF(c, qir_FSUB(c, src, result)); - return qir_SEL(c, QPU_COND_NS, - qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc); + struct qinst *sub = qir_FSUB_dest(c, result, + result, qir_uniform_f(c, 1.0)); + sub->cond = QPU_COND_NS; + + return qir_MOV(c, result); } /** @@ -531,36 +673,58 @@ ntq_ffloor(struct vc4_compile *c, struct qreg src) static struct qreg ntq_fceil(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was > 0 in the first place. */ - qir_SF(c, qir_FSUB(c, trunc, src)); + qir_SF(c, qir_FSUB(c, result, src)); + + qir_FADD_dest(c, result, + result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, result); +} + +static struct qreg +ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x) +{ + /* Since we're using a Taylor approximation, we want to have a small + * number of coefficients and take advantage of sin/cos repeating + * every 2pi. We keep our x as close to 0 as we can, since the series + * will be less accurate as |x| increases. (Also, be careful of + * shifting the input x value to be tricky with sin/cos relations, + * because getting accurate values for x==0 is very important for SDL + * rendering) + */ + struct qreg scaled_x = + qir_FMUL(c, x, + qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); + /* Note: FTOI truncates toward 0. 
*/ + struct qreg x_frac = qir_FSUB(c, scaled_x, + qir_ITOF(c, qir_FTOI(c, scaled_x))); + /* Map [0.5, 1] to [-0.5, 0] */ + qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC; + /* Map [-1, -0.5] to [0, 0.5] */ + qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; - return qir_SEL(c, QPU_COND_NS, - qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc); + return x_frac; } static struct qreg ntq_fsin(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -2.0 * M_PI, - pow(2.0 * M_PI, 3) / (3 * 2 * 1), - -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 2.0 * M_PI, + -pow(2.0 * M_PI, 3) / (3 * 2 * 1), + pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, - src, - qir_uniform_f(c, 1.0 / (M_PI * 2.0))); - - struct qreg x = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); + struct qreg x = ntq_shrink_sincos_input_range(c, src); struct qreg x2 = qir_FMUL(c, x, x); struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0])); for (int i = 1; i < ARRAY_SIZE(coeff); i++) { @@ -578,21 +742,15 @@ static struct qreg ntq_fcos(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -1.0f, - pow(2.0 * M_PI, 2) / (2 * 1), - -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), - pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 1.0f, + -pow(2.0 * M_PI, 2) / (2 * 1), + pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, src, - qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); - struct qreg x_frac = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); - + struct qreg x_frac = ntq_shrink_sincos_input_range(c, src); struct qreg sum = qir_uniform_f(c, coeff[0]); struct qreg x2 = qir_FMUL(c, x_frac, x_frac); struct qreg x = x2; /* Current x^2, x^4, or x^6 */ @@ -600,13 +758,10 @@ ntq_fcos(struct vc4_compile *c, struct qreg src) if (i != 1) x = qir_FMUL(c, x, x2); - struct qreg mul = qir_FMUL(c, + sum = qir_FADD(c, qir_FMUL(c, x, - qir_uniform_f(c, coeff[i])); - if (i == 0) - sum = mul; - else - sum = qir_FADD(c, sum, mul); + qir_uniform_f(c, coeff[i])), + sum); } return sum; } @@ -620,7 +775,7 @@ ntq_fsign(struct vc4_compile *c, struct qreg src) qir_MOV_dest(c, t, qir_uniform_f(c, 0.0)); qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC; qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS; - return t; + return qir_MOV(c, t); } static void @@ -799,7 +954,7 @@ ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr) qir_PACK_8_F(c, result, src, i); } - ntq_store_dest(c, &instr->dest.dest, 0, result); + ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result)); } /** Handles sign-extended bitfield extracts for 16 bits. 
*/ @@ -849,24 +1004,24 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, enum qpu_cond cond; switch (compare_instr->op) { - case nir_op_feq: - case nir_op_ieq: + case nir_op_feq32: + case nir_op_ieq32: case nir_op_seq: cond = QPU_COND_ZS; break; - case nir_op_fne: - case nir_op_ine: + case nir_op_fne32: + case nir_op_ine32: case nir_op_sne: cond = QPU_COND_ZC; break; - case nir_op_fge: - case nir_op_ige: - case nir_op_uge: + case nir_op_fge32: + case nir_op_ige32: + case nir_op_uge32: case nir_op_sge: cond = QPU_COND_NC; break; - case nir_op_flt: - case nir_op_ilt: + case nir_op_flt32: + case nir_op_ilt32: case nir_op_slt: cond = QPU_COND_NS; break; @@ -893,7 +1048,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0)); break; - case nir_op_bcsel: + case nir_op_b32csel: *dest = qir_SEL(c, cond, ntq_get_alu_src(c, sel_instr, 1), ntq_get_alu_src(c, sel_instr, 2)); @@ -905,6 +1060,9 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, break; } + /* Make the temporary for nir_store_dest(). */ + *dest = qir_MOV(c, *dest); + return true; } @@ -918,6 +1076,8 @@ static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr, { if (!instr->src[0].src.is_ssa) goto out; + if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu) + goto out; nir_alu_instr *compare = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr); if (!compare) @@ -929,7 +1089,47 @@ static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr, out: qir_SF(c, src[0]); - return qir_SEL(c, QPU_COND_NS, src[1], src[2]); + return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2])); +} + +static struct qreg +ntq_fddx(struct vc4_compile *c, struct qreg src) +{ + /* Make sure that we have a bare temp to use for MUL rotation, so it + * can be allocated to an accumulator. + */ + if (src.pack || src.file != QFILE_TEMP) + src = qir_MOV(c, src); + + struct qreg from_left = qir_ROT_MUL(c, src, 1); + struct qreg from_right = qir_ROT_MUL(c, src, 15); + + /* Distinguish left/right pixels of the quad. */ + qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0), + qir_uniform_ui(c, 1))); + + return qir_MOV(c, qir_SEL(c, QPU_COND_ZS, + qir_FSUB(c, from_right, src), + qir_FSUB(c, src, from_left))); +} + +static struct qreg +ntq_fddy(struct vc4_compile *c, struct qreg src) +{ + if (src.pack || src.file != QFILE_TEMP) + src = qir_MOV(c, src); + + struct qreg from_bottom = qir_ROT_MUL(c, src, 2); + struct qreg from_top = qir_ROT_MUL(c, src, 14); + + /* Distinguish top/bottom pixels of the quad. 
*/ + qir_SF(c, qir_AND(c, + qir_reg(QFILE_QPU_ELEMENT, 0), + qir_uniform_ui(c, 2))); + + return qir_MOV(c, qir_SEL(c, QPU_COND_ZS, + qir_FSUB(c, from_top, src), + qir_FSUB(c, src, from_bottom))); } static void @@ -950,7 +1150,8 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) srcs[i] = ntq_get_src(c, instr->src[i].src, instr->src[i].swizzle[0]); for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) - ntq_store_dest(c, &instr->dest.dest, i, srcs[i]); + ntq_store_dest(c, &instr->dest.dest, i, + qir_MOV(c, srcs[i])); return; } @@ -999,26 +1200,26 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) result = qir_FMAX(c, src[0], src[1]); break; - case nir_op_f2i: - case nir_op_f2u: + case nir_op_f2i32: + case nir_op_f2u32: result = qir_FTOI(c, src[0]); break; - case nir_op_i2f: - case nir_op_u2f: + case nir_op_i2f32: + case nir_op_u2f32: result = qir_ITOF(c, src[0]); break; - case nir_op_b2f: + case nir_op_b2f32: result = qir_AND(c, src[0], qir_uniform_f(c, 1.0)); break; - case nir_op_b2i: + case nir_op_b2i32: result = qir_AND(c, src[0], qir_uniform_ui(c, 1)); break; - case nir_op_i2b: - case nir_op_f2b: + case nir_op_i2b32: + case nir_op_f2b32: qir_SF(c, src[0]); - result = qir_SEL(c, QPU_COND_ZC, - qir_uniform_ui(c, ~0), - qir_uniform_ui(c, 0)); + result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, + qir_uniform_ui(c, ~0), + qir_uniform_ui(c, 0))); break; case nir_op_iadd: @@ -1063,26 +1264,26 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) case nir_op_sne: case nir_op_sge: case nir_op_slt: - case nir_op_feq: - case nir_op_fne: - case nir_op_fge: - case nir_op_flt: - case nir_op_ieq: - case nir_op_ine: - case nir_op_ige: - case nir_op_uge: - case nir_op_ilt: + case nir_op_feq32: + case nir_op_fne32: + case nir_op_fge32: + case nir_op_flt32: + case nir_op_ieq32: + case nir_op_ine32: + case nir_op_ige32: + case nir_op_uge32: + case nir_op_ilt32: if (!ntq_emit_comparison(c, &result, instr, instr)) { fprintf(stderr, "Bad comparison instruction\n"); } break; - case nir_op_bcsel: + case nir_op_b32csel: result = ntq_emit_bcsel(c, instr, src); break; case nir_op_fcsel: qir_SF(c, src[0]); - result = qir_SEL(c, QPU_COND_ZC, src[1], src[2]); + result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2])); break; case nir_op_frcp: @@ -1158,6 +1359,18 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) result = qir_V8MULD(c, src[0], src[1]); break; + case nir_op_fddx: + case nir_op_fddx_coarse: + case nir_op_fddx_fine: + result = ntq_fddx(c, src[0]); + break; + + case nir_op_fddy: + case nir_op_fddy_coarse: + case nir_op_fddy_fine: + result = ntq_fddy(c, src[0]); + break; + default: fprintf(stderr, "unknown NIR ALU inst: "); nir_print_instr(&instr->instr, stderr); @@ -1168,7 +1381,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) /* We have a scalar result, so the instruction should only have a * single channel written to. 
*/ - assert(util_is_power_of_two(instr->dest.write_mask)); + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); ntq_store_dest(c, &instr->dest.dest, ffs(instr->dest.write_mask) - 1, result); } @@ -1184,7 +1397,7 @@ emit_frag_end(struct vc4_compile *c) } uint32_t discard_cond = QPU_COND_ALWAYS; - if (c->discard.file != QFILE_NULL) { + if (c->s->info.fs.uses_discard) { qir_SF(c, c->discard); discard_cond = QPU_COND_ZS; } @@ -1313,7 +1526,7 @@ emit_vert_end(struct vc4_compile *c, struct vc4_varying_slot *fs_inputs, uint32_t num_fs_inputs) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1348,7 +1561,7 @@ emit_vert_end(struct vc4_compile *c, static void emit_coord_end(struct vc4_compile *c) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1371,18 +1584,21 @@ vc4_optimize_nir(struct nir_shader *s) progress = false; NIR_PASS_V(s, nir_lower_vars_to_ssa); - NIR_PASS_V(s, nir_lower_alu_to_scalar); - NIR_PASS_V(s, nir_lower_phis_to_scalar); - + NIR_PASS(progress, s, nir_lower_alu_to_scalar); + NIR_PASS(progress, s, nir_lower_phis_to_scalar); NIR_PASS(progress, s, nir_copy_prop); NIR_PASS(progress, s, nir_opt_remove_phis); NIR_PASS(progress, s, nir_opt_dce); NIR_PASS(progress, s, nir_opt_dead_cf); NIR_PASS(progress, s, nir_opt_cse); - NIR_PASS(progress, s, nir_opt_peephole_select); + NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true); NIR_PASS(progress, s, nir_opt_algebraic); NIR_PASS(progress, s, nir_opt_constant_folding); NIR_PASS(progress, s, nir_opt_undef); + NIR_PASS(progress, s, nir_opt_loop_unroll, + nir_var_shader_in | + nir_var_shader_out | + nir_var_function_temp); } while (progress); } @@ -1487,7 +1703,7 @@ static void ntq_setup_uniforms(struct vc4_compile *c) { nir_foreach_variable(var, &c->s->uniforms) { - uint32_t vec4_count = st_glsl_type_size(var->type); + uint32_t vec4_count = uniforms_type_size(var->type); unsigned vec4_size = 4 * sizeof(float); declare_uniform_range(c, var->data.driver_location * vec4_size, @@ -1539,6 +1755,46 @@ ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr) qregs[i] = qir_uniform_ui(c, 0); } +static void +ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(nir_src_as_const_value(instr->src[0])->u32[0] == 0); + + /* Reads of the per-sample color need to be done in + * order. 
+ */ + int sample_index = (nir_intrinsic_base(instr) - + VC4_NIR_TLB_COLOR_READ_INPUT); + for (int i = 0; i <= sample_index; i++) { + if (c->color_reads[i].file == QFILE_NULL) { + c->color_reads[i] = + qir_TLB_COLOR_READ(c); + } + } + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->color_reads[sample_index])); +} + +static void +ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(instr->num_components == 1); + + nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]); + assert(const_offset && "vc4 doesn't support indirect inputs"); + + if (c->stage == QSTAGE_FRAG && + nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) { + ntq_emit_color_read(c, instr); + return; + } + + uint32_t offset = nir_intrinsic_base(instr) + const_offset->u32[0]; + int comp = nir_intrinsic_component(instr); + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->inputs[offset * 4 + comp])); +} + static void ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) { @@ -1563,6 +1819,11 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) } break; + case nir_intrinsic_load_ubo: + assert(instr->num_components == 1); + ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr)); + break; + case nir_intrinsic_load_user_clip_plane: for (int i = 0; i < instr->num_components; i++) { ntq_store_dest(c, &instr->dest, i, @@ -1616,31 +1877,7 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_input: - assert(instr->num_components == 1); - const_offset = nir_src_as_const_value(instr->src[0]); - assert(const_offset && "vc4 doesn't support indirect inputs"); - if (c->stage == QSTAGE_FRAG && - nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) { - assert(const_offset->u32[0] == 0); - /* Reads of the per-sample color need to be done in - * order. - */ - int sample_index = (nir_intrinsic_base(instr) - - VC4_NIR_TLB_COLOR_READ_INPUT); - for (int i = 0; i <= sample_index; i++) { - if (c->color_reads[i].file == QFILE_NULL) { - c->color_reads[i] = - qir_TLB_COLOR_READ(c); - } - } - ntq_store_dest(c, &instr->dest, 0, - c->color_reads[sample_index]); - } else { - offset = nir_intrinsic_base(instr) + const_offset->u32[0]; - int comp = nir_intrinsic_component(instr); - ntq_store_dest(c, &instr->dest, 0, - c->inputs[offset * 4 + comp]); - } + ntq_emit_load_input(c, instr); break; case nir_intrinsic_store_output: @@ -1669,15 +1906,33 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_discard: - c->discard = qir_uniform_ui(c, ~0); + if (c->execute.file != QFILE_NULL) { + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->discard, + qir_uniform_ui(c, ~0)); + } else { + qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0)); + } break; - case nir_intrinsic_discard_if: - if (c->discard.file == QFILE_NULL) - c->discard = qir_uniform_ui(c, 0); - c->discard = qir_OR(c, c->discard, + case nir_intrinsic_discard_if: { + /* true (~0) if we're discarding */ + struct qreg cond = ntq_get_src(c, instr->src[0], 0); + + if (c->execute.file != QFILE_NULL) { + /* execute == 0 means the channel is active. Invert + * the condition so that we can use zero as "executing + * and discarding." 
+ */ + qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond))); + qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond); + } else { + qir_OR_dest(c, c->discard, c->discard, ntq_get_src(c, instr->src[0], 0)); + } + break; + } default: fprintf(stderr, "Unknown intrinsic: "); @@ -1708,11 +1963,9 @@ ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt) return; } - nir_cf_node *nir_first_else_node = nir_if_first_else_node(if_stmt); - nir_cf_node *nir_last_else_node = nir_if_last_else_node(if_stmt); - nir_block *nir_else_block = nir_cf_node_as_block(nir_first_else_node); + nir_block *nir_else_block = nir_if_first_else_block(if_stmt); bool empty_else_block = - (nir_first_else_node == nir_last_else_node && + (nir_else_block == nir_if_last_else_block(if_stmt) && exec_list_is_empty(&nir_else_block->instr_list)); struct qblock *then_block = qir_new_block(c); @@ -1774,32 +2027,40 @@ ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt) qir_link_blocks(c->cur_block, after_block); qir_set_emit_block(c, after_block); - if (was_top_level) + if (was_top_level) { c->execute = c->undef; - else + c->last_top_block = c->cur_block; + } else { ntq_activate_execute_for_block(c); - + } } static void ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump) { + struct qblock *jump_block; switch (jump->type) { case nir_jump_break: - qir_SF(c, c->execute); - qir_MOV_cond(c, QPU_COND_ZS, c->execute, - qir_uniform_ui(c, c->loop_break_block->index)); + jump_block = c->loop_break_block; break; - case nir_jump_continue: - qir_SF(c, c->execute); - qir_MOV_cond(c, QPU_COND_ZS, c->execute, - qir_uniform_ui(c, c->loop_cont_block->index)); + jump_block = c->loop_cont_block; break; - - case nir_jump_return: - unreachable("All returns shouold be lowered\n"); + default: + unreachable("Unsupported jump type\n"); } + + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, + qir_uniform_ui(c, jump_block->index)); + + /* Jump to the destination block if everyone has taken the jump. 
*/ + qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index))); + qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS); + struct qblock *new_block = qir_new_block(c); + qir_link_blocks(c->cur_block, jump_block); + qir_link_blocks(c->cur_block, new_block); + qir_set_emit_block(c, new_block); } static void @@ -1895,10 +2156,12 @@ ntq_emit_loop(struct vc4_compile *c, nir_loop *loop) qir_link_blocks(c->cur_block, c->loop_break_block); qir_set_emit_block(c, c->loop_break_block); - if (was_top_level) + if (was_top_level) { c->execute = c->undef; - else + c->last_top_block = c->cur_block; + } else { ntq_activate_execute_for_block(c); + } c->loop_break_block = save_loop_break_block; c->loop_cont_block = save_loop_cont_block; @@ -1949,6 +2212,9 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl) static void nir_to_qir(struct vc4_compile *c) { + if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard) + c->discard = qir_MOV(c, qir_uniform_ui(c, 0)); + ntq_setup_inputs(c); ntq_setup_outputs(c); ntq_setup_uniforms(c); @@ -1963,16 +2229,29 @@ nir_to_qir(struct vc4_compile *c) } static const nir_shader_compiler_options nir_options = { + .lower_all_io_to_temps = true, .lower_extract_byte = true, .lower_extract_word = true, + .lower_fdiv = true, .lower_ffma = true, .lower_flrp32 = true, .lower_fpow = true, .lower_fsat = true, .lower_fsqrt = true, + .lower_ldexp = true, .lower_negate = true, + .native_integers = true, + .max_unroll_iterations = 32, }; +const void * +vc4_screen_get_compiler_options(struct pipe_screen *pscreen, + enum pipe_shader_ir ir, + enum pipe_shader_type shader) +{ + return &nir_options; +} + static int count_nir_instrs(nir_shader *nir) { @@ -1990,7 +2269,7 @@ count_nir_instrs(nir_shader *nir) static struct vc4_compile * vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, - struct vc4_key *key) + struct vc4_key *key, bool fs_threaded) { struct vc4_compile *c = qir_compile_init(); @@ -2000,6 +2279,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, c->program_id = key->shader_state->program_id; c->variant_id = p_atomic_inc_return(&key->shader_state->compiled_variant_count); + c->fs_threaded = fs_threaded; c->key = key; switch (stage) { @@ -2022,8 +2302,15 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, c->s = nir_shader_clone(c, key->shader_state->base.ir.nir); - if (stage == QSTAGE_FRAG) + if (stage == QSTAGE_FRAG) { + if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) { + NIR_PASS_V(c->s, nir_lower_alpha_test, + c->fs_key->alpha_test_func, + c->fs_key->sample_alpha_to_one && + c->fs_key->msaa); + } NIR_PASS_V(c->s, vc4_nir_lower_blend, c); + } struct nir_lower_tex_options tex_options = { /* We would need to implement txs, but we don't want the @@ -2076,7 +2363,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, if (stage == QSTAGE_FRAG) { NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables); } else { - NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables); + NIR_PASS_V(c->s, nir_lower_clip_vs, + c->key->ucp_enables, false); NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out); } @@ -2097,6 +2385,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, vc4_optimize_nir(c->s); + NIR_PASS_V(c->s, nir_lower_bool_to_int32); + NIR_PASS_V(c->s, nir_convert_from_ssa, true); if (vc4_debug & VC4_DEBUG_SHADERDB) { @@ -2117,6 +2407,17 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, switch (stage) { case QSTAGE_FRAG: + /* FS threading requires that the thread execute + * QPU_SIG_LAST_THREAD_SWITCH 
exactly once before terminating + * (with no other THRSW afterwards, obviously). If we didn't + * fetch a texture at a top level block, this wouldn't be + * true. + */ + if (c->fs_threaded && !c->last_thrsw_at_top_level) { + c->failed = true; + return c; + } + emit_frag_end(c); break; case QSTAGE_VERT: @@ -2181,24 +2482,42 @@ vc4_shader_state_create(struct pipe_context *pctx, so->program_id = vc4->next_uncompiled_program_id++; - if (vc4_debug & VC4_DEBUG_TGSI) { - fprintf(stderr, "prog %d TGSI:\n", - so->program_id); - tgsi_dump(cso->tokens, 0); - fprintf(stderr, "\n"); + nir_shader *s; + + if (cso->type == PIPE_SHADER_IR_NIR) { + /* The backend takes ownership of the NIR shader on state + * creation. + */ + s = cso->ir.nir; + + NIR_PASS_V(s, nir_lower_io, nir_var_uniform, + uniforms_type_size, + (nir_lower_io_options)0); + } else { + assert(cso->type == PIPE_SHADER_IR_TGSI); + + if (vc4_debug & VC4_DEBUG_TGSI) { + fprintf(stderr, "prog %d TGSI:\n", + so->program_id); + tgsi_dump(cso->tokens, 0); + fprintf(stderr, "\n"); + } + s = tgsi_to_nir(cso->tokens, &nir_options); } - nir_shader *s = tgsi_to_nir(cso->tokens, &nir_options); + NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform, + type_size, + (nir_lower_io_options)0); NIR_PASS_V(s, nir_opt_global_to_local); - NIR_PASS_V(s, nir_convert_to_ssa); + NIR_PASS_V(s, nir_lower_regs_to_ssa); NIR_PASS_V(s, nir_normalize_cubemap_coords); NIR_PASS_V(s, nir_lower_load_const_to_scalar); vc4_optimize_nir(s); - NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local); + NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp); /* Garbage collect dead instructions */ nir_sweep(s); @@ -2208,7 +2527,7 @@ vc4_shader_state_create(struct pipe_context *pctx, if (vc4_debug & VC4_DEBUG_NIR) { fprintf(stderr, "%s prog %d NIR:\n", - gl_shader_stage_name(s->stage), + gl_shader_stage_name(s->info.stage), so->program_id); nir_print_shader(s, stderr); fprintf(stderr, "\n"); @@ -2251,7 +2570,7 @@ vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c, memset(input_live, 0, sizeof(input_live)); qir_for_each_inst_inorder(inst, c) { - for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { + for (int i = 0; i < qir_get_nsrc(inst); i++) { if (inst->src[i].file == QFILE_VARY) input_live[inst->src[i].index] = true; } @@ -2307,12 +2626,16 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, { struct hash_table *ht; uint32_t key_size; + bool try_threading; + if (stage == QSTAGE_FRAG) { ht = vc4->fs_cache; key_size = sizeof(struct vc4_fs_key); + try_threading = vc4->screen->has_threaded_fs; } else { ht = vc4->vs_cache; key_size = sizeof(struct vc4_vs_key); + try_threading = false; } struct vc4_compiled_shader *shader; @@ -2320,7 +2643,13 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, if (entry) return entry->data; - struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key); + struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading); + /* If the FS failed to compile threaded, fall back to single threaded. 
*/ + if (try_threading && c->failed) { + qir_compile_destroy(c); + c = vc4_shader_ntq(vc4, stage, key, false); + } + shader = rzalloc(NULL, struct vc4_compiled_shader); shader->program_id = vc4->next_compiled_program_id++; @@ -2344,9 +2673,17 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, } } - copy_uniform_state_to_shader(shader, c); - shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, - c->qpu_inst_count * sizeof(uint64_t)); + shader->failed = c->failed; + if (c->failed) { + shader->failed = true; + } else { + copy_uniform_state_to_shader(shader, c); + shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, + c->qpu_inst_count * + sizeof(uint64_t)); + } + + shader->fs_threaded = c->fs_threaded; /* Copy the compiler UBO range state to the compiled shader, dropping * out arrays that were never referenced by an indirect load. @@ -2381,10 +2718,17 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, } } + if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) { + fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n", + qir_get_stage_name(c->stage), + c->program_id, c->variant_id, + 1 + shader->fs_threaded); + } + qir_compile_destroy(c); struct vc4_key *dup_key; - dup_key = ralloc_size(shader, key_size); + dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */ memcpy(dup_key, key, key_size); _mesa_hash_table_insert(ht, dup_key, shader); @@ -2429,6 +2773,7 @@ vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key, static void vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) { + struct vc4_job *job = vc4->job; struct vc4_fs_key local_key; struct vc4_fs_key *key = &local_key; @@ -2439,7 +2784,8 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) VC4_DIRTY_RASTERIZER | VC4_DIRTY_SAMPLE_MASK | VC4_DIRTY_FRAGTEX | - VC4_DIRTY_UNCOMPILED_FS))) { + VC4_DIRTY_UNCOMPILED_FS | + VC4_DIRTY_UBO_1_SIZE))) { return; } @@ -2455,10 +2801,9 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) } else { key->logicop_func = PIPE_LOGICOP_COPY; } - if (vc4->msaa) { + if (job->msaa) { key->msaa = vc4->rasterizer->base.multisample; - key->sample_coverage = (vc4->rasterizer->base.multisample && - vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); + key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage; key->sample_alpha_to_one = vc4->blend->alpha_to_one; } @@ -2471,10 +2816,10 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0; key->depth_enabled = (vc4->zsa->base.depth.enabled || key->stencil_enabled); - if (vc4->zsa->base.alpha.enabled) { - key->alpha_test = true; + if (vc4->zsa->base.alpha.enabled) key->alpha_test_func = vc4->zsa->base.alpha.func; - } + else + key->alpha_test_func = COMPARE_FUNC_ALWAYS; if (key->is_points) { key->point_sprite_mask = @@ -2484,6 +2829,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) PIPE_SPRITE_COORD_UPPER_LEFT); } + key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size; key->light_twoside = vc4->rasterizer->base.light_twoside; struct vc4_compiled_shader *old_fs = vc4->prog.fs; @@ -2494,11 +2840,11 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) vc4->dirty |= VC4_DIRTY_COMPILED_FS; if (vc4->rasterizer->base.flatshade && - old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) { + (!old_fs || 
vc4->prog.fs->color_inputs != old_fs->color_inputs)) { vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS; } - if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs) + if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs) vc4->dirty |= VC4_DIRTY_FS_INPUTS; } @@ -2548,11 +2894,15 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode) } } -void +bool vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode) { vc4_update_compiled_fs(vc4, prim_mode); vc4_update_compiled_vs(vc4, prim_mode); + + return !(vc4->prog.cs->failed || + vc4->prog.vs->failed || + vc4->prog.fs->failed); } static uint32_t @@ -2604,6 +2954,7 @@ fs_inputs_compare(const void *key1, const void *key2) static void delete_from_cache_if_matches(struct hash_table *ht, + struct vc4_compiled_shader **last_compile, struct hash_entry *entry, struct vc4_uncompiled_shader *so) { @@ -2613,6 +2964,10 @@ delete_from_cache_if_matches(struct hash_table *ht, struct vc4_compiled_shader *shader = entry->data; _mesa_hash_table_remove(ht, entry); vc4_bo_unreference(&shader->bo); + + if (shader == *last_compile) + *last_compile = NULL; + ralloc_free(shader); } } @@ -2623,11 +2978,14 @@ vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso) struct vc4_context *vc4 = vc4_context(pctx); struct vc4_uncompiled_shader *so = hwcso; - struct hash_entry *entry; - hash_table_foreach(vc4->fs_cache, entry) - delete_from_cache_if_matches(vc4->fs_cache, entry, so); - hash_table_foreach(vc4->vs_cache, entry) - delete_from_cache_if_matches(vc4->vs_cache, entry, so); + hash_table_foreach(vc4->fs_cache, entry) { + delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs, + entry, so); + } + hash_table_foreach(vc4->vs_cache, entry) { + delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs, + entry, so); + } ralloc_free(so->base.ir.nir); free(so); @@ -2676,7 +3034,6 @@ vc4_program_fini(struct pipe_context *pctx) { struct vc4_context *vc4 = vc4_context(pctx); - struct hash_entry *entry; hash_table_foreach(vc4->fs_cache, entry) { struct vc4_compiled_shader *shader = entry->data; vc4_bo_unreference(&shader->bo);
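
The patch above rewrites ntq_ffract()/ntq_ffloor()/ntq_fceil() to use conditionally-executed FADD/FSUB into the truncated value instead of qir_SEL(); the underlying math is unchanged. As a cross-check, here is a minimal host-side C sketch of that trunc-and-correct idiom. It is separate from the diff: the emulate_* names are invented for illustration, and FTOI is assumed to behave like a C float-to-int cast for values that fit in an int32.

static float
emulate_ftrunc(float src)
{
        /* Truncate toward zero, the way the QPU's FTOI does (valid for
         * inputs that fit in an int32).
         */
        return (float)(int)src;
}

/* fract(x) = x - floor(x): truncate, then wrap negatives into [0, 1). */
static float
emulate_ffract(float src)
{
        float diff = src - emulate_ftrunc(src);

        if (diff < 0.0f)                /* the FADD predicated on NS */
                diff += 1.0f;
        return diff;
}

/* floor(): truncation is already correct unless we truncated a negative
 * non-integer, in which case step down by one.
 */
static float
emulate_ffloor(float src)
{
        float result = emulate_ftrunc(src);

        if (src - result < 0.0f)        /* the FSUB predicated on NS */
                result -= 1.0f;
        return result;
}

/* ceil(): mirror image of floor(). */
static float
emulate_fceil(float src)
{
        float result = emulate_ftrunc(src);

        if (result - src < 0.0f)        /* the FADD predicated on NS */
                result += 1.0f;
        return result;
}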
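
Similarly, ntq_shrink_sincos_input_range() reduces the sin/cos argument to a turn fraction in [-0.5, 0.5] before the Taylor expansion, and ntq_fsin()'s coeff[] table now holds the series for sin(2*pi*t) directly. A hedged host-side sketch of that reduction plus the five-term series follows; it is separate from the patch, shrink_sincos_input_range()/approx_sin() are made-up names, and the plain if() statements stand in for the NC/NS-predicated FADD/FSUB in the QIR sequence.

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Reduce x to a turn fraction t in [-0.5, 0.5] with sinf(x) == sinf(2*pi*t). */
static float
shrink_sincos_input_range(float x)
{
        float scaled_x = x * (float)(1.0 / (2.0 * M_PI));
        float t = scaled_x - (float)(int)scaled_x; /* FTOI truncates toward 0 */

        if (t >= 0.5f)          /* map [0.5, 1) to [-0.5, 0): FSUB, cond NC */
                t -= 1.0f;
        if (t < -0.5f)          /* map (-1, -0.5) to (0, 0.5): FADD, cond NS */
                t += 1.0f;
        return t;
}

/* Five-term odd Taylor series for sin(2*pi*t), matching ntq_fsin()'s coeff[]. */
static float
approx_sin(float x)
{
        const double two_pi = 2.0 * M_PI;
        const double coeff[] = {
                two_pi,
                -pow(two_pi, 3) / (3 * 2 * 1),
                pow(two_pi, 5) / (5 * 4 * 3 * 2 * 1),
                -pow(two_pi, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(two_pi, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };
        float t = shrink_sincos_input_range(x);
        float t2 = t * t;
        float term = t;                 /* t, then t^3, t^5, ... */
        float sum = (float)(coeff[0] * t);

        for (int i = 1; i < 5; i++) {
                term *= t2;
                sum += (float)(coeff[i] * term);
        }
        return sum;
}

int
main(void)
{
        for (int i = -6; i <= 6; i++)
                printf("x=%2d  sinf=% f  approx=% f\n",
                       i, sinf((float)i), approx_sin((float)i));
        return 0;
}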