X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_program.c;h=6a429d427b10a05a85b90f85c7e40820dbd104c1;hb=0ef168d513059b276fb9b70ab95af874983ab08b;hp=809c96dda3dfb02dbb6ad2b7f909bca0d6ef0ae0;hpb=52d2b28f7f107fbaff023533a15058055fa73bf0;p=mesa.git diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c index 809c96dda3d..6a429d427b1 100644 --- a/src/gallium/drivers/vc4/vc4_program.c +++ b/src/gallium/drivers/vc4/vc4_program.c @@ -23,8 +23,9 @@ */ #include -#include "util/u_format.h" -#include "util/u_hash.h" +#include "util/format/u_format.h" +#include "util/crc32.h" +#include "util/u_helpers.h" #include "util/u_math.h" #include "util/u_memory.h" #include "util/ralloc.h" @@ -33,17 +34,23 @@ #include "tgsi/tgsi_parse.h" #include "compiler/nir/nir.h" #include "compiler/nir/nir_builder.h" +#include "compiler/nir_types.h" #include "nir/tgsi_to_nir.h" #include "vc4_context.h" #include "vc4_qpu.h" #include "vc4_qir.h" -#include "mesa/state_tracker/st_glsl_types.h" static struct qreg ntq_get_src(struct vc4_compile *c, nir_src src, int i); static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list); +static int +type_size(const struct glsl_type *type, bool bindless) +{ + return glsl_count_attribute_slots(type, false); +} + static void resize_qreg_array(struct vc4_compile *c, struct qreg **regs, @@ -65,46 +72,68 @@ resize_qreg_array(struct vc4_compile *c, (*regs)[i] = c->undef; } +static void +ntq_emit_thrsw(struct vc4_compile *c) +{ + if (!c->fs_threaded) + return; + + /* Always thread switch after each texture operation for now. + * + * We could do better by batching a bunch of texture fetches up and + * then doing one thread switch and collecting all their results + * afterward. + */ + qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef, + c->undef, c->undef)); + c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL); +} + static struct qreg indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr) { struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0); - uint32_t offset = nir_intrinsic_base(intr); - struct vc4_compiler_ubo_range *range = NULL; - unsigned i; - for (i = 0; i < c->num_uniform_ranges; i++) { - range = &c->ubo_ranges[i]; - if (offset >= range->src_offset && - offset < range->src_offset + range->size) { - break; - } - } - /* The driver-location-based offset always has to be within a declared - * uniform range. - */ - assert(range); - if (!range->used) { - range->used = true; - range->dst_offset = c->next_ubo_dst_offset; - c->next_ubo_dst_offset += range->size; - c->num_ubo_ranges++; - } - offset -= range->src_offset; + /* Clamp to [0, array size). Note that MIN/MAX are signed. */ + uint32_t range = nir_intrinsic_range(intr); + indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0)); + indirect_offset = qir_MIN_NOIMM(c, indirect_offset, + qir_uniform_ui(c, range - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + indirect_offset, + qir_uniform(c, QUNIFORM_UBO0_ADDR, + nir_intrinsic_base(intr))); + + c->num_texture_samples++; + + ntq_emit_thrsw(c); - /* Adjust for where we stored the TGSI register base. 
*/ - indirect_offset = qir_ADD(c, indirect_offset, - qir_uniform_ui(c, (range->dst_offset + - offset))); + return qir_TEX_RESULT(c); +} + +static struct qreg +vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr) +{ + int buffer_index = nir_src_as_uint(intr->src[0]); + assert(buffer_index == 1); + assert(c->stage == QSTAGE_FRAG); + + struct qreg offset = ntq_get_src(c, intr->src[1], 0); /* Clamp to [0, array size). Note that MIN/MAX are signed. */ - indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0)); - indirect_offset = qir_MIN(c, indirect_offset, - qir_uniform_ui(c, (range->dst_offset + - range->size - 4))); + offset = qir_MAX(c, offset, qir_uniform_ui(c, 0)); + offset = qir_MIN_NOIMM(c, offset, + qir_uniform_ui(c, c->fs_key->ubo_1_size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + offset, + qir_uniform(c, QUNIFORM_UBO1_ADDR, 0)); - qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0)); c->num_texture_samples++; + + ntq_emit_thrsw(c); + return qir_TEX_RESULT(c); } @@ -157,7 +186,7 @@ ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan, struct qreg result) { struct qinst *last_inst = NULL; - if (!list_empty(&c->cur_block->instructions)) + if (!list_is_empty(&c->cur_block->instructions)) last_inst = (struct qinst *)c->cur_block->instructions.prev; assert(result.file == QFILE_UNIF || @@ -258,7 +287,7 @@ static struct qreg ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr, unsigned src) { - assert(util_is_power_of_two(instr->dest.write_mask)); + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); unsigned chan = ffs(instr->dest.write_mask) - 1; struct qreg r = ntq_get_src(c, instr->src[src].src, instr->src[src].swizzle[chan]); @@ -359,9 +388,12 @@ ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr) /* Perform the clamping required by kernel validation. */ addr = qir_MAX(c, addr, qir_uniform_ui(c, 0)); - addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4)); + addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4)); + + qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0), + addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); - qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit)); + ntq_emit_thrsw(c); struct qreg tex = qir_TEX_RESULT(c); c->num_texture_samples++; @@ -409,7 +441,7 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) lod = ntq_get_src(c, instr->src[i].src, 0); is_txl = true; break; - case nir_tex_src_comparitor: + case nir_tex_src_comparator: compare = ntq_get_src(c, instr->src[i].src, 0); break; default: @@ -417,6 +449,16 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) } } + if (c->stage != QSTAGE_FRAG && !is_txl) { + /* From the GLSL 1.20 spec: + * + * "If it is mip-mapped and running on the vertex shader, + * then the base texture is used." 
+ */ + is_txl = true; + lod = qir_uniform_ui(c, 0); + } + if (c->key->tex[unit].force_first_level) { lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit); is_txl = true; @@ -447,14 +489,20 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) unit | (is_txl << 16)); } + struct qinst *tmu; if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) { - qir_TEX_R(c, r, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER || c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) { - qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit), - texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), + qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, + unit)); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; } if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) { @@ -465,14 +513,23 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) t = qir_SAT(c, t); } - qir_TEX_T(c, t, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; - if (is_txl || is_txb) - qir_TEX_B(c, lod, texture_u[next_texture_u++]); + if (is_txl || is_txb) { + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod); + tmu->src[qir_get_tex_uniform_src(tmu)] = + texture_u[next_texture_u++]; + } - qir_TEX_S(c, s, texture_u[next_texture_u++]); + tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s); + tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++]; c->num_texture_samples++; + + ntq_emit_thrsw(c); + struct qreg tex = qir_TEX_RESULT(c); enum pipe_format format = c->key->tex[unit].format; @@ -485,6 +542,15 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr) struct qreg u0 = qir_uniform_f(c, 0.0f); struct qreg u1 = qir_uniform_f(c, 1.0f); if (c->key->tex[unit].compare_mode) { + /* From the GL_ARB_shadow spec: + * + * "Let Dt (D subscript t) be the depth texture + * value, in the range [0, 1]. Let R be the + * interpolated texture coordinate clamped to the + * range [0, 1]." + */ + compare = qir_SAT(c, compare); + switch (c->key->tex[unit].compare_func) { case PIPE_FUNC_NEVER: depth_output = qir_uniform_f(c, 0.0f); @@ -539,9 +605,11 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); struct qreg diff = qir_FSUB(c, src, trunc); qir_SF(c, diff); - return qir_MOV(c, qir_SEL(c, QPU_COND_NS, - qir_FADD(c, diff, qir_uniform_f(c, 1.0)), - diff)); + + qir_FADD_dest(c, diff, + diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, diff); } /** @@ -551,16 +619,18 @@ ntq_ffract(struct vc4_compile *c, struct qreg src) static struct qreg ntq_ffloor(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was < 0 in the first place. 
*/ - qir_SF(c, qir_FSUB(c, src, trunc)); + qir_SF(c, qir_FSUB(c, src, result)); + + struct qinst *sub = qir_FSUB_dest(c, result, + result, qir_uniform_f(c, 1.0)); + sub->cond = QPU_COND_NS; - return qir_MOV(c, qir_SEL(c, QPU_COND_NS, - qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), - trunc)); + return qir_MOV(c, result); } /** @@ -570,37 +640,58 @@ ntq_ffloor(struct vc4_compile *c, struct qreg src) static struct qreg ntq_fceil(struct vc4_compile *c, struct qreg src) { - struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src)); + struct qreg result = qir_ITOF(c, qir_FTOI(c, src)); /* This will be < 0 if we truncated and the truncation was of a value * that was > 0 in the first place. */ - qir_SF(c, qir_FSUB(c, trunc, src)); + qir_SF(c, qir_FSUB(c, result, src)); - return qir_MOV(c, qir_SEL(c, QPU_COND_NS, - qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), - trunc)); + qir_FADD_dest(c, result, + result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return qir_MOV(c, result); +} + +static struct qreg +ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x) +{ + /* Since we're using a Taylor approximation, we want to have a small + * number of coefficients and take advantage of sin/cos repeating + * every 2pi. We keep our x as close to 0 as we can, since the series + * will be less accurate as |x| increases. (Also, be careful of + * shifting the input x value to be tricky with sin/cos relations, + * because getting accurate values for x==0 is very important for SDL + * rendering) + */ + struct qreg scaled_x = + qir_FMUL(c, x, + qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); + /* Note: FTOI truncates toward 0. */ + struct qreg x_frac = qir_FSUB(c, scaled_x, + qir_ITOF(c, qir_FTOI(c, scaled_x))); + /* Map [0.5, 1] to [-0.5, 0] */ + qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC; + /* Map [-1, -0.5] to [0, 0.5] */ + qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5))); + qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS; + + return x_frac; } static struct qreg ntq_fsin(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -2.0 * M_PI, - pow(2.0 * M_PI, 3) / (3 * 2 * 1), - -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 2.0 * M_PI, + -pow(2.0 * M_PI, 3) / (3 * 2 * 1), + pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, - src, - qir_uniform_f(c, 1.0 / (M_PI * 2.0))); - - struct qreg x = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); + struct qreg x = ntq_shrink_sincos_input_range(c, src); struct qreg x2 = qir_FMUL(c, x, x); struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0])); for (int i = 1; i < ARRAY_SIZE(coeff); i++) { @@ -618,21 +709,15 @@ static struct qreg ntq_fcos(struct vc4_compile *c, struct qreg src) { float coeff[] = { - -1.0f, - pow(2.0 * M_PI, 2) / (2 * 1), - -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), - pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), - -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), - pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + 1.0f, + -pow(2.0 * M_PI, 2) / (2 * 1), + pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1), + pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1), + -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 
4 * 3 * 2 * 1), }; - struct qreg scaled_x = - qir_FMUL(c, src, - qir_uniform_f(c, 1.0f / (M_PI * 2.0f))); - struct qreg x_frac = qir_FADD(c, - ntq_ffract(c, scaled_x), - qir_uniform_f(c, -0.5)); - + struct qreg x_frac = ntq_shrink_sincos_input_range(c, src); struct qreg sum = qir_uniform_f(c, coeff[0]); struct qreg x2 = qir_FMUL(c, x_frac, x_frac); struct qreg x = x2; /* Current x^2, x^4, or x^6 */ @@ -640,13 +725,10 @@ ntq_fcos(struct vc4_compile *c, struct qreg src) if (i != 1) x = qir_FMUL(c, x, x2); - struct qreg mul = qir_FMUL(c, + sum = qir_FADD(c, qir_FMUL(c, x, - qir_uniform_f(c, coeff[i])); - if (i == 0) - sum = mul; - else - sum = qir_FADD(c, sum, mul); + qir_uniform_f(c, coeff[i])), + sum); } return sum; } @@ -745,31 +827,13 @@ add_output(struct vc4_compile *c, c->output_slots[decl_offset].swizzle = swizzle; } -static void -declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size) -{ - unsigned array_id = c->num_uniform_ranges++; - if (array_id >= c->ubo_ranges_array_size) { - c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2, - array_id + 1); - c->ubo_ranges = reralloc(c, c->ubo_ranges, - struct vc4_compiler_ubo_range, - c->ubo_ranges_array_size); - } - - c->ubo_ranges[array_id].dst_offset = 0; - c->ubo_ranges[array_id].src_offset = start; - c->ubo_ranges[array_id].size = size; - c->ubo_ranges[array_id].used = false; -} - static bool ntq_src_is_only_ssa_def_user(nir_src *src) { if (!src->is_ssa) return false; - if (!list_empty(&src->ssa->if_uses)) + if (!list_is_empty(&src->ssa->if_uses)) return false; return (src->ssa->uses.next == &src->use_link && @@ -889,24 +953,24 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, enum qpu_cond cond; switch (compare_instr->op) { - case nir_op_feq: - case nir_op_ieq: + case nir_op_feq32: + case nir_op_ieq32: case nir_op_seq: cond = QPU_COND_ZS; break; - case nir_op_fne: - case nir_op_ine: + case nir_op_fne32: + case nir_op_ine32: case nir_op_sne: cond = QPU_COND_ZC; break; - case nir_op_fge: - case nir_op_ige: - case nir_op_uge: + case nir_op_fge32: + case nir_op_ige32: + case nir_op_uge32: case nir_op_sge: cond = QPU_COND_NC; break; - case nir_op_flt: - case nir_op_ilt: + case nir_op_flt32: + case nir_op_ilt32: case nir_op_slt: cond = QPU_COND_NS; break; @@ -933,7 +997,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest, qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0)); break; - case nir_op_bcsel: + case nir_op_b32csel: *dest = qir_SEL(c, cond, ntq_get_alu_src(c, sel_instr, 1), ntq_get_alu_src(c, sel_instr, 2)); @@ -1065,8 +1129,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) struct qreg result; switch (instr->op) { - case nir_op_fmov: - case nir_op_imov: + case nir_op_mov: result = qir_MOV(c, src[0]); break; case nir_op_fmul: @@ -1085,22 +1148,22 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) result = qir_FMAX(c, src[0], src[1]); break; - case nir_op_f2i: - case nir_op_f2u: + case nir_op_f2i32: + case nir_op_f2u32: result = qir_FTOI(c, src[0]); break; - case nir_op_i2f: - case nir_op_u2f: + case nir_op_i2f32: + case nir_op_u2f32: result = qir_ITOF(c, src[0]); break; - case nir_op_b2f: + case nir_op_b2f32: result = qir_AND(c, src[0], qir_uniform_f(c, 1.0)); break; - case nir_op_b2i: + case nir_op_b2i32: result = qir_AND(c, src[0], qir_uniform_ui(c, 1)); break; - case nir_op_i2b: - case nir_op_f2b: + case nir_op_i2b32: + case nir_op_f2b32: qir_SF(c, src[0]); result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, qir_uniform_ui(c, ~0), @@ -1149,21 +1212,21 @@ 
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) case nir_op_sne: case nir_op_sge: case nir_op_slt: - case nir_op_feq: - case nir_op_fne: - case nir_op_fge: - case nir_op_flt: - case nir_op_ieq: - case nir_op_ine: - case nir_op_ige: - case nir_op_uge: - case nir_op_ilt: + case nir_op_feq32: + case nir_op_fne32: + case nir_op_fge32: + case nir_op_flt32: + case nir_op_ieq32: + case nir_op_ine32: + case nir_op_ige32: + case nir_op_uge32: + case nir_op_ilt32: if (!ntq_emit_comparison(c, &result, instr, instr)) { fprintf(stderr, "Bad comparison instruction\n"); } break; - case nir_op_bcsel: + case nir_op_b32csel: result = ntq_emit_bcsel(c, instr, src); break; case nir_op_fcsel: @@ -1266,7 +1329,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr) /* We have a scalar result, so the instruction should only have a * single channel written to. */ - assert(util_is_power_of_two(instr->dest.write_mask)); + assert(util_is_power_of_two_or_zero(instr->dest.write_mask)); ntq_store_dest(c, &instr->dest.dest, ffs(instr->dest.write_mask) - 1, result); } @@ -1282,7 +1345,7 @@ emit_frag_end(struct vc4_compile *c) } uint32_t discard_cond = QPU_COND_ALWAYS; - if (c->s->info->fs.uses_discard) { + if (c->s->info.fs.uses_discard) { qir_SF(c, c->discard); discard_cond = QPU_COND_ZS; } @@ -1379,11 +1442,6 @@ emit_point_size_write(struct vc4_compile *c) else point_size = qir_uniform_f(c, 1.0); - /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835, - * BCM21553). - */ - point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125)); - qir_VPM_WRITE(c, point_size); } @@ -1411,7 +1469,7 @@ emit_vert_end(struct vc4_compile *c, struct vc4_varying_slot *fs_inputs, uint32_t num_fs_inputs) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1446,7 +1504,7 @@ emit_vert_end(struct vc4_compile *c, static void emit_coord_end(struct vc4_compile *c) { - struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]); + struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]); emit_stub_vpm_read(c); @@ -1464,22 +1522,48 @@ static void vc4_optimize_nir(struct nir_shader *s) { bool progress; + unsigned lower_flrp = + (s->options->lower_flrp16 ? 16 : 0) | + (s->options->lower_flrp32 ? 32 : 0) | + (s->options->lower_flrp64 ? 64 : 0); do { progress = false; NIR_PASS_V(s, nir_lower_vars_to_ssa); - NIR_PASS(progress, s, nir_lower_alu_to_scalar); + NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL); NIR_PASS(progress, s, nir_lower_phis_to_scalar); NIR_PASS(progress, s, nir_copy_prop); NIR_PASS(progress, s, nir_opt_remove_phis); NIR_PASS(progress, s, nir_opt_dce); NIR_PASS(progress, s, nir_opt_dead_cf); NIR_PASS(progress, s, nir_opt_cse); - NIR_PASS(progress, s, nir_opt_peephole_select, 8); + NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true); NIR_PASS(progress, s, nir_opt_algebraic); NIR_PASS(progress, s, nir_opt_constant_folding); + if (lower_flrp != 0) { + bool lower_flrp_progress = false; + + NIR_PASS(lower_flrp_progress, s, nir_lower_flrp, + lower_flrp, + false /* always_precise */, + s->options->lower_ffma); + if (lower_flrp_progress) { + NIR_PASS(progress, s, nir_opt_constant_folding); + progress = true; + } + + /* Nothing should rematerialize any flrps, so we only + * need to do this lowering once. 
+ */ + lower_flrp = 0; + } + NIR_PASS(progress, s, nir_opt_undef); + NIR_PASS(progress, s, nir_opt_loop_unroll, + nir_var_shader_in | + nir_var_shader_out | + nir_var_function_temp); } while (progress); } @@ -1525,11 +1609,8 @@ ntq_setup_inputs(struct vc4_compile *c) if (c->stage == QSTAGE_FRAG) { if (var->data.location == VARYING_SLOT_POS) { emit_fragcoord_input(c, loc); - } else if (var->data.location == VARYING_SLOT_PNTC || - (var->data.location >= VARYING_SLOT_VAR0 && - (c->fs_key->point_sprite_mask & - (1 << (var->data.location - - VARYING_SLOT_VAR0))))) { + } else if (util_varying_is_point_coord(var->data.location, + c->fs_key->point_sprite_mask)) { c->inputs[loc * 4 + 0] = c->point_x; c->inputs[loc * 4 + 1] = c->point_y; } else { @@ -1580,19 +1661,6 @@ ntq_setup_outputs(struct vc4_compile *c) } } -static void -ntq_setup_uniforms(struct vc4_compile *c) -{ - nir_foreach_variable(var, &c->s->uniforms) { - uint32_t vec4_count = st_glsl_type_size(var->type); - unsigned vec4_size = 4 * sizeof(float); - - declare_uniform_range(c, var->data.driver_location * vec4_size, - vec4_count * vec4_size); - - } -} - /** * Sets up the mapping from nir_register to struct qreg *. * @@ -1619,7 +1687,7 @@ ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr) { struct qreg *qregs = ntq_init_ssa_def(c, &instr->def); for (int i = 0; i < instr->def.num_components; i++) - qregs[i] = qir_uniform_ui(c, instr->value.u32[i]); + qregs[i] = qir_uniform_ui(c, instr->value[i].u32); _mesa_hash_table_insert(c->def_ht, &instr->def, qregs); } @@ -1636,18 +1704,57 @@ ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr) qregs[i] = qir_uniform_ui(c, 0); } +static void +ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(nir_src_as_uint(instr->src[0]) == 0); + + /* Reads of the per-sample color need to be done in + * order. 
+ */ + int sample_index = (nir_intrinsic_base(instr) - + VC4_NIR_TLB_COLOR_READ_INPUT); + for (int i = 0; i <= sample_index; i++) { + if (c->color_reads[i].file == QFILE_NULL) { + c->color_reads[i] = + qir_TLB_COLOR_READ(c); + } + } + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->color_reads[sample_index])); +} + +static void +ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr) +{ + assert(instr->num_components == 1); + assert(nir_src_is_const(instr->src[0]) && + "vc4 doesn't support indirect inputs"); + + if (c->stage == QSTAGE_FRAG && + nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) { + ntq_emit_color_read(c, instr); + return; + } + + uint32_t offset = nir_intrinsic_base(instr) + + nir_src_as_uint(instr->src[0]); + int comp = nir_intrinsic_component(instr); + ntq_store_dest(c, &instr->dest, 0, + qir_MOV(c, c->inputs[offset * 4 + comp])); +} + static void ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) { - nir_const_value *const_offset; unsigned offset; switch (instr->intrinsic) { case nir_intrinsic_load_uniform: assert(instr->num_components == 1); - const_offset = nir_src_as_const_value(instr->src[0]); - if (const_offset) { - offset = nir_intrinsic_base(instr) + const_offset->u32[0]; + if (nir_src_is_const(instr->src[0])) { + offset = nir_intrinsic_base(instr) + + nir_src_as_uint(instr->src[0]); assert(offset % 4 == 0); /* We need dwords */ offset = offset / 4; @@ -1660,8 +1767,13 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) } break; + case nir_intrinsic_load_ubo: + assert(instr->num_components == 1); + ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr)); + break; + case nir_intrinsic_load_user_clip_plane: - for (int i = 0; i < instr->num_components; i++) { + for (int i = 0; i < nir_intrinsic_dest_components(instr); i++) { ntq_store_dest(c, &instr->dest, i, qir_uniform(c, QUNIFORM_USER_CLIP_PLANE, nir_intrinsic_ucp_id(instr) * @@ -1713,37 +1825,14 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr) break; case nir_intrinsic_load_input: - assert(instr->num_components == 1); - const_offset = nir_src_as_const_value(instr->src[0]); - assert(const_offset && "vc4 doesn't support indirect inputs"); - if (c->stage == QSTAGE_FRAG && - nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) { - assert(const_offset->u32[0] == 0); - /* Reads of the per-sample color need to be done in - * order. 
- */ - int sample_index = (nir_intrinsic_base(instr) - - VC4_NIR_TLB_COLOR_READ_INPUT); - for (int i = 0; i <= sample_index; i++) { - if (c->color_reads[i].file == QFILE_NULL) { - c->color_reads[i] = - qir_TLB_COLOR_READ(c); - } - } - ntq_store_dest(c, &instr->dest, 0, - qir_MOV(c, c->color_reads[sample_index])); - } else { - offset = nir_intrinsic_base(instr) + const_offset->u32[0]; - int comp = nir_intrinsic_component(instr); - ntq_store_dest(c, &instr->dest, 0, - qir_MOV(c, c->inputs[offset * 4 + comp])); - } + ntq_emit_load_input(c, instr); break; case nir_intrinsic_store_output: - const_offset = nir_src_as_const_value(instr->src[1]); - assert(const_offset && "vc4 doesn't support indirect outputs"); - offset = nir_intrinsic_base(instr) + const_offset->u32[0]; + assert(nir_src_is_const(instr->src[1]) && + "vc4 doesn't support indirect outputs"); + offset = nir_intrinsic_base(instr) + + nir_src_as_uint(instr->src[1]); /* MSAA color outputs are the only case where we have an * output that's not lowered to being a store of a single 32 @@ -1887,32 +1976,40 @@ ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt) qir_link_blocks(c->cur_block, after_block); qir_set_emit_block(c, after_block); - if (was_top_level) + if (was_top_level) { c->execute = c->undef; - else + c->last_top_block = c->cur_block; + } else { ntq_activate_execute_for_block(c); - + } } static void ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump) { + struct qblock *jump_block; switch (jump->type) { case nir_jump_break: - qir_SF(c, c->execute); - qir_MOV_cond(c, QPU_COND_ZS, c->execute, - qir_uniform_ui(c, c->loop_break_block->index)); + jump_block = c->loop_break_block; break; - case nir_jump_continue: - qir_SF(c, c->execute); - qir_MOV_cond(c, QPU_COND_ZS, c->execute, - qir_uniform_ui(c, c->loop_cont_block->index)); + jump_block = c->loop_cont_block; break; - - case nir_jump_return: - unreachable("All returns shouold be lowered\n"); + default: + unreachable("Unsupported jump type\n"); } + + qir_SF(c, c->execute); + qir_MOV_cond(c, QPU_COND_ZS, c->execute, + qir_uniform_ui(c, jump_block->index)); + + /* Jump to the destination block if everyone has taken the jump. */ + qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index))); + qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS); + struct qblock *new_block = qir_new_block(c); + qir_link_blocks(c->cur_block, jump_block); + qir_link_blocks(c->cur_block, new_block); + qir_set_emit_block(c, new_block); } static void @@ -2008,10 +2105,12 @@ ntq_emit_loop(struct vc4_compile *c, nir_loop *loop) qir_link_blocks(c->cur_block, c->loop_break_block); qir_set_emit_block(c, c->loop_break_block); - if (was_top_level) + if (was_top_level) { c->execute = c->undef; - else + c->last_top_block = c->cur_block; + } else { ntq_activate_execute_for_block(c); + } c->loop_break_block = save_loop_break_block; c->loop_cont_block = save_loop_cont_block; @@ -2062,13 +2161,11 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl) static void nir_to_qir(struct vc4_compile *c) { - if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard) + if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard) c->discard = qir_MOV(c, qir_uniform_ui(c, 0)); ntq_setup_inputs(c); ntq_setup_outputs(c); - ntq_setup_uniforms(c); - ntq_setup_registers(c, &c->s->registers); /* Find the main function and emit the body. 
*/ nir_foreach_function(function, c->s) { @@ -2079,20 +2176,27 @@ nir_to_qir(struct vc4_compile *c) } static const nir_shader_compiler_options nir_options = { + .lower_all_io_to_temps = true, .lower_extract_byte = true, .lower_extract_word = true, + .lower_fdiv = true, .lower_ffma = true, .lower_flrp32 = true, + .lower_fmod = true, .lower_fpow = true, .lower_fsat = true, .lower_fsqrt = true, + .lower_ldexp = true, .lower_negate = true, - .native_integers = true, + .lower_rotate = true, + .lower_to_scalar = true, + .max_unroll_iterations = 32, }; const void * vc4_screen_get_compiler_options(struct pipe_screen *pscreen, - enum pipe_shader_ir ir, unsigned shader) + enum pipe_shader_ir ir, + enum pipe_shader_type shader) { return &nir_options; } @@ -2114,7 +2218,7 @@ count_nir_instrs(nir_shader *nir) static struct vc4_compile * vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, - struct vc4_key *key) + struct vc4_key *key, bool fs_threaded) { struct vc4_compile *c = qir_compile_init(); @@ -2124,6 +2228,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, c->program_id = key->shader_state->program_id; c->variant_id = p_atomic_inc_return(&key->shader_state->compiled_variant_count); + c->fs_threaded = fs_threaded; c->key = key; switch (stage) { @@ -2146,8 +2251,16 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, c->s = nir_shader_clone(c, key->shader_state->base.ir.nir); - if (stage == QSTAGE_FRAG) + if (stage == QSTAGE_FRAG) { + if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) { + NIR_PASS_V(c->s, nir_lower_alpha_test, + c->fs_key->alpha_test_func, + c->fs_key->sample_alpha_to_one && + c->fs_key->msaa, + NULL); + } NIR_PASS_V(c->s, vc4_nir_lower_blend, c); + } struct nir_lower_tex_options tex_options = { /* We would need to implement txs, but we don't want the @@ -2191,16 +2304,18 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, NIR_PASS_V(c->s, nir_lower_tex, &tex_options); if (c->fs_key && c->fs_key->light_twoside) - NIR_PASS_V(c->s, nir_lower_two_sided_color); + NIR_PASS_V(c->s, nir_lower_two_sided_color, true); if (c->vs_key && c->vs_key->clamp_color) NIR_PASS_V(c->s, nir_lower_clamp_color_outputs); if (c->key->ucp_enables) { if (stage == QSTAGE_FRAG) { - NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables); + NIR_PASS_V(c->s, nir_lower_clip_fs, + c->key->ucp_enables, false); } else { - NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables); + NIR_PASS_V(c->s, nir_lower_clip_vs, + c->key->ucp_enables, false, false, NULL); NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out); } @@ -2217,10 +2332,27 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, NIR_PASS_V(c->s, vc4_nir_lower_io, c); NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c); - NIR_PASS_V(c->s, nir_lower_idiv); + NIR_PASS_V(c->s, nir_lower_idiv, nir_lower_idiv_fast); vc4_optimize_nir(c->s); + /* Do late algebraic optimization to turn add(a, neg(b)) back into + * subs, then the mandatory cleanup after algebraic. Note that it may + * produce fnegs, and if so then we need to keep running to squash + * fneg(fneg(a)). 
+ */ + bool more_late_algebraic = true; + while (more_late_algebraic) { + more_late_algebraic = false; + NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late); + NIR_PASS_V(c->s, nir_opt_constant_folding); + NIR_PASS_V(c->s, nir_copy_prop); + NIR_PASS_V(c->s, nir_opt_dce); + NIR_PASS_V(c->s, nir_opt_cse); + } + + NIR_PASS_V(c->s, nir_lower_bool_to_int32); + NIR_PASS_V(c->s, nir_convert_from_ssa, true); if (vc4_debug & VC4_DEBUG_SHADERDB) { @@ -2241,6 +2373,17 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage, switch (stage) { case QSTAGE_FRAG: + /* FS threading requires that the thread execute + * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating + * (with no other THRSW afterwards, obviously). If we didn't + * fetch a texture at a top level block, this wouldn't be + * true. + */ + if (c->fs_threaded && !c->last_thrsw_at_top_level) { + c->failed = true; + return c; + } + emit_frag_end(c); break; case QSTAGE_VERT: @@ -2312,7 +2455,7 @@ vc4_shader_state_create(struct pipe_context *pctx, * creation. */ s = cso->ir.nir; - } else { + } else { assert(cso->type == PIPE_SHADER_IR_TGSI); if (vc4_debug & VC4_DEBUG_TGSI) { @@ -2321,18 +2464,23 @@ vc4_shader_state_create(struct pipe_context *pctx, tgsi_dump(cso->tokens, 0); fprintf(stderr, "\n"); } - s = tgsi_to_nir(cso->tokens, &nir_options); + s = tgsi_to_nir(cso->tokens, pctx->screen, false); } - NIR_PASS_V(s, nir_opt_global_to_local); - NIR_PASS_V(s, nir_convert_to_ssa); + if (s->info.stage == MESA_SHADER_VERTEX) + NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f); + + NIR_PASS_V(s, nir_lower_io, nir_var_shader_in | nir_var_shader_out, + type_size, (nir_lower_io_options)0); + + NIR_PASS_V(s, nir_lower_regs_to_ssa); NIR_PASS_V(s, nir_normalize_cubemap_coords); NIR_PASS_V(s, nir_lower_load_const_to_scalar); vc4_optimize_nir(s); - NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local); + NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp, NULL); /* Garbage collect dead instructions */ nir_sweep(s); @@ -2342,7 +2490,7 @@ vc4_shader_state_create(struct pipe_context *pctx, if (vc4_debug & VC4_DEBUG_NIR) { fprintf(stderr, "%s prog %d NIR:\n", - gl_shader_stage_name(s->stage), + gl_shader_stage_name(s->info.stage), so->program_id); nir_print_shader(s, stderr); fprintf(stderr, "\n"); @@ -2385,7 +2533,7 @@ vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c, memset(input_live, 0, sizeof(input_live)); qir_for_each_inst_inorder(inst, c) { - for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) { + for (int i = 0; i < qir_get_nsrc(inst); i++) { if (inst->src[i].file == QFILE_VARY) input_live[inst->src[i].index] = true; } @@ -2441,12 +2589,16 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, { struct hash_table *ht; uint32_t key_size; + bool try_threading; + if (stage == QSTAGE_FRAG) { ht = vc4->fs_cache; key_size = sizeof(struct vc4_fs_key); + try_threading = vc4->screen->has_threaded_fs; } else { ht = vc4->vs_cache; key_size = sizeof(struct vc4_vs_key); + try_threading = false; } struct vc4_compiled_shader *shader; @@ -2454,7 +2606,13 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, if (entry) return entry->data; - struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key); + struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading); + /* If the FS failed to compile threaded, fall back to single threaded. 
*/ + if (try_threading && c->failed) { + qir_compile_destroy(c); + c = vc4_shader_ntq(vc4, stage, key, false); + } + shader = rzalloc(NULL, struct vc4_compiled_shader); shader->program_id = vc4->next_compiled_program_id++; @@ -2463,7 +2621,7 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, /* Note: the temporary clone in c->s has been freed. */ nir_shader *orig_shader = key->shader_state->base.ir.nir; - if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH)) + if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH)) shader->disable_early_z = true; } else { shader->num_inputs = c->num_inputs; @@ -2478,41 +2636,23 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage, } } - copy_uniform_state_to_shader(shader, c); - shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, - c->qpu_inst_count * sizeof(uint64_t)); - - /* Copy the compiler UBO range state to the compiled shader, dropping - * out arrays that were never referenced by an indirect load. - * - * (Note that QIR dead code elimination of an array access still - * leaves that array alive, though) - */ - if (c->num_ubo_ranges) { - shader->num_ubo_ranges = c->num_ubo_ranges; - shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range, - c->num_ubo_ranges); - uint32_t j = 0; - for (int i = 0; i < c->num_uniform_ranges; i++) { - struct vc4_compiler_ubo_range *range = - &c->ubo_ranges[i]; - if (!range->used) - continue; - - shader->ubo_ranges[j].dst_offset = range->dst_offset; - shader->ubo_ranges[j].src_offset = range->src_offset; - shader->ubo_ranges[j].size = range->size; - shader->ubo_size += c->ubo_ranges[i].size; - j++; - } + shader->failed = c->failed; + if (c->failed) { + shader->failed = true; + } else { + copy_uniform_state_to_shader(shader, c); + shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts, + c->qpu_inst_count * + sizeof(uint64_t)); } - if (shader->ubo_size) { - if (vc4_debug & VC4_DEBUG_SHADERDB) { - fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n", - qir_get_stage_name(c->stage), - c->program_id, c->variant_id, - shader->ubo_size / 4); - } + + shader->fs_threaded = c->fs_threaded; + + if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) { + fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n", + qir_get_stage_name(c->stage), + c->program_id, c->variant_id, + 1 + shader->fs_threaded); } qir_compile_destroy(c); @@ -2574,7 +2714,8 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) VC4_DIRTY_RASTERIZER | VC4_DIRTY_SAMPLE_MASK | VC4_DIRTY_FRAGTEX | - VC4_DIRTY_UNCOMPILED_FS))) { + VC4_DIRTY_UNCOMPILED_FS | + VC4_DIRTY_UBO_1_SIZE))) { return; } @@ -2592,8 +2733,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) } if (job->msaa) { key->msaa = vc4->rasterizer->base.multisample; - key->sample_coverage = (vc4->rasterizer->base.multisample && - vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); + key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1); key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage; key->sample_alpha_to_one = vc4->blend->alpha_to_one; } @@ -2606,10 +2746,10 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0; key->depth_enabled = (vc4->zsa->base.depth.enabled || key->stencil_enabled); - if (vc4->zsa->base.alpha.enabled) { - key->alpha_test = true; + if (vc4->zsa->base.alpha.enabled) key->alpha_test_func = vc4->zsa->base.alpha.func; - } + else + key->alpha_test_func 
= COMPARE_FUNC_ALWAYS; if (key->is_points) { key->point_sprite_mask = @@ -2619,6 +2759,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) PIPE_SPRITE_COORD_UPPER_LEFT); } + key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size; key->light_twoside = vc4->rasterizer->base.light_twoside; struct vc4_compiled_shader *old_fs = vc4->prog.fs; @@ -2629,11 +2770,11 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode) vc4->dirty |= VC4_DIRTY_COMPILED_FS; if (vc4->rasterizer->base.flatshade && - old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) { + (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) { vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS; } - if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs) + if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs) vc4->dirty |= VC4_DIRTY_FS_INPUTS; } @@ -2683,11 +2824,15 @@ vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode) } } -void +bool vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode) { vc4_update_compiled_fs(vc4, prim_mode); vc4_update_compiled_vs(vc4, prim_mode); + + return !(vc4->prog.cs->failed || + vc4->prog.vs->failed || + vc4->prog.fs->failed); } static uint32_t @@ -2739,6 +2884,7 @@ fs_inputs_compare(const void *key1, const void *key2) static void delete_from_cache_if_matches(struct hash_table *ht, + struct vc4_compiled_shader **last_compile, struct hash_entry *entry, struct vc4_uncompiled_shader *so) { @@ -2748,6 +2894,10 @@ delete_from_cache_if_matches(struct hash_table *ht, struct vc4_compiled_shader *shader = entry->data; _mesa_hash_table_remove(ht, entry); vc4_bo_unreference(&shader->bo); + + if (shader == *last_compile) + *last_compile = NULL; + ralloc_free(shader); } } @@ -2758,11 +2908,14 @@ vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso) struct vc4_context *vc4 = vc4_context(pctx); struct vc4_uncompiled_shader *so = hwcso; - struct hash_entry *entry; - hash_table_foreach(vc4->fs_cache, entry) - delete_from_cache_if_matches(vc4->fs_cache, entry, so); - hash_table_foreach(vc4->vs_cache, entry) - delete_from_cache_if_matches(vc4->vs_cache, entry, so); + hash_table_foreach(vc4->fs_cache, entry) { + delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs, + entry, so); + } + hash_table_foreach(vc4->vs_cache, entry) { + delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs, + entry, so); + } ralloc_free(so->base.ir.nir); free(so); @@ -2811,7 +2964,6 @@ vc4_program_fini(struct pipe_context *pctx) { struct vc4_context *vc4 = vc4_context(pctx); - struct hash_entry *entry; hash_table_foreach(vc4->fs_cache, entry) { struct vc4_compiled_shader *shader = entry->data; vc4_bo_unreference(&shader->bo);
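/* Illustration of the offset clamp used by the reworked
 * indirect_uniform_load()/vc4_ubo_load()/ntq_emit_txf() paths above: the TMU
 * lookup fetches a whole 32-bit word, so the last valid byte offset is
 * size - 4, and QIR MIN/MAX are signed, hence the clamp against 0 first.
 * clamp_tmu_offset() is a made-up host-side helper for this sketch, not
 * driver code.
 */
#include <stdint.h>

static int32_t
clamp_tmu_offset(int32_t offset, int32_t size)
{
        if (offset < 0)
                offset = 0;
        if (offset > size - 4)
                offset = size - 4;
        return offset;
}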
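/* Illustration of the shadow-compare path in ntq_emit_tex() after the
 * qir_SAT() addition: per the GL_ARB_shadow wording quoted above, the
 * reference value R is clamped to [0, 1] before being compared against the
 * depth texture value Dt.  The hunk only shows the PIPE_FUNC_NEVER case, so
 * the remaining cases below assume the standard GL compare semantics; the
 * enum and function are local stand-ins, not driver code, and the real code
 * emits conditional QIR selects rather than branching.
 */
enum model_compare_func {
        FUNC_NEVER, FUNC_LESS, FUNC_EQUAL, FUNC_LEQUAL,
        FUNC_GREATER, FUNC_NOTEQUAL, FUNC_GEQUAL, FUNC_ALWAYS,
};

static float
model_shadow_compare(float ref, float depth, enum model_compare_func func)
{
        if (ref < 0.0f)
                ref = 0.0f;
        else if (ref > 1.0f)
                ref = 1.0f;

        switch (func) {
        case FUNC_NEVER:    return 0.0f;
        case FUNC_LESS:     return ref <  depth ? 1.0f : 0.0f;
        case FUNC_EQUAL:    return ref == depth ? 1.0f : 0.0f;
        case FUNC_LEQUAL:   return ref <= depth ? 1.0f : 0.0f;
        case FUNC_GREATER:  return ref >  depth ? 1.0f : 0.0f;
        case FUNC_NOTEQUAL: return ref != depth ? 1.0f : 0.0f;
        case FUNC_GEQUAL:   return ref >= depth ? 1.0f : 0.0f;
        case FUNC_ALWAYS:   return 1.0f;
        }
        return 0.0f;
}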
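/* A minimal host-side sketch of the sin/cos rework above, illustration only
 * (none of this is driver code).  ntq_shrink_sincos_input_range() wraps
 * x/(2*pi) into [-0.5, 0.5) so the Taylor coefficients keep their natural
 * signs and the series stays accurate near x == 0.  The loop body of
 * ntq_fsin() is cut off by the hunk above, so the "x *= t2; sum += coeff[i]*x"
 * accumulation below is an assumption, and the helper names are made up for
 * the sketch.
 */
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static float
shrink_sincos_input_range(float x)
{
        /* t = fract(x / 2pi), with FTOI-style truncation toward zero,
         * then wrapped into [-0.5, 0.5).
         */
        float scaled_x = x * (float)(1.0 / (M_PI * 2.0));
        float t = scaled_x - truncf(scaled_x);

        if (t - 0.5f >= 0.0f)   /* QPU_COND_NC: map [0.5, 1] to [-0.5, 0] */
                t -= 1.0f;
        if (t + 0.5f < 0.0f)    /* QPU_COND_NS: map [-1, -0.5] to [0, 0.5] */
                t += 1.0f;
        return t;
}

static float
model_fsin(float src)
{
        /* sin(2*pi*t) = (2*pi)*t - (2*pi)^3/3! * t^3 + (2*pi)^5/5! * t^5 - ... */
        float coeff[] = {
                2.0 * M_PI,
                -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };
        float t = shrink_sincos_input_range(src);
        float t2 = t * t;
        float x = t;            /* current odd power of t */
        float sum = x * coeff[0];

        for (unsigned i = 1; i < sizeof(coeff) / sizeof(coeff[0]); i++) {
                x *= t2;
                sum += x * coeff[i];
        }
        return sum;
}

int
main(void)
{
        /* Compare against libm; the error stays small because |t| <= 0.5. */
        for (int i = -6; i <= 6; i++)
                printf("sin(%d) ~ %f (libm %f)\n", i, model_fsin(i), sinf(i));
        return 0;
}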