X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_program.c;h=eb8b3a2c377568690035a3589facd4c955e32834;hb=33886474d646134f9784771a0ded3510a0180515;hp=4865bcbd283337eb0c38a45fec7b896b25aa4f5e;hpb=b2309393039b2ec0cc00a8e6fd828c60c4ef1e11;p=mesa.git
diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index 4865bcbd283..eb8b3a2c377 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -33,6 +33,7 @@
 #include "tgsi/tgsi_parse.h"
 #include "compiler/nir/nir.h"
 #include "compiler/nir/nir_builder.h"
+#include "compiler/nir_types.h"
 #include "nir/tgsi_to_nir.h"
 #include "vc4_context.h"
 #include "vc4_qpu.h"
@@ -44,6 +45,18 @@ ntq_get_src(struct vc4_compile *c, nir_src src, int i);
 static void
 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
 
+static int
+type_size(const struct glsl_type *type)
+{
+        return glsl_count_attribute_slots(type, false);
+}
+
+static int
+uniforms_type_size(const struct glsl_type *type)
+{
+        return st_glsl_storage_type_size(type, false);
+}
+
 static void
 resize_qreg_array(struct vc4_compile *c,
                   struct qreg **regs,
@@ -131,6 +144,32 @@ indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
         return qir_TEX_RESULT(c);
 }
 
+static struct qreg
+vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
+{
+        nir_const_value *buffer_index =
+                nir_src_as_const_value(intr->src[0]);
+        assert(buffer_index->u32[0] == 1);
+        assert(c->stage == QSTAGE_FRAG);
+
+        struct qreg offset = ntq_get_src(c, intr->src[1], 0);
+
+        /* Clamp to [0, array size). Note that MIN/MAX are signed. */
+        offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
+        offset = qir_MIN_NOIMM(c, offset,
+                               qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
+
+        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+                     offset,
+                     qir_uniform(c, QUNIFORM_UBO_ADDR, buffer_index->u32[0]));
+
+        c->num_texture_samples++;
+
+        ntq_emit_thrsw(c);
+
+        return qir_TEX_RESULT(c);
+}
+
 nir_ssa_def *
 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
 {
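The clamp in vc4_ubo_load() above keeps a non-constant UBO offset inside the bound buffer. Because the QPU MIN/MAX comparisons here are signed, the MAX against 0 has to happen before the MIN against the buffer bound, or a negative offset would slip through. A plain-C model of the intended semantics (the function name is illustrative, not part of the driver):

#include <stdint.h>

/* Reference model of the clamp emitted above: raise negative offsets to
 * 0 first (the comparisons are signed), then limit the offset so the
 * 4-byte read stays inside UBO 1. Assumes ubo_1_size >= 4. */
static int32_t
clamp_ubo_offset(int32_t offset, int32_t ubo_1_size)
{
        if (offset < 0)                  /* qir_MAX(offset, 0) */
                offset = 0;
        if (offset > ubo_1_size - 4)     /* qir_MIN_NOIMM(offset, size - 4) */
                offset = ubo_1_size - 4;
        return offset;
}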
@@ -281,7 +320,7 @@ static struct qreg
 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                 unsigned src)
 {
-        assert(util_is_power_of_two(instr->dest.write_mask));
+        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
         unsigned chan = ffs(instr->dest.write_mask) - 1;
         struct qreg r = ntq_get_src(c, instr->src[src].src,
                                     instr->src[src].swizzle[chan]);
@@ -647,25 +686,45 @@ ntq_fceil(struct vc4_compile *c, struct qreg src)
         return qir_MOV(c, result);
 }
 
+static struct qreg
+ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
+{
+        /* Since we're using a Taylor approximation, we want to have a small
+         * number of coefficients and take advantage of sin/cos repeating
+         * every 2pi. We keep our x as close to 0 as we can, since the series
+         * will be less accurate as |x| increases. (Also, be careful about
+         * using sin/cos identities to shift the input x value, because
+         * getting accurate values for x==0 is very important for SDL
+         * rendering.)
+         */
+        struct qreg scaled_x =
+                qir_FMUL(c, x,
+                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
+        /* Note: FTOI truncates toward 0. */
+        struct qreg x_frac = qir_FSUB(c, scaled_x,
+                                      qir_ITOF(c, qir_FTOI(c, scaled_x)));
+        /* Map [0.5, 1] to [-0.5, 0] */
+        qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
+        qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
+        /* Map [-1, -0.5] to [0, 0.5] */
+        qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
+        qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+        return x_frac;
+}
+
 static struct qreg
 ntq_fsin(struct vc4_compile *c, struct qreg src)
 {
         float coeff[] = {
-                -2.0 * M_PI,
-                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
-                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
-                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                2.0 * M_PI,
+                -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
+                pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
+                pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
         };
 
-        struct qreg scaled_x =
-                qir_FMUL(c,
-                         src,
-                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
-
-        struct qreg x = qir_FADD(c,
-                                 ntq_ffract(c, scaled_x),
-                                 qir_uniform_f(c, -0.5));
+        struct qreg x = ntq_shrink_sincos_input_range(c, src);
         struct qreg x2 = qir_FMUL(c, x, x);
         struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
         for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
@@ -683,21 +742,15 @@ static struct qreg
 ntq_fcos(struct vc4_compile *c, struct qreg src)
 {
         float coeff[] = {
-                -1.0f,
-                pow(2.0 * M_PI, 2) / (2 * 1),
-                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
-                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                1.0f,
+                -pow(2.0 * M_PI, 2) / (2 * 1),
+                pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
+                pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
         };
 
-        struct qreg scaled_x =
-                qir_FMUL(c, src,
-                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
-        struct qreg x_frac = qir_FADD(c,
-                                      ntq_ffract(c, scaled_x),
-                                      qir_uniform_f(c, -0.5));
-
+        struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
         struct qreg sum = qir_uniform_f(c, coeff[0]);
         struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
         struct qreg x = x2; /* Current x^2, x^4, or x^6 */
@@ -705,13 +758,10 @@ ntq_fcos(struct vc4_compile *c, struct qreg src)
                 if (i != 1)
                         x = qir_FMUL(c, x, x2);
 
-                struct qreg mul = qir_FMUL(c,
+                sum = qir_FADD(c, qir_FMUL(c,
                                            x,
-                                           qir_uniform_f(c, coeff[i]));
-                if (i == 0)
-                        sum = mul;
-                else
-                        sum = qir_FADD(c, sum, mul);
+                                           qir_uniform_f(c, coeff[i])),
+                               sum);
         }
         return sum;
 }
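For reference, the math implemented by ntq_shrink_sincos_input_range() and ntq_fsin() above, written as plain C (ref_sin is a hypothetical name; the driver evaluates this in QIR). The input is scaled by 1/2pi, reduced to [-0.5, 0.5], and a degree-9 Taylor polynomial for sin(2*pi*t) is summed. Note why the coefficient signs flip in this diff: the old code offset the reduced argument by -0.5, which negates the series; the new range reduction centers on 0, so the natural signs apply and the leading coefficient becomes +2pi.

#include <math.h>

static float
ref_sin(float x)
{
        /* Range reduction, mirroring ntq_shrink_sincos_input_range(). */
        float t = x * (1.0f / (2.0f * (float)M_PI));
        t = t - truncf(t);          /* FTOI truncates toward 0 */
        if (t >= 0.5f)              /* map [0.5, 1] to [-0.5, 0] */
                t -= 1.0f;
        else if (t < -0.5f)         /* map [-1, -0.5] to [0, 0.5] */
                t += 1.0f;

        /* sin(2*pi*t) = sum over i of coeff[i] * t^(2i+1), with the same
         * coefficients as the table in ntq_fsin(). */
        float sum = 0.0f, tpow = t, t2 = t * t;
        for (int i = 0; i < 5; i++) {
                float c = powf(2.0f * (float)M_PI, 2 * i + 1) /
                          tgammaf(2 * i + 2);       /* (2i+1)! */
                sum += (i & 1) ? -c * tpow : c * tpow;
                tpow *= t2;
        }
        return sum;
}

ntq_fcos() is the same scheme with the even-power series for cos(2*pi*t).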
@@ -954,24 +1004,24 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
         enum qpu_cond cond;
 
         switch (compare_instr->op) {
-        case nir_op_feq:
-        case nir_op_ieq:
+        case nir_op_feq32:
+        case nir_op_ieq32:
         case nir_op_seq:
                 cond = QPU_COND_ZS;
                 break;
-        case nir_op_fne:
-        case nir_op_ine:
+        case nir_op_fne32:
+        case nir_op_ine32:
         case nir_op_sne:
                 cond = QPU_COND_ZC;
                 break;
-        case nir_op_fge:
-        case nir_op_ige:
-        case nir_op_uge:
+        case nir_op_fge32:
+        case nir_op_ige32:
+        case nir_op_uge32:
         case nir_op_sge:
                 cond = QPU_COND_NC;
                 break;
-        case nir_op_flt:
-        case nir_op_ilt:
+        case nir_op_flt32:
+        case nir_op_ilt32:
         case nir_op_slt:
                 cond = QPU_COND_NS;
                 break;
@@ -998,7 +1048,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                                 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                 break;
-        case nir_op_bcsel:
+        case nir_op_b32csel:
                 *dest = qir_SEL(c, cond,
                                 ntq_get_alu_src(c, sel_instr, 1),
                                 ntq_get_alu_src(c, sel_instr, 2));
@@ -1150,22 +1200,22 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
                 result = qir_FMAX(c, src[0], src[1]);
                 break;
 
-        case nir_op_f2i:
-        case nir_op_f2u:
+        case nir_op_f2i32:
+        case nir_op_f2u32:
                 result = qir_FTOI(c, src[0]);
                 break;
-        case nir_op_i2f:
-        case nir_op_u2f:
+        case nir_op_i2f32:
+        case nir_op_u2f32:
                 result = qir_ITOF(c, src[0]);
                 break;
-        case nir_op_b2f:
+        case nir_op_b2f32:
                 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                 break;
-        case nir_op_b2i:
+        case nir_op_b2i32:
                 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                 break;
-        case nir_op_i2b:
-        case nir_op_f2b:
+        case nir_op_i2b32:
+        case nir_op_f2b32:
                 qir_SF(c, src[0]);
                 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
                                             qir_uniform_ui(c, ~0),
@@ -1214,21 +1264,21 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         case nir_op_sne:
         case nir_op_sge:
         case nir_op_slt:
-        case nir_op_feq:
-        case nir_op_fne:
-        case nir_op_fge:
-        case nir_op_flt:
-        case nir_op_ieq:
-        case nir_op_ine:
-        case nir_op_ige:
-        case nir_op_uge:
-        case nir_op_ilt:
+        case nir_op_feq32:
+        case nir_op_fne32:
+        case nir_op_fge32:
+        case nir_op_flt32:
+        case nir_op_ieq32:
+        case nir_op_ine32:
+        case nir_op_ige32:
+        case nir_op_uge32:
+        case nir_op_ilt32:
                 if (!ntq_emit_comparison(c, &result, instr, instr)) {
                         fprintf(stderr, "Bad comparison instruction\n");
                 }
                 break;
 
-        case nir_op_bcsel:
+        case nir_op_b32csel:
                 result = ntq_emit_bcsel(c, instr, src);
                 break;
         case nir_op_fcsel:
@@ -1331,7 +1381,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         /* We have a scalar result, so the instruction should only have a
          * single channel written to.
          */
-        assert(util_is_power_of_two(instr->dest.write_mask));
+        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
         ntq_store_dest(c, &instr->dest.dest,
                        ffs(instr->dest.write_mask) - 1, result);
 }
@@ -1347,7 +1397,7 @@ emit_frag_end(struct vc4_compile *c)
         }
 
         uint32_t discard_cond = QPU_COND_ALWAYS;
-        if (c->s->info->fs.uses_discard) {
+        if (c->s->info.fs.uses_discard) {
                 qir_SF(c, c->discard);
                 discard_cond = QPU_COND_ZS;
         }
@@ -1511,7 +1561,7 @@ emit_vert_end(struct vc4_compile *c,
 static void
 emit_coord_end(struct vc4_compile *c)
 {
-        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
 
         emit_stub_vpm_read(c);
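The switch from raw qir_RCP() to ntq_rcp() matters because the QPU's SFU reciprocal is only an approximation. The standard way to sharpen such an estimate, and presumably what the helper applies, is one Newton-Raphson step, sketched here in plain C (illustrative name, not driver code):

/* One Newton-Raphson iteration for f(r) = 1/r - x: given an estimate r0
 * of 1/x, r1 = r0 * (2 - x * r0) roughly doubles the number of correct
 * bits in the result. */
static float
refine_rcp(float x, float r0)
{
        return r0 * (2.0f - x * r0);
}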
@@ -1541,14 +1591,14 @@ vc4_optimize_nir(struct nir_shader *s)
                 NIR_PASS(progress, s, nir_opt_dce);
                 NIR_PASS(progress, s, nir_opt_dead_cf);
                 NIR_PASS(progress, s, nir_opt_cse);
-                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
+                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                 NIR_PASS(progress, s, nir_opt_algebraic);
                 NIR_PASS(progress, s, nir_opt_constant_folding);
                 NIR_PASS(progress, s, nir_opt_undef);
                 NIR_PASS(progress, s, nir_opt_loop_unroll,
                          nir_var_shader_in |
                          nir_var_shader_out |
-                         nir_var_local);
+                         nir_var_function_temp);
         } while (progress);
 }
@@ -1653,7 +1703,7 @@ static void
 ntq_setup_uniforms(struct vc4_compile *c)
 {
         nir_foreach_variable(var, &c->s->uniforms) {
-                uint32_t vec4_count = st_glsl_type_size(var->type);
+                uint32_t vec4_count = uniforms_type_size(var->type);
                 unsigned vec4_size = 4 * sizeof(float);
 
                 declare_uniform_range(c, var->data.driver_location * vec4_size,
@@ -1705,6 +1755,46 @@ ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
                 qregs[i] = qir_uniform_ui(c, 0);
 }
 
+static void
+ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+        assert(nir_src_as_const_value(instr->src[0])->u32[0] == 0);
+
+        /* Reads of the per-sample color need to be done in
+         * order.
+         */
+        int sample_index = (nir_intrinsic_base(instr) -
+                            VC4_NIR_TLB_COLOR_READ_INPUT);
+        for (int i = 0; i <= sample_index; i++) {
+                if (c->color_reads[i].file == QFILE_NULL) {
+                        c->color_reads[i] =
+                                qir_TLB_COLOR_READ(c);
+                }
+        }
+        ntq_store_dest(c, &instr->dest, 0,
+                       qir_MOV(c, c->color_reads[sample_index]));
+}
+
+static void
+ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+        assert(instr->num_components == 1);
+
+        nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+        assert(const_offset && "vc4 doesn't support indirect inputs");
+
+        if (c->stage == QSTAGE_FRAG &&
+            nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
+                ntq_emit_color_read(c, instr);
+                return;
+        }
+
+        uint32_t offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+        int comp = nir_intrinsic_component(instr);
+        ntq_store_dest(c, &instr->dest, 0,
+                       qir_MOV(c, c->inputs[offset * 4 + comp]));
+}
+
 static void
 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
 {
@@ -1729,6 +1819,11 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                 }
                 break;
 
+        case nir_intrinsic_load_ubo:
+                assert(instr->num_components == 1);
+                ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
+                break;
+
         case nir_intrinsic_load_user_clip_plane:
                 for (int i = 0; i < instr->num_components; i++) {
                         ntq_store_dest(c, &instr->dest, i,
@@ -1782,31 +1877,7 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                 break;
 
         case nir_intrinsic_load_input:
-                assert(instr->num_components == 1);
-                const_offset = nir_src_as_const_value(instr->src[0]);
-                assert(const_offset && "vc4 doesn't support indirect inputs");
-                if (c->stage == QSTAGE_FRAG &&
-                    nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
-                        assert(const_offset->u32[0] == 0);
-                        /* Reads of the per-sample color need to be done in
-                         * order.
-                         */
-                        int sample_index = (nir_intrinsic_base(instr) -
-                                            VC4_NIR_TLB_COLOR_READ_INPUT);
-                        for (int i = 0; i <= sample_index; i++) {
-                                if (c->color_reads[i].file == QFILE_NULL) {
-                                        c->color_reads[i] =
-                                                qir_TLB_COLOR_READ(c);
-                                }
-                        }
-                        ntq_store_dest(c, &instr->dest, 0,
-                                       qir_MOV(c, c->color_reads[sample_index]));
-                } else {
-                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
-                        int comp = nir_intrinsic_component(instr);
-                        ntq_store_dest(c, &instr->dest, 0,
-                                       qir_MOV(c, c->inputs[offset * 4 + comp]));
-                }
+                ntq_emit_load_input(c, instr);
                 break;
 
         case nir_intrinsic_store_output:
@@ -1956,11 +2027,12 @@ ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
                 qir_link_blocks(c->cur_block, after_block);
 
         qir_set_emit_block(c, after_block);
-        if (was_top_level)
+        if (was_top_level) {
                 c->execute = c->undef;
-        else
+                c->last_top_block = c->cur_block;
+        } else {
                 ntq_activate_execute_for_block(c);
-
+        }
 }
 
 static void
@@ -2084,10 +2156,12 @@ ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
         qir_link_blocks(c->cur_block, c->loop_break_block);
 
         qir_set_emit_block(c, c->loop_break_block);
-        if (was_top_level)
+        if (was_top_level) {
                 c->execute = c->undef;
-        else
+                c->last_top_block = c->cur_block;
+        } else {
                 ntq_activate_execute_for_block(c);
+        }
 
         c->loop_break_block = save_loop_break_block;
         c->loop_cont_block = save_loop_cont_block;
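The c->execute/last_top_block bookkeeping in ntq_emit_if() and ntq_emit_loop() above implements non-uniform control flow without hardware branching: every QPU channel walks both sides of an if, and a per-channel execute value decides which stores commit. A toy model of the idea (hypothetical, four lanes, plain C):

#include <stdio.h>

#define LANES 4

int main(void)
{
        int cond[LANES] = { 1, 0, 1, 0 };
        int execute[LANES];             /* 0 = channel is live */
        int out[LANES] = { 0 };

        for (int i = 0; i < LANES; i++) /* entering the if */
                execute[i] = cond[i] ? 0 : 1;

        for (int i = 0; i < LANES; i++) /* then block */
                if (execute[i] == 0)
                        out[i] = 100;

        for (int i = 0; i < LANES; i++) /* flipping at the else */
                execute[i] = cond[i] ? 1 : 0;

        for (int i = 0; i < LANES; i++) /* else block */
                if (execute[i] == 0)
                        out[i] = 200;

        for (int i = 0; i < LANES; i++) /* back at top level */
                execute[i] = 0;

        for (int i = 0; i < LANES; i++)
                printf("lane %d: %d\n", i, out[i]);
        return 0;
}

At top level there is no enclosing mask to restore, which is why the code can simply reset c->execute to undef once the construct ends.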
@@ -2138,7 +2212,7 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
 static void
 nir_to_qir(struct vc4_compile *c)
 {
-        if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
+        if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
                 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
 
         ntq_setup_inputs(c);
@@ -2155,13 +2229,16 @@ nir_to_qir(struct vc4_compile *c)
 }
 
 static const nir_shader_compiler_options nir_options = {
+        .lower_all_io_to_temps = true,
         .lower_extract_byte = true,
         .lower_extract_word = true,
+        .lower_fdiv = true,
         .lower_ffma = true,
         .lower_flrp32 = true,
         .lower_fpow = true,
         .lower_fsat = true,
         .lower_fsqrt = true,
+        .lower_ldexp = true,
         .lower_negate = true,
         .native_integers = true,
         .max_unroll_iterations = 32,
@@ -2169,7 +2246,8 @@ static const nir_shader_compiler_options nir_options = {
 
 const void *
 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
-                                enum pipe_shader_ir ir, unsigned shader)
+                                enum pipe_shader_ir ir,
+                                enum pipe_shader_type shader)
 {
         return &nir_options;
 }
@@ -2224,8 +2302,15 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
 
-        if (stage == QSTAGE_FRAG)
+        if (stage == QSTAGE_FRAG) {
+                if (c->fs_key->alpha_test_func != COMPARE_FUNC_ALWAYS) {
+                        NIR_PASS_V(c->s, nir_lower_alpha_test,
+                                   c->fs_key->alpha_test_func,
+                                   c->fs_key->sample_alpha_to_one &&
+                                   c->fs_key->msaa);
+                }
                 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
+        }
 
         struct nir_lower_tex_options tex_options = {
                 /* We would need to implement txs, but we don't want the
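nir_lower_alpha_test turns fixed-function alpha testing into a conditional discard at the end of the shader, which is why the pass only runs when alpha_test_func is something other than COMPARE_FUNC_ALWAYS. The test itself behaves like this plain-C sketch (the enum is local to the example; its ordering mirrors gallium's PIPE_FUNC_* values):

#include <stdbool.h>

enum compare_func {
        FUNC_NEVER, FUNC_LESS, FUNC_EQUAL, FUNC_LEQUAL,
        FUNC_GREATER, FUNC_NOTEQUAL, FUNC_GEQUAL, FUNC_ALWAYS,
};

/* Fragments whose alpha fails the comparison against the reference
 * value are discarded. */
static bool
alpha_test_passes(float alpha, float ref, enum compare_func func)
{
        switch (func) {
        case FUNC_NEVER:    return false;
        case FUNC_LESS:     return alpha <  ref;
        case FUNC_EQUAL:    return alpha == ref;
        case FUNC_LEQUAL:   return alpha <= ref;
        case FUNC_GREATER:  return alpha >  ref;
        case FUNC_NOTEQUAL: return alpha != ref;
        case FUNC_GEQUAL:   return alpha >= ref;
        default:            return true;   /* FUNC_ALWAYS */
        }
}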
@@ -2278,7 +2363,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
         if (stage == QSTAGE_FRAG) {
                 NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
         } else {
-                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
+                NIR_PASS_V(c->s, nir_lower_clip_vs,
+                           c->key->ucp_enables, false);
                 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                            nir_var_shader_out);
         }
@@ -2299,6 +2385,8 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         vc4_optimize_nir(c->s);
 
+        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
+
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
 
         if (vc4_debug & VC4_DEBUG_SHADERDB) {
@@ -2401,7 +2489,11 @@ vc4_shader_state_create(struct pipe_context *pctx,
                  * creation.
                  */
                 s = cso->ir.nir;
-        } else {
+
+                NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
+                           uniforms_type_size,
+                           (nir_lower_io_options)0);
+        } else {
                 assert(cso->type == PIPE_SHADER_IR_TGSI);
 
                 if (vc4_debug & VC4_DEBUG_TGSI) {
@@ -2410,9 +2502,13 @@ vc4_shader_state_create(struct pipe_context *pctx,
                         tgsi_dump(cso->tokens, 0);
                         fprintf(stderr, "\n");
                 }
-                s = tgsi_to_nir(cso->tokens, &nir_options);
+                s = tgsi_to_nir(cso->tokens, pctx->screen);
         }
 
+        NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform,
+                   type_size,
+                   (nir_lower_io_options)0);
+
         NIR_PASS_V(s, nir_opt_global_to_local);
         NIR_PASS_V(s, nir_lower_regs_to_ssa);
         NIR_PASS_V(s, nir_normalize_cubemap_coords);
@@ -2421,7 +2517,7 @@ vc4_shader_state_create(struct pipe_context *pctx,
 
         vc4_optimize_nir(s);
 
-        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
+        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
 
         /* Garbage collect dead instructions */
         nir_sweep(s);
@@ -2431,7 +2527,7 @@ vc4_shader_state_create(struct pipe_context *pctx,
 
         if (vc4_debug & VC4_DEBUG_NIR) {
                 fprintf(stderr, "%s prog %d NIR:\n",
-                        gl_shader_stage_name(s->stage),
+                        gl_shader_stage_name(s->info.stage),
                         so->program_id);
                 nir_print_shader(s, stderr);
                 fprintf(stderr, "\n");
@@ -2562,7 +2658,7 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
 
                 /* Note: the temporary clone in c->s has been freed. */
                 nir_shader *orig_shader = key->shader_state->base.ir.nir;
-                if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
+                if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
                         shader->disable_early_z = true;
         } else {
                 shader->num_inputs = c->num_inputs;
@@ -2622,6 +2718,13 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                 }
         }
 
+        if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
+                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
+                        qir_get_stage_name(c->stage),
+                        c->program_id, c->variant_id,
+                        1 + shader->fs_threaded);
+        }
+
         qir_compile_destroy(c);
 
         struct vc4_key *dup_key;
@@ -2681,7 +2784,8 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
                         VC4_DIRTY_RASTERIZER |
                         VC4_DIRTY_SAMPLE_MASK |
                         VC4_DIRTY_FRAGTEX |
-                        VC4_DIRTY_UNCOMPILED_FS))) {
+                        VC4_DIRTY_UNCOMPILED_FS |
+                        VC4_DIRTY_UBO_1_SIZE))) {
                 return;
         }
 
@@ -2699,8 +2803,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
         }
         if (job->msaa) {
                 key->msaa = vc4->rasterizer->base.multisample;
-                key->sample_coverage = (vc4->rasterizer->base.multisample &&
-                                        vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
+                key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
         }
@@ -2713,10 +2816,10 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
                 key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
         key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                               key->stencil_enabled);
-        if (vc4->zsa->base.alpha.enabled) {
-                key->alpha_test = true;
+        if (vc4->zsa->base.alpha.enabled)
                 key->alpha_test_func = vc4->zsa->base.alpha.func;
-        }
+        else
+                key->alpha_test_func = COMPARE_FUNC_ALWAYS;
 
         if (key->is_points) {
                 key->point_sprite_mask =
@@ -2726,6 +2829,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
                                       PIPE_SPRITE_COORD_UPPER_LEFT);
         }
 
+        key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
         key->light_twoside = vc4->rasterizer->base.light_twoside;
 
         struct vc4_compiled_shader *old_fs = vc4->prog.fs;
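ubo_1_size joins the fragment-shader key because vc4_ubo_load() bakes the clamp bound into the compiled program as an immediate uniform: a differently sized UBO therefore needs a different shader variant, and VC4_DIRTY_UBO_1_SIZE (added to the dirty mask above) triggers the re-check. In outline, variant lookup is a compile-on-miss cache keyed by the whole state struct; a minimal sketch with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct fs_key { unsigned ubo_1_size; /* ...the other fields set above... */ };
struct variant { struct fs_key key; void *code; };

static struct variant *cache[64];
static int cache_len;

static struct variant *
get_variant(const struct fs_key *key, void *(*compile)(const struct fs_key *))
{
        for (int i = 0; i < cache_len; i++)
                if (memcmp(&cache[i]->key, key, sizeof(*key)) == 0)
                        return cache[i];

        struct variant *v = calloc(1, sizeof(*v));
        v->key = *key;
        v->code = compile(key);       /* slow path: build a new variant */
        cache[cache_len++] = v;
        return v;
}

The real driver zeroes the key before filling it in, so struct padding compares reliably under memcmp-style hashing.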
@@ -2736,11 +2840,11 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
         vc4->dirty |= VC4_DIRTY_COMPILED_FS;
 
         if (vc4->rasterizer->base.flatshade &&
-            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
+            (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
                 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
         }
 
-        if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
+        if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
 }
 
@@ -2850,6 +2954,7 @@ fs_inputs_compare(const void *key1, const void *key2)
 
 static void
 delete_from_cache_if_matches(struct hash_table *ht,
+                             struct vc4_compiled_shader **last_compile,
                              struct hash_entry *entry,
                              struct vc4_uncompiled_shader *so)
 {
@@ -2859,6 +2964,10 @@ delete_from_cache_if_matches(struct hash_table *ht,
                 struct vc4_compiled_shader *shader = entry->data;
                 _mesa_hash_table_remove(ht, entry);
                 vc4_bo_unreference(&shader->bo);
+
+                if (shader == *last_compile)
+                        *last_compile = NULL;
+
                 ralloc_free(shader);
         }
 }
@@ -2869,11 +2978,14 @@ vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
         struct vc4_context *vc4 = vc4_context(pctx);
         struct vc4_uncompiled_shader *so = hwcso;
 
-        struct hash_entry *entry;
-        hash_table_foreach(vc4->fs_cache, entry)
-                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
-        hash_table_foreach(vc4->vs_cache, entry)
-                delete_from_cache_if_matches(vc4->vs_cache, entry, so);
+        hash_table_foreach(vc4->fs_cache, entry) {
+                delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
+                                             entry, so);
+        }
+        hash_table_foreach(vc4->vs_cache, entry) {
+                delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
+                                             entry, so);
+        }
 
         ralloc_free(so->base.ir.nir);
         free(so);
@@ -2922,7 +3034,6 @@ vc4_program_fini(struct pipe_context *pctx)
 {
         struct vc4_context *vc4 = vc4_context(pctx);
 
-        struct hash_entry *entry;
         hash_table_foreach(vc4->fs_cache, entry) {
                 struct vc4_compiled_shader *shader = entry->data;
                 vc4_bo_unreference(&shader->bo);
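The new last_compile parameter threaded through delete_from_cache_if_matches() exists because vc4->prog.fs or vc4->prog.vs may still point at the variant being freed; clearing the binding alongside the cache entry avoids a dangling pointer on the next state update. The hazard in miniature (plain C, hypothetical names):

#include <stdlib.h>

struct shader { void *code; };

static void
destroy_shader(struct shader **last_compile, struct shader *s)
{
        if (*last_compile == s)
                *last_compile = NULL;   /* drop the stale binding first */
        free(s);
}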