X-Git-Url: https://git.libre-soc.org/?p=mesa.git;a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fvc4%2Fvc4_program.c;h=fc1488619892bf6bd66996ac57f6bc0be86dbea0;hp=d2281ce6bd3ccc4d434cc61f2a72f5c894009442;hb=a79b93269cf340ce4d23b5b34100039bcaafc841;hpb=ace0d810e56a1e2978fc3ac237158918ebe2a23c

diff --git a/src/gallium/drivers/vc4/vc4_program.c b/src/gallium/drivers/vc4/vc4_program.c
index d2281ce6bd3..fc148861989 100644
--- a/src/gallium/drivers/vc4/vc4_program.c
+++ b/src/gallium/drivers/vc4/vc4_program.c
@@ -24,7 +24,7 @@
 #include <inttypes.h>
 #include "util/u_format.h"
-#include "util/u_hash.h"
+#include "util/crc32.h"
 #include "util/u_math.h"
 #include "util/u_memory.h"
 #include "util/ralloc.h"
@@ -33,17 +33,23 @@
 #include "tgsi/tgsi_parse.h"
 #include "compiler/nir/nir.h"
 #include "compiler/nir/nir_builder.h"
+#include "compiler/nir_types.h"
 #include "nir/tgsi_to_nir.h"
 #include "vc4_context.h"
 #include "vc4_qpu.h"
 #include "vc4_qir.h"
-#include "mesa/state_tracker/st_glsl_types.h"
 
 static struct qreg
 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
 static void
 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
 
+static int
+type_size(const struct glsl_type *type, bool bindless)
+{
+        return glsl_count_attribute_slots(type, false);
+}
+
 static void
 resize_qreg_array(struct vc4_compile *c,
                   struct qreg **regs,
@@ -86,41 +92,43 @@ static struct qreg
 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
 {
         struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
-        uint32_t offset = nir_intrinsic_base(intr);
-        struct vc4_compiler_ubo_range *range = NULL;
-        unsigned i;
-        for (i = 0; i < c->num_uniform_ranges; i++) {
-                range = &c->ubo_ranges[i];
-                if (offset >= range->src_offset &&
-                    offset < range->src_offset + range->size) {
-                        break;
-                }
-        }
-        /* The driver-location-based offset always has to be within a declared
-         * uniform range.
-         */
-        assert(range);
-        if (!range->used) {
-                range->used = true;
-                range->dst_offset = c->next_ubo_dst_offset;
-                c->next_ubo_dst_offset += range->size;
-                c->num_ubo_ranges++;
-        }
-        offset -= range->src_offset;
+        /* Clamp to [0, array size). Note that MIN/MAX are signed. */
+        uint32_t range = nir_intrinsic_range(intr);
+        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
+        indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
+                                        qir_uniform_ui(c, range - 4));
+
+        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+                     indirect_offset,
+                     qir_uniform(c, QUNIFORM_UBO0_ADDR,
+                                 nir_intrinsic_base(intr)));
+
+        c->num_texture_samples++;
+
+        ntq_emit_thrsw(c);
+
+        return qir_TEX_RESULT(c);
+}
+
+static struct qreg
+vc4_ubo_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
+{
+        int buffer_index = nir_src_as_uint(intr->src[0]);
+        assert(buffer_index == 1);
+        assert(c->stage == QSTAGE_FRAG);
 
-        /* Adjust for where we stored the TGSI register base. */
-        indirect_offset = qir_ADD(c, indirect_offset,
-                                  qir_uniform_ui(c, (range->dst_offset +
-                                                     offset)));
+        struct qreg offset = ntq_get_src(c, intr->src[1], 0);
 
         /* Clamp to [0, array size). Note that MIN/MAX are signed. */
-        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
-        indirect_offset = qir_MIN(c, indirect_offset,
-                                  qir_uniform_ui(c, (range->dst_offset +
-                                                     range->size - 4)));
+        offset = qir_MAX(c, offset, qir_uniform_ui(c, 0));
+        offset = qir_MIN_NOIMM(c, offset,
+                               qir_uniform_ui(c, c->fs_key->ubo_1_size - 4));
+
+        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+                     offset,
+                     qir_uniform(c, QUNIFORM_UBO1_ADDR, 0));
 
-        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
         c->num_texture_samples++;
 
         ntq_emit_thrsw(c);
@@ -278,7 +286,7 @@ static struct qreg
 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                 unsigned src)
 {
-        assert(util_is_power_of_two(instr->dest.write_mask));
+        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
         unsigned chan = ffs(instr->dest.write_mask) - 1;
         struct qreg r = ntq_get_src(c, instr->src[src].src,
                                     instr->src[src].swizzle[chan]);
@@ -379,9 +387,10 @@ ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
 
         /* Perform the clamping required by kernel validation. */
         addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
-        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));
+        addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
 
-        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
+        qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
+                     addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
 
         ntq_emit_thrsw(c);
@@ -431,7 +440,7 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
                         lod = ntq_get_src(c, instr->src[i].src, 0);
                         is_txl = true;
                         break;
-                case nir_tex_src_comparitor:
+                case nir_tex_src_comparator:
                         compare = ntq_get_src(c, instr->src[i].src, 0);
                         break;
                 default:
@@ -479,14 +488,20 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
                                           unit | (is_txl << 16));
         }
 
+        struct qinst *tmu;
         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
-                qir_TEX_R(c, r, texture_u[next_texture_u++]);
+                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
+                tmu->src[qir_get_tex_uniform_src(tmu)] =
+                        texture_u[next_texture_u++];
         } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                    c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                    c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                    c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
-                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
-                          texture_u[next_texture_u++]);
+                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
+                                   qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
+                                               unit));
+                tmu->src[qir_get_tex_uniform_src(tmu)] =
+                        texture_u[next_texture_u++];
         }
 
         if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
@@ -497,12 +512,18 @@ ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
                 t = qir_SAT(c, t);
         }
 
-        qir_TEX_T(c, t, texture_u[next_texture_u++]);
+        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
+        tmu->src[qir_get_tex_uniform_src(tmu)] =
+                texture_u[next_texture_u++];
 
-        if (is_txl || is_txb)
-                qir_TEX_B(c, lod, texture_u[next_texture_u++]);
+        if (is_txl || is_txb) {
+                tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
+                tmu->src[qir_get_tex_uniform_src(tmu)] =
+                        texture_u[next_texture_u++];
+        }
 
-        qir_TEX_S(c, s, texture_u[next_texture_u++]);
+        tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
+        tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
 
         c->num_texture_samples++;
@@ -583,9 +604,11 @@ ntq_ffract(struct vc4_compile *c, struct qreg src)
         struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
         struct qreg diff = qir_FSUB(c, src, trunc);
         qir_SF(c, diff);
-        return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
-                                  qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
-                                  diff));
+
+        qir_FADD_dest(c, diff,
+                      diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+        return qir_MOV(c, diff);
 }
 
 /**
@@ -595,16 +618,18 @@ ntq_ffract(struct vc4_compile *c, struct qreg src)
 static struct qreg
 ntq_ffloor(struct vc4_compile *c, struct qreg src)
 {
-        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
 
         /* This will be < 0 if we truncated and the truncation was of a value
          * that was < 0 in the first place.
         */
-        qir_SF(c, qir_FSUB(c, src, trunc));
+        qir_SF(c, qir_FSUB(c, src, result));
+
+        struct qinst *sub = qir_FSUB_dest(c, result,
+                                          result, qir_uniform_f(c, 1.0));
+        sub->cond = QPU_COND_NS;
 
-        return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
-                                  qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
-                                  trunc));
+        return qir_MOV(c, result);
 }
 
 /**
@@ -614,37 +639,58 @@ static struct qreg
 ntq_fceil(struct vc4_compile *c, struct qreg src)
 {
-        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
+        struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
 
         /* This will be < 0 if we truncated and the truncation was of a value
          * that was > 0 in the first place.
         */
-        qir_SF(c, qir_FSUB(c, trunc, src));
+        qir_SF(c, qir_FSUB(c, result, src));
+
+        qir_FADD_dest(c, result,
+                      result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
+
+        return qir_MOV(c, result);
+}
+
+static struct qreg
+ntq_shrink_sincos_input_range(struct vc4_compile *c, struct qreg x)
+{
+        /* Since we're using a Taylor approximation, we want to have a small
+         * number of coefficients and take advantage of sin/cos repeating
+         * every 2pi. We keep our x as close to 0 as we can, since the series
+         * will be less accurate as |x| increases. (Also, be careful of
+         * shifting the input x value to be tricky with sin/cos relations,
+         * because getting accurate values for x==0 is very important for SDL
+         * rendering)
+         */
+        struct qreg scaled_x =
+                qir_FMUL(c, x,
+                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
+        /* Note: FTOI truncates toward 0. */
+        struct qreg x_frac = qir_FSUB(c, scaled_x,
+                                      qir_ITOF(c, qir_FTOI(c, scaled_x)));
+        /* Map [0.5, 1] to [-0.5, 0] */
+        qir_SF(c, qir_FSUB(c, x_frac, qir_uniform_f(c, 0.5)));
+        qir_FSUB_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NC;
+        /* Map [-1, -0.5] to [0, 0.5] */
+        qir_SF(c, qir_FADD(c, x_frac, qir_uniform_f(c, 0.5)));
+        qir_FADD_dest(c, x_frac, x_frac, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
 
-        return qir_MOV(c, qir_SEL(c, QPU_COND_NS,
-                                  qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
-                                  trunc));
+        return x_frac;
 }
 
 static struct qreg
 ntq_fsin(struct vc4_compile *c, struct qreg src)
 {
         float coeff[] = {
-                -2.0 * M_PI,
-                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
-                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
-                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                2.0 * M_PI,
+                -pow(2.0 * M_PI, 3) / (3 * 2 * 1),
+                pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
+                pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
         };
 
-        struct qreg scaled_x =
-                qir_FMUL(c,
-                         src,
-                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
-
-        struct qreg x = qir_FADD(c,
-                                 ntq_ffract(c, scaled_x),
-                                 qir_uniform_f(c, -0.5));
+        struct qreg x = ntq_shrink_sincos_input_range(c, src);
 
         struct qreg x2 = qir_FMUL(c, x, x);
         struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
         for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
@@ -662,21 +708,15 @@ static struct qreg
 ntq_fcos(struct vc4_compile *c, struct qreg src)
 {
         float coeff[] = {
-                -1.0f,
-                pow(2.0 * M_PI, 2) / (2 * 1),
-                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
-                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
-                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                1.0f,
+                -pow(2.0 * M_PI, 2) / (2 * 1),
+                pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
+                pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
+                -pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
         };
 
-        struct qreg scaled_x =
-                qir_FMUL(c, src,
-                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
-        struct qreg x_frac = qir_FADD(c,
-                                      ntq_ffract(c, scaled_x),
-                                      qir_uniform_f(c, -0.5));
-
+        struct qreg x_frac = ntq_shrink_sincos_input_range(c, src);
 
         struct qreg sum = qir_uniform_f(c, coeff[0]);
         struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
         struct qreg x = x2; /* Current x^2, x^4, or x^6 */
@@ -684,13 +724,10 @@ ntq_fcos(struct vc4_compile *c, struct qreg src)
                 if (i != 1)
                         x = qir_FMUL(c, x, x2);
 
-                struct qreg mul = qir_FMUL(c,
+                sum = qir_FADD(c, qir_FMUL(c,
                                            x,
-                                           qir_uniform_f(c, coeff[i]));
-                if (i == 0)
-                        sum = mul;
-                else
-                        sum = qir_FADD(c, sum, mul);
+                                           qir_uniform_f(c, coeff[i])),
+                                  sum);
         }
 
         return sum;
 }
@@ -789,24 +826,6 @@ add_output(struct vc4_compile *c,
         c->output_slots[decl_offset].swizzle = swizzle;
 }
 
-static void
-declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
-{
-        unsigned array_id = c->num_uniform_ranges++;
-        if (array_id >= c->ubo_ranges_array_size) {
-                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
-                                                array_id + 1);
-                c->ubo_ranges = reralloc(c, c->ubo_ranges,
-                                         struct vc4_compiler_ubo_range,
-                                         c->ubo_ranges_array_size);
-        }
-
-        c->ubo_ranges[array_id].dst_offset = 0;
-        c->ubo_ranges[array_id].src_offset = start;
-        c->ubo_ranges[array_id].size = size;
-        c->ubo_ranges[array_id].used = false;
-}
-
 static bool
 ntq_src_is_only_ssa_def_user(nir_src *src)
 {
@@ -933,24 +952,24 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
         enum qpu_cond cond;
 
         switch (compare_instr->op) {
-        case nir_op_feq:
-        case nir_op_ieq:
+        case nir_op_feq32:
+        case nir_op_ieq32:
         case nir_op_seq:
                 cond = QPU_COND_ZS;
                 break;
-        case nir_op_fne:
-        case nir_op_ine:
+        case nir_op_fne32:
+        case nir_op_ine32:
         case nir_op_sne:
                 cond = QPU_COND_ZC;
                 break;
-        case nir_op_fge:
-        case nir_op_ige:
-        case nir_op_uge:
+        case nir_op_fge32:
+        case nir_op_ige32:
+        case nir_op_uge32:
        case nir_op_sge:
                 cond = QPU_COND_NC;
                 break;
-        case nir_op_flt:
-        case nir_op_ilt:
+        case nir_op_flt32:
+        case nir_op_ilt32:
        case nir_op_slt:
                 cond = QPU_COND_NS;
                 break;
@@ -977,7 +996,7 @@ ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                                   qir_uniform_f(c, 1.0),
                                   qir_uniform_f(c, 0.0));
                 break;
-        case nir_op_bcsel:
+        case nir_op_b32csel:
                 *dest = qir_SEL(c, cond,
                                 ntq_get_alu_src(c, sel_instr, 1),
                                 ntq_get_alu_src(c, sel_instr, 2));
@@ -1109,8 +1128,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         struct qreg result;
 
         switch (instr->op) {
-        case nir_op_fmov:
-        case nir_op_imov:
+        case nir_op_mov:
                 result = qir_MOV(c, src[0]);
                 break;
         case nir_op_fmul:
@@ -1129,22 +1147,22 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
                 result = qir_FMAX(c, src[0], src[1]);
                 break;
 
-        case nir_op_f2i:
-        case nir_op_f2u:
+        case nir_op_f2i32:
+        case nir_op_f2u32:
                 result = qir_FTOI(c, src[0]);
                 break;
-        case nir_op_i2f:
-        case nir_op_u2f:
+        case nir_op_i2f32:
+        case nir_op_u2f32:
                 result = qir_ITOF(c, src[0]);
                 break;
-        case nir_op_b2f:
+        case nir_op_b2f32:
                 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                 break;
-        case nir_op_b2i:
+        case nir_op_b2i32:
                 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                 break;
-        case nir_op_i2b:
-        case nir_op_f2b:
+        case nir_op_i2b32:
+        case nir_op_f2b32:
                 qir_SF(c, src[0]);
                 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
                                             qir_uniform_ui(c, ~0),
@@ -1193,21 +1211,21 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         case nir_op_sne:
         case nir_op_sge:
         case nir_op_slt:
-        case nir_op_feq:
-        case nir_op_fne:
-        case nir_op_fge:
-        case nir_op_flt:
-        case nir_op_ieq:
-        case nir_op_ine:
-        case nir_op_ige:
-        case nir_op_uge:
-        case nir_op_ilt:
+        case nir_op_feq32:
+        case nir_op_fne32:
+        case nir_op_fge32:
+        case nir_op_flt32:
+        case nir_op_ieq32:
+        case nir_op_ine32:
+        case nir_op_ige32:
+        case nir_op_uge32:
+        case nir_op_ilt32:
                 if (!ntq_emit_comparison(c, &result, instr, instr)) {
                         fprintf(stderr, "Bad comparison instruction\n");
                 }
                 break;
 
-        case nir_op_bcsel:
+        case nir_op_b32csel:
                 result = ntq_emit_bcsel(c, instr, src);
                 break;
         case nir_op_fcsel:
@@ -1310,7 +1328,7 @@ ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
         /* We have a scalar result, so the instruction should only have a
          * single channel written to.
          */
-        assert(util_is_power_of_two(instr->dest.write_mask));
+        assert(util_is_power_of_two_or_zero(instr->dest.write_mask));
         ntq_store_dest(c, &instr->dest.dest, ffs(instr->dest.write_mask) - 1,
                        result);
 }
@@ -1326,7 +1344,7 @@ emit_frag_end(struct vc4_compile *c)
         }
 
         uint32_t discard_cond = QPU_COND_ALWAYS;
-        if (c->s->info->fs.uses_discard) {
+        if (c->s->info.fs.uses_discard) {
                 qir_SF(c, c->discard);
                 discard_cond = QPU_COND_ZS;
         }
@@ -1423,11 +1441,6 @@ emit_point_size_write(struct vc4_compile *c)
         else
                 point_size = qir_uniform_f(c, 1.0);
 
-        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
-         * BCM21553).
-         */
-        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
-
         qir_VPM_WRITE(c, point_size);
 }
@@ -1490,7 +1503,7 @@ emit_vert_end(struct vc4_compile *c,
 static void
 emit_coord_end(struct vc4_compile *c)
 {
-        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
+        struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
 
         emit_stub_vpm_read(c);
@@ -1508,22 +1521,48 @@ static void
 vc4_optimize_nir(struct nir_shader *s)
 {
         bool progress;
+        unsigned lower_flrp =
+                (s->options->lower_flrp16 ? 16 : 0) |
+                (s->options->lower_flrp32 ? 32 : 0) |
+                (s->options->lower_flrp64 ? 64 : 0);
 
         do {
                 progress = false;
 
                 NIR_PASS_V(s, nir_lower_vars_to_ssa);
-                NIR_PASS(progress, s, nir_lower_alu_to_scalar);
+                NIR_PASS(progress, s, nir_lower_alu_to_scalar, NULL, NULL);
                 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
                 NIR_PASS(progress, s, nir_copy_prop);
                 NIR_PASS(progress, s, nir_opt_remove_phis);
                 NIR_PASS(progress, s, nir_opt_dce);
                 NIR_PASS(progress, s, nir_opt_dead_cf);
                 NIR_PASS(progress, s, nir_opt_cse);
-                NIR_PASS(progress, s, nir_opt_peephole_select, 8);
+                NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
                 NIR_PASS(progress, s, nir_opt_algebraic);
                 NIR_PASS(progress, s, nir_opt_constant_folding);
+                if (lower_flrp != 0) {
+                        bool lower_flrp_progress = false;
+
+                        NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
+                                 lower_flrp,
+                                 false /* always_precise */,
+                                 s->options->lower_ffma);
+                        if (lower_flrp_progress) {
+                                NIR_PASS(progress, s, nir_opt_constant_folding);
+                                progress = true;
+                        }
+
+                        /* Nothing should rematerialize any flrps, so we only
+                         * need to do this lowering once.
+                         */
+                        lower_flrp = 0;
+                }
+
                 NIR_PASS(progress, s, nir_opt_undef);
+                NIR_PASS(progress, s, nir_opt_loop_unroll,
+                         nir_var_shader_in |
+                         nir_var_shader_out |
+                         nir_var_function_temp);
         } while (progress);
 }
@@ -1624,19 +1663,6 @@ ntq_setup_outputs(struct vc4_compile *c)
         }
 }
 
-static void
-ntq_setup_uniforms(struct vc4_compile *c)
-{
-        nir_foreach_variable(var, &c->s->uniforms) {
-                uint32_t vec4_count = st_glsl_type_size(var->type);
-                unsigned vec4_size = 4 * sizeof(float);
-
-                declare_uniform_range(c, var->data.driver_location * vec4_size,
-                                      vec4_count * vec4_size);
-
-        }
-}
-
 /**
  * Sets up the mapping from nir_register to struct qreg *.
  *
@@ -1663,7 +1689,7 @@ ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
 {
         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
         for (int i = 0; i < instr->def.num_components; i++)
-                qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
+                qregs[i] = qir_uniform_ui(c, instr->value[i].u32);
 
         _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
 }
@@ -1680,18 +1706,57 @@ ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
                 qregs[i] = qir_uniform_ui(c, 0);
 }
 
+static void
+ntq_emit_color_read(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+        assert(nir_src_as_uint(instr->src[0]) == 0);
+
+        /* Reads of the per-sample color need to be done in
+         * order.
+         */
+        int sample_index = (nir_intrinsic_base(instr) -
+                            VC4_NIR_TLB_COLOR_READ_INPUT);
+        for (int i = 0; i <= sample_index; i++) {
+                if (c->color_reads[i].file == QFILE_NULL) {
+                        c->color_reads[i] =
+                                qir_TLB_COLOR_READ(c);
+                }
+        }
+        ntq_store_dest(c, &instr->dest, 0,
+                       qir_MOV(c, c->color_reads[sample_index]));
+}
+
+static void
+ntq_emit_load_input(struct vc4_compile *c, nir_intrinsic_instr *instr)
+{
+        assert(instr->num_components == 1);
+        assert(nir_src_is_const(instr->src[0]) &&
+               "vc4 doesn't support indirect inputs");
+
+        if (c->stage == QSTAGE_FRAG &&
+            nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
+                ntq_emit_color_read(c, instr);
+                return;
+        }
+
+        uint32_t offset = nir_intrinsic_base(instr) +
+                          nir_src_as_uint(instr->src[0]);
+        int comp = nir_intrinsic_component(instr);
+        ntq_store_dest(c, &instr->dest, 0,
+                       qir_MOV(c, c->inputs[offset * 4 + comp]));
+}
+
 static void
 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
 {
-        nir_const_value *const_offset;
         unsigned offset;
 
         switch (instr->intrinsic) {
         case nir_intrinsic_load_uniform:
                 assert(instr->num_components == 1);
-                const_offset = nir_src_as_const_value(instr->src[0]);
-                if (const_offset) {
-                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+                if (nir_src_is_const(instr->src[0])) {
+                        offset = nir_intrinsic_base(instr) +
+                                 nir_src_as_uint(instr->src[0]);
                         assert(offset % 4 == 0);
                         /* We need dwords */
                         offset = offset / 4;
@@ -1704,6 +1769,11 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                 }
                 break;
 
+        case nir_intrinsic_load_ubo:
+                assert(instr->num_components == 1);
+                ntq_store_dest(c, &instr->dest, 0, vc4_ubo_load(c, instr));
+                break;
+
         case nir_intrinsic_load_user_clip_plane:
                 for (int i = 0; i < instr->num_components; i++) {
                         ntq_store_dest(c, &instr->dest, i,
@@ -1736,11 +1806,6 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                                                       0));
                 break;
 
-        case nir_intrinsic_load_alpha_ref_float:
-                ntq_store_dest(c, &instr->dest, 0,
-                               qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
-                break;
-
         case nir_intrinsic_load_sample_mask_in:
                 ntq_store_dest(c, &instr->dest, 0,
                                qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
@@ -1757,37 +1822,14 @@ ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
                 break;
 
         case nir_intrinsic_load_input:
-                assert(instr->num_components == 1);
-                const_offset = nir_src_as_const_value(instr->src[0]);
-                assert(const_offset && "vc4 doesn't support indirect inputs");
-                if (c->stage == QSTAGE_FRAG &&
-                    nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
-                        assert(const_offset->u32[0] == 0);
-                        /* Reads of the per-sample color need to be done in
-                         * order.
-                         */
-                        int sample_index = (nir_intrinsic_base(instr) -
-                                            VC4_NIR_TLB_COLOR_READ_INPUT);
-                        for (int i = 0; i <= sample_index; i++) {
-                                if (c->color_reads[i].file == QFILE_NULL) {
-                                        c->color_reads[i] =
-                                                qir_TLB_COLOR_READ(c);
-                                }
-                        }
-                        ntq_store_dest(c, &instr->dest, 0,
-                                       qir_MOV(c, c->color_reads[sample_index]));
-                } else {
-                        offset = nir_intrinsic_base(instr) + const_offset->u32[0];
-                        int comp = nir_intrinsic_component(instr);
-                        ntq_store_dest(c, &instr->dest, 0,
-                                       qir_MOV(c, c->inputs[offset * 4 + comp]));
-                }
+                ntq_emit_load_input(c, instr);
                 break;
 
         case nir_intrinsic_store_output:
-                const_offset = nir_src_as_const_value(instr->src[1]);
-                assert(const_offset && "vc4 doesn't support indirect outputs");
-                offset = nir_intrinsic_base(instr) + const_offset->u32[0];
+                assert(nir_src_is_const(instr->src[1]) &&
+                       "vc4 doesn't support indirect outputs");
+                offset = nir_intrinsic_base(instr) +
+                         nir_src_as_uint(instr->src[1]);
 
                 /* MSAA color outputs are the only case where we have an
                  * output that's not lowered to being a store of a single 32
@@ -1931,32 +1973,40 @@ ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
         qir_link_blocks(c->cur_block, after_block);
 
         qir_set_emit_block(c, after_block);
-        if (was_top_level)
+        if (was_top_level) {
                 c->execute = c->undef;
-        else
+                c->last_top_block = c->cur_block;
+        } else {
                 ntq_activate_execute_for_block(c);
-
+        }
 }
 
 static void
 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
 {
+        struct qblock *jump_block;
         switch (jump->type) {
         case nir_jump_break:
-                qir_SF(c, c->execute);
-                qir_MOV_cond(c, QPU_COND_ZS, c->execute,
-                             qir_uniform_ui(c, c->loop_break_block->index));
+                jump_block = c->loop_break_block;
                 break;
-
         case nir_jump_continue:
-                qir_SF(c, c->execute);
-                qir_MOV_cond(c, QPU_COND_ZS, c->execute,
-                             qir_uniform_ui(c, c->loop_cont_block->index));
+                jump_block = c->loop_cont_block;
                 break;
-
-        case nir_jump_return:
-                unreachable("All returns shouold be lowered\n");
+        default:
+                unreachable("Unsupported jump type\n");
         }
+
+        qir_SF(c, c->execute);
+        qir_MOV_cond(c, QPU_COND_ZS, c->execute,
+                     qir_uniform_ui(c, jump_block->index));
+
+        /* Jump to the destination block if everyone has taken the jump. */
+        qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
+        qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
+        struct qblock *new_block = qir_new_block(c);
+        qir_link_blocks(c->cur_block, jump_block);
+        qir_link_blocks(c->cur_block, new_block);
+        qir_set_emit_block(c, new_block);
 }
 
 static void
@@ -2052,10 +2102,12 @@ ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
         qir_link_blocks(c->cur_block, c->loop_break_block);
 
         qir_set_emit_block(c, c->loop_break_block);
-        if (was_top_level)
+        if (was_top_level) {
                 c->execute = c->undef;
-        else
+                c->last_top_block = c->cur_block;
+        } else {
                 ntq_activate_execute_for_block(c);
+        }
 
         c->loop_break_block = save_loop_break_block;
         c->loop_cont_block = save_loop_cont_block;
@@ -2106,13 +2158,11 @@ ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
 static void
 nir_to_qir(struct vc4_compile *c)
 {
-        if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
+        if (c->stage == QSTAGE_FRAG && c->s->info.fs.uses_discard)
                 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
 
         ntq_setup_inputs(c);
         ntq_setup_outputs(c);
-        ntq_setup_uniforms(c);
-        ntq_setup_registers(c, &c->s->registers);
 
         /* Find the main function and emit the body. */
         nir_foreach_function(function, c->s) {
@@ -2123,20 +2173,27 @@ nir_to_qir(struct vc4_compile *c)
 }
 
 static const nir_shader_compiler_options nir_options = {
+        .lower_all_io_to_temps = true,
         .lower_extract_byte = true,
         .lower_extract_word = true,
+        .lower_fdiv = true,
         .lower_ffma = true,
         .lower_flrp32 = true,
+        .lower_fmod = true,
         .lower_fpow = true,
         .lower_fsat = true,
         .lower_fsqrt = true,
+        .lower_ldexp = true,
         .lower_negate = true,
-        .native_integers = true,
+        .lower_rotate = true,
+        .lower_to_scalar = true,
+        .max_unroll_iterations = 32,
 };
 
 const void *
 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
-                                enum pipe_shader_ir ir, unsigned shader)
+                                enum pipe_shader_ir ir,
+                                enum pipe_shader_type shader)
 {
         return &nir_options;
 }
@@ -2158,7 +2215,7 @@ count_nir_instrs(nir_shader *nir)
 
 static struct vc4_compile *
 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
-               struct vc4_key *key)
+               struct vc4_key *key, bool fs_threaded)
 {
         struct vc4_compile *c = qir_compile_init();
 
@@ -2168,6 +2225,7 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
         c->program_id = key->shader_state->program_id;
         c->variant_id =
                 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
+        c->fs_threaded = fs_threaded;
 
         c->key = key;
         switch (stage) {
@@ -2242,9 +2300,11 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         if (c->key->ucp_enables) {
                 if (stage == QSTAGE_FRAG) {
-                        NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
+                        NIR_PASS_V(c->s, nir_lower_clip_fs,
+                                   c->key->ucp_enables, false);
                 } else {
-                        NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
+                        NIR_PASS_V(c->s, nir_lower_clip_vs,
+                                   c->key->ucp_enables, false, false, NULL);
                         NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                                    nir_var_shader_out);
                 }
@@ -2265,6 +2325,23 @@ vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
 
         vc4_optimize_nir(c->s);
 
+        /* Do late algebraic optimization to turn add(a, neg(b)) back into
+         * subs, then the mandatory cleanup after algebraic. Note that it may
+         * produce fnegs, and if so then we need to keep running to squash
+         * fneg(fneg(a)).
+         */
+        bool more_late_algebraic = true;
+        while (more_late_algebraic) {
+                more_late_algebraic = false;
+                NIR_PASS(more_late_algebraic, c->s, nir_opt_algebraic_late);
+                NIR_PASS_V(c->s, nir_opt_constant_folding);
+                NIR_PASS_V(c->s, nir_copy_prop);
+                NIR_PASS_V(c->s, nir_opt_dce);
+                NIR_PASS_V(c->s, nir_opt_cse);
+        }
+
+        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
+
         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
 
         if (vc4_debug & VC4_DEBUG_SHADERDB) {
@@ -2367,7 +2444,7 @@ vc4_shader_state_create(struct pipe_context *pctx,
                  * creation.
                  */
                 s = cso->ir.nir;
-        } else {
+        } else {
                 assert(cso->type == PIPE_SHADER_IR_TGSI);
 
                 if (vc4_debug & VC4_DEBUG_TGSI) {
@@ -2376,18 +2453,23 @@ vc4_shader_state_create(struct pipe_context *pctx,
                         tgsi_dump(cso->tokens, 0);
                         fprintf(stderr, "\n");
                 }
-                s = tgsi_to_nir(cso->tokens, &nir_options);
+                s = tgsi_to_nir(cso->tokens, pctx->screen);
         }
 
-        NIR_PASS_V(s, nir_opt_global_to_local);
-        NIR_PASS_V(s, nir_convert_to_ssa);
+        if (s->info.stage == MESA_SHADER_VERTEX)
+                NIR_PASS_V(s, nir_lower_point_size, 1.0f, 0.0f);
+
+        NIR_PASS_V(s, nir_lower_io, nir_var_all, type_size,
+                   (nir_lower_io_options)0);
+
+        NIR_PASS_V(s, nir_lower_regs_to_ssa);
         NIR_PASS_V(s, nir_normalize_cubemap_coords);
         NIR_PASS_V(s, nir_lower_load_const_to_scalar);
 
         vc4_optimize_nir(s);
 
-        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
+        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);
 
         /* Garbage collect dead instructions */
         nir_sweep(s);
@@ -2397,7 +2479,7 @@ vc4_shader_state_create(struct pipe_context *pctx,
 
         if (vc4_debug & VC4_DEBUG_NIR) {
                 fprintf(stderr, "%s prog %d NIR:\n",
-                        gl_shader_stage_name(s->stage),
+                        gl_shader_stage_name(s->info.stage),
                         so->program_id);
                 nir_print_shader(s, stderr);
                 fprintf(stderr, "\n");
@@ -2440,7 +2522,7 @@ vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
 
         memset(input_live, 0, sizeof(input_live));
         qir_for_each_inst_inorder(inst, c) {
-                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
+                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                         if (inst->src[i].file == QFILE_VARY)
                                 input_live[inst->src[i].index] = true;
                 }
@@ -2496,12 +2578,16 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
 {
         struct hash_table *ht;
         uint32_t key_size;
+        bool try_threading;
+
         if (stage == QSTAGE_FRAG) {
                 ht = vc4->fs_cache;
                 key_size = sizeof(struct vc4_fs_key);
+                try_threading = vc4->screen->has_threaded_fs;
         } else {
                 ht = vc4->vs_cache;
                 key_size = sizeof(struct vc4_vs_key);
+                try_threading = false;
         }
 
         struct vc4_compiled_shader *shader;
@@ -2509,7 +2595,13 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
         if (entry)
                 return entry->data;
 
-        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
+        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
+        /* If the FS failed to compile threaded, fall back to single threaded. */
+        if (try_threading && c->failed) {
+                qir_compile_destroy(c);
+                c = vc4_shader_ntq(vc4, stage, key, false);
+        }
+
         shader = rzalloc(NULL, struct vc4_compiled_shader);
         shader->program_id = vc4->next_compiled_program_id++;
@@ -2518,7 +2610,7 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
 
                 /* Note: the temporary clone in c->s has been freed. */
                 nir_shader *orig_shader = key->shader_state->base.ir.nir;
-                if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
+                if (orig_shader->info.outputs_written & (1 << FRAG_RESULT_DEPTH))
                         shader->disable_early_z = true;
         } else {
                 shader->num_inputs = c->num_inputs;
@@ -2543,37 +2635,13 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                                sizeof(uint64_t));
         }
 
-        /* Copy the compiler UBO range state to the compiled shader, dropping
-         * out arrays that were never referenced by an indirect load.
-         *
-         * (Note that QIR dead code elimination of an array access still
-         * leaves that array alive, though)
-         */
-        if (c->num_ubo_ranges) {
-                shader->num_ubo_ranges = c->num_ubo_ranges;
-                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
-                                                  c->num_ubo_ranges);
-                uint32_t j = 0;
-                for (int i = 0; i < c->num_uniform_ranges; i++) {
-                        struct vc4_compiler_ubo_range *range =
-                                &c->ubo_ranges[i];
-                        if (!range->used)
-                                continue;
-
-                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
-                        shader->ubo_ranges[j].src_offset = range->src_offset;
-                        shader->ubo_ranges[j].size = range->size;
-                        shader->ubo_size += c->ubo_ranges[i].size;
-                        j++;
-                }
-        }
-        if (shader->ubo_size) {
-                if (vc4_debug & VC4_DEBUG_SHADERDB) {
-                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
-                                qir_get_stage_name(c->stage),
-                                c->program_id, c->variant_id,
-                                shader->ubo_size / 4);
-                }
+        shader->fs_threaded = c->fs_threaded;
+
+        if ((vc4_debug & VC4_DEBUG_SHADERDB) && stage == QSTAGE_FRAG) {
+                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d FS threads\n",
+                        qir_get_stage_name(c->stage),
+                        c->program_id, c->variant_id,
+                        1 + shader->fs_threaded);
         }
 
         qir_compile_destroy(c);
@@ -2635,7 +2703,8 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
                             VC4_DIRTY_RASTERIZER |
                             VC4_DIRTY_SAMPLE_MASK |
                             VC4_DIRTY_FRAGTEX |
-                            VC4_DIRTY_UNCOMPILED_FS))) {
+                            VC4_DIRTY_UNCOMPILED_FS |
+                            VC4_DIRTY_UBO_1_SIZE))) {
                 return;
         }
@@ -2653,8 +2722,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
         }
         if (job->msaa) {
                 key->msaa = vc4->rasterizer->base.multisample;
-                key->sample_coverage = (vc4->rasterizer->base.multisample &&
-                                        vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
+                key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
         }
@@ -2667,10 +2735,10 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
         key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
         key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                               key->stencil_enabled);
-        if (vc4->zsa->base.alpha.enabled) {
-                key->alpha_test = true;
+        if (vc4->zsa->base.alpha.enabled)
                 key->alpha_test_func = vc4->zsa->base.alpha.func;
-        }
+        else
+                key->alpha_test_func = COMPARE_FUNC_ALWAYS;
 
         if (key->is_points) {
                 key->point_sprite_mask =
@@ -2680,6 +2748,7 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
                          PIPE_SPRITE_COORD_UPPER_LEFT);
         }
 
+        key->ubo_1_size = vc4->constbuf[PIPE_SHADER_FRAGMENT].cb[1].buffer_size;
         key->light_twoside = vc4->rasterizer->base.light_twoside;
 
         struct vc4_compiled_shader *old_fs = vc4->prog.fs;
@@ -2690,11 +2759,11 @@ vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
         vc4->dirty |= VC4_DIRTY_COMPILED_FS;
 
         if (vc4->rasterizer->base.flatshade &&
-            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
+            (!old_fs || vc4->prog.fs->color_inputs != old_fs->color_inputs)) {
                 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
         }
 
-        if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
+        if (!old_fs || vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
 }
@@ -2804,6 +2873,7 @@ fs_inputs_compare(const void *key1, const void *key2)
 
 static void
 delete_from_cache_if_matches(struct hash_table *ht,
+                             struct vc4_compiled_shader **last_compile,
                              struct hash_entry *entry,
                              struct vc4_uncompiled_shader *so)
 {
@@ -2813,6 +2883,10 @@ delete_from_cache_if_matches(struct hash_table *ht,
                 struct vc4_compiled_shader *shader = entry->data;
                 _mesa_hash_table_remove(ht, entry);
                 vc4_bo_unreference(&shader->bo);
+
+                if (shader == *last_compile)
+                        *last_compile = NULL;
+
                 ralloc_free(shader);
         }
 }
@@ -2823,11 +2897,14 @@ vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
         struct vc4_context *vc4 = vc4_context(pctx);
         struct vc4_uncompiled_shader *so = hwcso;
 
-        struct hash_entry *entry;
-        hash_table_foreach(vc4->fs_cache, entry)
-                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
-        hash_table_foreach(vc4->vs_cache, entry)
-                delete_from_cache_if_matches(vc4->vs_cache, entry, so);
+        hash_table_foreach(vc4->fs_cache, entry) {
+                delete_from_cache_if_matches(vc4->fs_cache, &vc4->prog.fs,
+                                             entry, so);
+        }
+        hash_table_foreach(vc4->vs_cache, entry) {
+                delete_from_cache_if_matches(vc4->vs_cache, &vc4->prog.vs,
+                                             entry, so);
+        }
 
         ralloc_free(so->base.ir.nir);
         free(so);
@@ -2876,7 +2953,6 @@ vc4_program_fini(struct pipe_context *pctx)
 {
         struct vc4_context *vc4 = vc4_context(pctx);
 
-        struct hash_entry *entry;
         hash_table_foreach(vc4->fs_cache, entry) {
                 struct vc4_compiled_shader *shader = entry->data;
                 vc4_bo_unreference(&shader->bo);