X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fbroadcom%2Fcompiler%2Fv3d40_tex.c;h=90e64f638fc75f2c8edee40bd5dd48af64bdb215;hb=HEAD;hp=30f1293c85c3974da019b6cd4508809db3e56448;hpb=85b78f96a663d11f2ec10ba6d34b4e5971f38077;p=mesa.git

diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c
index 30f1293c85c..90e64f638fc 100644
--- a/src/broadcom/compiler/v3d40_tex.c
+++ b/src/broadcom/compiler/v3d40_tex.c
@@ -22,7 +22,6 @@
  */
 
 #include "v3d_compiler.h"
-#include "compiler/nir/nir_deref.h"
 
 /* We don't do any address packing. */
 #define __gen_user_data void
@@ -68,13 +67,7 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
         };
 
-        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
-                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
-                                       !instr->is_shadow),
-
-                .unnormalized_coordinates = (instr->sampler_dim ==
-                                             GLSL_SAMPLER_DIM_RECT),
-        };
+        assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);
 
         struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                 .op = V3D_TMU_OP_REGULAR,
@@ -87,7 +80,11 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                 .disable_autolod = instr->op == nir_texop_tg4
         };
 
-        int non_array_components = instr->coord_components - instr->is_array;
+        int non_array_components =
+                instr->op != nir_texop_lod ?
+                instr->coord_components - instr->is_array :
+                instr->coord_components;
+
         struct qreg s;
 
         for (unsigned i = 0; i < instr->num_srcs; i++) {
@@ -126,8 +123,15 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                                       ntq_get_src(c, instr->src[i].src, 0),
                                       &tmu_writes);
 
-                        if (instr->op != nir_texop_txf)
+                        /* With texel fetch automatic LOD is already disabled,
+                         * and disable_autolod must not be enabled. For
+                         * non-cubes we can use the register TMUSLOD, that
+                         * implicitly sets disable_autolod.
+                         */
+                        if (instr->op != nir_texop_txf &&
+                            instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                                 p2_unpacked.disable_autolod = true;
+                        }
                         break;
 
                 case nir_tex_src_comparator:
@@ -139,7 +143,7 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                 case nir_tex_src_offset: {
                         if (nir_src_is_const(instr->src[i].src)) {
                                 p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
-                                if (instr->coord_components >= 2)
+                                if (non_array_components >= 2)
                                         p2_unpacked.offset_t =
                                                 nir_src_comp_as_int(instr->src[i].src, 1);
                                 if (non_array_components >= 3)
@@ -175,14 +179,6 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         p0_unpacked.return_words_of_texture_data =
                 nir_ssa_def_components_read(&instr->dest.ssa);
 
-        /* Word enables can't ask for more channels than the output type could
-         * provide (2 for f16, 4 for 32-bit).
-         */
-        assert(!p1_unpacked.output_type_32_bit ||
-               p0_unpacked.return_words_of_texture_data < (1 << 4));
-        assert(p1_unpacked.output_type_32_bit ||
-               p0_unpacked.return_words_of_texture_data < (1 << 2));
-
         assert(p0_unpacked.return_words_of_texture_data != 0);
 
         uint32_t p0_packed;
@@ -190,27 +186,87 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                                           (uint8_t *)&p0_packed,
                                           &p0_unpacked);
 
-        uint32_t p1_packed;
-        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
-                                          (uint8_t *)&p1_packed,
-                                          &p1_unpacked);
-
         uint32_t p2_packed;
         V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                           (uint8_t *)&p2_packed,
                                           &p2_unpacked);
 
-        /* Load unit number into the high bits of the texture or sampler
-         * address field, which will be be used by the driver to decide which
-         * texture to put in the actual address field.
+        /* We manually set the LOD Query bit (see
+         * V3D42_TMU_CONFIG_PARAMETER_2) as right now it is the only V42-specific
+         * feature over V41 that we are using
+         */
+        if (instr->op == nir_texop_lod)
+                p2_packed |= 1UL << 24;
+
+        /* Load unit number into the high bits of the texture address field,
+         * which will be used by the driver to decide which texture to put
+         * in the actual address field.
          */
         p0_packed |= unit << 24;
-        p1_packed |= unit << 24;
 
         vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
-        /* XXX perf: Can we skip p1 setup for txf ops? */
-        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
-        if (memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0)
+
+        /* Even if the texture operation doesn't need a sampler by
+         * itself, we still need to add the sampler configuration
+         * parameter if the output is 32 bit
+         */
+        bool output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
+                                   !instr->is_shadow);
+
+        /*
+         * p1 is optional, but we can skip it only if p2 can be skipped too
+         */
+        bool needs_p2_config =
+                (instr->op == nir_texop_lod ||
+                 memcmp(&p2_unpacked, &p2_unpacked_default, sizeof(p2_unpacked)) != 0);
+
+        if (output_type_32_bit ||
+            nir_tex_instr_need_sampler(instr)) {
+                struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
+                        .output_type_32_bit = output_type_32_bit,
+
+                        .unnormalized_coordinates = (instr->sampler_dim ==
+                                                     GLSL_SAMPLER_DIM_RECT),
+                };
+
+                /* Word enables can't ask for more channels than the
+                 * output type could provide (2 for f16, 4 for
+                 * 32-bit).
+                 */
+                assert(!p1_unpacked.output_type_32_bit ||
+                       p0_unpacked.return_words_of_texture_data < (1 << 4));
+                assert(p1_unpacked.output_type_32_bit ||
+                       p0_unpacked.return_words_of_texture_data < (1 << 2));
+
+                uint32_t p1_packed;
+                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
+                                                  (uint8_t *)&p1_packed,
+                                                  &p1_unpacked);
+
+                /* Load unit number into the high bits of the sampler
+                 * address field, which will be used by the driver
+                 * to decide which sampler to put in the actual
+                 * address field.
+                 */
+                p1_packed |= unit << 24;
+
+                vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
+        } else if (needs_p2_config) {
+                /* Configuration parameters need to be set up in
+                 * order, and if P2 is needed, you need to set up P1
+                 * too even if sampler info is not needed by the
+                 * texture operation. But we can set up default info,
+                 * and avoid asking the driver for the sampler state
+                 * address
+                 */
+                uint32_t p1_packed_default;
+                V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
+                                                  (uint8_t *)&p1_packed_default,
+                                                  &p1_unpacked_default);
+                vir_WRTMUC(c, QUNIFORM_CONSTANT, p1_packed_default);
+        }
+
+        if (needs_p2_config)
                 vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
 
         if (instr->op == nir_texop_txf) {
@@ -218,6 +274,8 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s, &tmu_writes);
         } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s, &tmu_writes);
+        } else if (instr->op == nir_texop_txl) {
+                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSLOD, s, &tmu_writes);
         } else {
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s, &tmu_writes);
         }
@@ -236,35 +294,32 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         }
 }
 
-static void
-type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
-{
-        *size = 1;
-        *align = 1;
-}
-
 static uint32_t
 v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
 {
         switch (instr->intrinsic) {
-        case nir_intrinsic_image_deref_load:
-        case nir_intrinsic_image_deref_store:
+        case nir_intrinsic_image_load:
+        case nir_intrinsic_image_store:
                 return V3D_TMU_OP_REGULAR;
-        case nir_intrinsic_image_deref_atomic_add:
+        case nir_intrinsic_image_atomic_add:
                 return v3d_get_op_for_atomic_add(instr, 3);
-        case nir_intrinsic_image_deref_atomic_min:
+        case nir_intrinsic_image_atomic_imin:
+                return V3D_TMU_OP_WRITE_SMIN;
+        case nir_intrinsic_image_atomic_umin:
                 return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
-        case nir_intrinsic_image_deref_atomic_max:
+        case nir_intrinsic_image_atomic_imax:
+                return V3D_TMU_OP_WRITE_SMAX;
+        case nir_intrinsic_image_atomic_umax:
                 return V3D_TMU_OP_WRITE_UMAX;
-        case nir_intrinsic_image_deref_atomic_and:
+        case nir_intrinsic_image_atomic_and:
                 return V3D_TMU_OP_WRITE_AND_READ_INC;
-        case nir_intrinsic_image_deref_atomic_or:
+        case nir_intrinsic_image_atomic_or:
                 return V3D_TMU_OP_WRITE_OR_READ_DEC;
-        case nir_intrinsic_image_deref_atomic_xor:
+        case nir_intrinsic_image_atomic_xor:
                 return V3D_TMU_OP_WRITE_XOR_READ_NOT;
-        case nir_intrinsic_image_deref_atomic_exchange:
+        case nir_intrinsic_image_atomic_exchange:
                 return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
-        case nir_intrinsic_image_deref_atomic_comp_swap:
+        case nir_intrinsic_image_atomic_comp_swap:
                 return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
         default:
                 unreachable("unknown image intrinsic");
@@ -275,11 +330,8 @@ void
 v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                 nir_intrinsic_instr *instr)
 {
-        nir_variable *var = nir_intrinsic_get_var(instr, 0);
-        const struct glsl_type *sampler_type = glsl_without_array(var->type);
-        unsigned unit = (var->data.driver_location +
-                         nir_deref_instr_get_const_offset(nir_src_as_deref(instr->src[0]),
-                                                          type_size_align_1));
+        unsigned format = nir_intrinsic_format(instr);
+        unsigned unit = nir_src_as_uint(instr->src[0]);
         int tmu_writes = 0;
 
         struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
@@ -287,7 +339,7 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
 
         struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                 .per_pixel_mask_enable = true,
-                .output_type_32_bit = v3d_gl_format_is_return_32(var->data.image.format),
+                .output_type_32_bit = v3d_gl_format_is_return_32(format),
         };
 
         struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
@@ -298,12 +350,12 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
          * need/can to do things slightly different, like not loading the
          * amount to add/sub, as that is implicit.
          */
-        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_deref_atomic_add &&
+        bool atomic_add_replaced = (instr->intrinsic == nir_intrinsic_image_atomic_add &&
                                     (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
                                      p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
 
         bool is_1d = false;
-        switch (glsl_get_sampler_dim(sampler_type)) {
+        switch (nir_intrinsic_image_dim(instr)) {
         case GLSL_SAMPLER_DIM_1D:
                 is_1d = true;
                 break;
@@ -311,11 +363,11 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                 break;
         case GLSL_SAMPLER_DIM_2D:
         case GLSL_SAMPLER_DIM_RECT:
+        case GLSL_SAMPLER_DIM_CUBE:
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                               ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                 break;
         case GLSL_SAMPLER_DIM_3D:
-        case GLSL_SAMPLER_DIM_CUBE:
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                               ntq_get_src(c, instr->src[1], 1), &tmu_writes);
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
@@ -325,7 +377,11 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                 unreachable("bad image sampler dim");
         }
 
-        if (glsl_sampler_type_is_array(sampler_type)) {
+        /* In order to fetch on a cube map, we need to interpret it as a
+         * 2D array, where the third coord is the face index.
+         */
+        if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
+            nir_intrinsic_image_array(instr)) {
                 vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                               ntq_get_src(c, instr->src[1],
                                           is_1d ? 1 : 2), &tmu_writes);
@@ -369,7 +425,7 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
         vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
 
         /* Emit the data writes for atomics or image store. */
-        if (instr->intrinsic != nir_intrinsic_image_deref_load &&
+        if (instr->intrinsic != nir_intrinsic_image_load &&
             !atomic_add_replaced) {
                 /* Vector for stores, or first atomic argument */
                 struct qreg src[4];
@@ -381,16 +437,28 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
 
                 /* Second atomic argument */
                 if (instr->intrinsic ==
-                    nir_intrinsic_image_deref_atomic_comp_swap) {
+                    nir_intrinsic_image_atomic_comp_swap) {
                         vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUD,
                                       ntq_get_src(c, instr->src[4], 0),
                                       &tmu_writes);
                 }
         }
 
+        if (vir_in_nonuniform_control_flow(c) &&
+            instr->intrinsic != nir_intrinsic_image_load) {
+                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
+                           V3D_QPU_PF_PUSHZ);
+        }
+
         vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                       &tmu_writes);
 
+        if (vir_in_nonuniform_control_flow(c) &&
+            instr->intrinsic != nir_intrinsic_image_load) {
+                struct qinst *last_inst = (struct qinst *)c->cur_block->instructions.prev;
+                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
+        }
+
         vir_emit_thrsw(c);
 
         /* The input FIFO has 16 slots across all threads, so make sure we
@@ -406,4 +474,7 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
 
         if (nir_intrinsic_dest_components(instr) == 0)
                 vir_TMUWT(c);
+
+        if (instr->intrinsic != nir_intrinsic_image_load)
+                c->tmu_dirty_rcl = true;
 }
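
Note on the packed-word manipulations used above: the diff relies on two raw bit operations, loading the texture/sampler unit into the top byte of the packed P0/P1 words ("unit << 24") and OR-ing the V4.2 LOD Query flag into bit 24 of the packed P2 word ("p2_packed |= 1UL << 24"). The small standalone C sketch below only mirrors those two operations; the helper names and demo values are hypothetical and are not part of the driver.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers illustrating the bit manipulations from the diff. */

/* "p0_packed |= unit << 24": the unit is stored in the top 8 bits of the
 * packed P0/P1 configuration word so the driver can later recover it and
 * substitute the real texture/sampler state address.
 */
static uint32_t pack_unit(uint32_t packed, uint32_t unit)
{
        assert(unit < 256 && (packed >> 24) == 0);
        return packed | (unit << 24);
}

static uint32_t unpack_unit(uint32_t packed)
{
        return packed >> 24;
}

/* "p2_packed |= 1UL << 24": the V4.2 LOD Query flag sits at bit 24 of the
 * packed P2 word, so it can be OR'd in after the V4.1 pack helper instead
 * of requiring a V4.2-specific V3D42_TMU_CONFIG_PARAMETER_2 pack call.
 */
static uint32_t set_lod_query(uint32_t p2_packed)
{
        return p2_packed | (1u << 24);
}

int main(void)
{
        uint32_t p0_packed = pack_unit(0x00001234, 5);   /* demo values only */
        uint32_t p2_packed = set_lod_query(0);

        printf("p0_packed = 0x%08x (unit %u)\n", p0_packed, unpack_unit(p0_packed));
        printf("p2_packed = 0x%08x (LOD Query bit set)\n", p2_packed);
        return 0;
}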