{
struct qinst *inst = vir_NOP(c);
inst->qpu.sig.wrtmuc = true;
- inst->has_implicit_uniform = true;
- inst->src[0] = vir_uniform(c, contents, data);
+ inst->uniform = vir_get_uniform_index(c, contents, data);
}
static const struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked_default = {
case nir_tex_src_offset: {
if (nir_src_is_const(instr->src[i].src)) {
- nir_const_value *offset =
- nir_src_as_const_value(instr->src[i].src);
-
- p2_unpacked.offset_s = offset->i32[0];
+ p2_unpacked.offset_s = nir_src_comp_as_int(instr->src[i].src, 0);
if (instr->coord_components >= 2)
- p2_unpacked.offset_t = offset->i32[1];
- if (instr->coord_components >= 3)
- p2_unpacked.offset_r = offset->i32[2];
+ p2_unpacked.offset_t =
+ nir_src_comp_as_int(instr->src[i].src, 1);
+ if (non_array_components >= 3)
+ p2_unpacked.offset_r =
+ nir_src_comp_as_int(instr->src[i].src, 2);
} else {
struct qreg mask = vir_uniform_ui(c, 0xf);
struct qreg x, y, offset;
assert(p1_unpacked.output_type_32_bit ||
p0_unpacked.return_words_of_texture_data < (1 << 2));
+ assert(p0_unpacked.return_words_of_texture_data != 0);
+
uint32_t p0_packed;
V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
(uint8_t *)&p0_packed,
*align = 1;
}
+/* Returns the V3D TMU operation (for TMU config parameter 2) that
+ * implements the given image load/store/atomic NIR intrinsic.
+ *
+ * Plain loads and stores use the regular TMU path; each atomic maps to
+ * the dedicated TMU write op.
+ *
+ * NOTE(review): atomic_min/atomic_max map to the unsigned TMU ops
+ * (WRITE_UMIN_FULL_L1_CLEAR / WRITE_UMAX) — presumably signed image
+ * atomics are lowered or unsupported at this point; confirm against the
+ * callers/NIR lowering.
+ */
+static uint32_t
+v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
+{
+        switch (instr->intrinsic) {
+        case nir_intrinsic_image_deref_load:
+        case nir_intrinsic_image_deref_store:
+                return V3D_TMU_OP_REGULAR;
+        case nir_intrinsic_image_deref_atomic_add:
+                return V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
+        case nir_intrinsic_image_deref_atomic_min:
+                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
+        case nir_intrinsic_image_deref_atomic_max:
+                return V3D_TMU_OP_WRITE_UMAX;
+        case nir_intrinsic_image_deref_atomic_and:
+                return V3D_TMU_OP_WRITE_AND_READ_INC;
+        case nir_intrinsic_image_deref_atomic_or:
+                return V3D_TMU_OP_WRITE_OR_READ_DEC;
+        case nir_intrinsic_image_deref_atomic_xor:
+                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
+        case nir_intrinsic_image_deref_atomic_exchange:
+                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
+        case nir_intrinsic_image_deref_atomic_comp_swap:
+                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
+        default:
+                unreachable("unknown image intrinsic");
+        };
+}
+
void
v3d40_vir_emit_image_load_store(struct v3d_compile *c,
nir_intrinsic_instr *instr)
/* XXX perf: We should turn add/sub of 1 to inc/dec. Perhaps NIR
* wants to have support for inc/dec?
*/
- switch (instr->intrinsic) {
- case nir_intrinsic_image_deref_load:
- case nir_intrinsic_image_deref_store:
- p2_unpacked.op = V3D_TMU_OP_REGULAR;
- break;
- case nir_intrinsic_image_deref_atomic_add:
- p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
- break;
- case nir_intrinsic_image_deref_atomic_min:
- p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
- break;
-
- case nir_intrinsic_image_deref_atomic_max:
- p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
- break;
- case nir_intrinsic_image_deref_atomic_and:
- p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
- break;
- case nir_intrinsic_image_deref_atomic_or:
- p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
- break;
- case nir_intrinsic_image_deref_atomic_xor:
- p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
- break;
- case nir_intrinsic_image_deref_atomic_exchange:
- p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
- break;
- case nir_intrinsic_image_deref_atomic_comp_swap:
- p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
- break;
- default:
- unreachable("unknown image intrinsic");
- };
+ p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);
bool is_1d = false;
switch (glsl_get_sampler_dim(sampler_type)) {