case nir_intrinsic_image_deref_atomic_add:
return v3d_get_op_for_atomic_add(instr, 3);
case nir_intrinsic_image_deref_atomic_imin:
+ return V3D_TMU_OP_WRITE_SMIN;
case nir_intrinsic_image_deref_atomic_umin:
return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
case nir_intrinsic_image_deref_atomic_imax:
+ return V3D_TMU_OP_WRITE_SMAX;
case nir_intrinsic_image_deref_atomic_umax:
return V3D_TMU_OP_WRITE_UMAX;
case nir_intrinsic_image_deref_atomic_and:
}
}
+ if (vir_in_nonuniform_control_flow(c) &&
+ instr->intrinsic != nir_intrinsic_image_deref_load) {
+ vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
+ V3D_QPU_PF_PUSHZ);
+ }
+
vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
&tmu_writes);
+ if (vir_in_nonuniform_control_flow(c) &&
+ instr->intrinsic != nir_intrinsic_image_deref_load) {
+ struct qinst *last_inst = (struct qinst *)c->cur_block->instructions.prev;
+ vir_set_cond(last_inst, V3D_QPU_COND_IFA);
+ }
+
vir_emit_thrsw(c);
/* The input FIFO has 16 slots across all threads, so make sure we
if (nir_intrinsic_dest_components(instr) == 0)
vir_TMUWT(c);
+
+ if (instr->intrinsic != nir_intrinsic_image_deref_load)
+ c->tmu_dirty_rcl = true;
}