v3d: Fix predication with atomic image operations
diff --git a/src/broadcom/compiler/v3d40_tex.c b/src/broadcom/compiler/v3d40_tex.c
index 3bed32d839fb603d9abfeb4c87efcfe1b4de7bc0..379f90c146a27f465189c55fb9561b71bb6cd9e8 100644
--- a/src/broadcom/compiler/v3d40_tex.c
+++ b/src/broadcom/compiler/v3d40_tex.c
@@ -142,7 +142,7 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
                                 if (instr->coord_components >= 2)
                                         p2_unpacked.offset_t =
                                                 nir_src_comp_as_int(instr->src[i].src, 1);
-                                if (instr->coord_components >= 3)
+                                if (non_array_components >= 3)
                                         p2_unpacked.offset_r =
                                                 nir_src_comp_as_int(instr->src[i].src, 2);
                         } else {
@@ -183,6 +183,8 @@ v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
         assert(p1_unpacked.output_type_32_bit ||
                p0_unpacked.return_words_of_texture_data < (1 << 2));
 
+        assert(p0_unpacked.return_words_of_texture_data != 0);
+
         uint32_t p0_packed;
         V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                           (uint8_t *)&p0_packed,
@@ -241,6 +243,38 @@ type_size_align_1(const struct glsl_type *type, unsigned *size, unsigned *align)
         *align = 1;
 }
 
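+/* Maps an image load/store/atomic intrinsic to the TMU operation that
+ * implements it. Note that for atomic_add, v3d_get_op_for_atomic_add() may
+ * return the inc/dec TMU ops when the operand is a constant +1/-1.
+ */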
+static uint32_t
+v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
+{
+        switch (instr->intrinsic) {
+        case nir_intrinsic_image_deref_load:
+        case nir_intrinsic_image_deref_store:
+                return V3D_TMU_OP_REGULAR;
+        case nir_intrinsic_image_deref_atomic_add:
+                return v3d_get_op_for_atomic_add(instr, 3);
+        case nir_intrinsic_image_deref_atomic_imin:
+                return V3D_TMU_OP_WRITE_SMIN;
+        case nir_intrinsic_image_deref_atomic_umin:
+                return V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
+        case nir_intrinsic_image_deref_atomic_imax:
+                return V3D_TMU_OP_WRITE_SMAX;
+        case nir_intrinsic_image_deref_atomic_umax:
+                return V3D_TMU_OP_WRITE_UMAX;
+        case nir_intrinsic_image_deref_atomic_and:
+                return V3D_TMU_OP_WRITE_AND_READ_INC;
+        case nir_intrinsic_image_deref_atomic_or:
+                return V3D_TMU_OP_WRITE_OR_READ_DEC;
+        case nir_intrinsic_image_deref_atomic_xor:
+                return V3D_TMU_OP_WRITE_XOR_READ_NOT;
+        case nir_intrinsic_image_deref_atomic_exchange:
+                return V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
+        case nir_intrinsic_image_deref_atomic_comp_swap:
+                return V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
+        default:
+                unreachable("unknown image intrinsic");
+        }
+}
+
 void
 v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                                 nir_intrinsic_instr *instr)
@@ -262,42 +296,15 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
 
         struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = { 0 };
 
-        /* XXX perf: We should turn add/sub of 1 to inc/dec.  Perhaps NIR
-         * wants to have support for inc/dec?
-         */
-        switch (instr->intrinsic) {
-        case nir_intrinsic_image_deref_load:
-        case nir_intrinsic_image_deref_store:
-                p2_unpacked.op = V3D_TMU_OP_REGULAR;
-                break;
-        case nir_intrinsic_image_deref_atomic_add:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_ADD_READ_PREFETCH;
-                break;
-        case nir_intrinsic_image_deref_atomic_min:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_UMIN_FULL_L1_CLEAR;
-                break;
+        p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);
 
-        case nir_intrinsic_image_deref_atomic_max:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_UMAX;
-                break;
-        case nir_intrinsic_image_deref_atomic_and:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_AND_READ_INC;
-                break;
-        case nir_intrinsic_image_deref_atomic_or:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_OR_READ_DEC;
-                break;
-        case nir_intrinsic_image_deref_atomic_xor:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_XOR_READ_NOT;
-                break;
-        case nir_intrinsic_image_deref_atomic_exchange:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_XCHG_READ_FLUSH;
-                break;
-        case nir_intrinsic_image_deref_atomic_comp_swap:
-                p2_unpacked.op = V3D_TMU_OP_WRITE_CMPXCHG_READ_FLUSH;
-                break;
-        default:
-                unreachable("unknown image intrinsic");
-        };
+        /* If we were able to replace the atomic_add with an inc/dec TMU
+         * operation, then we need to do things slightly differently, such
+         * as not loading the amount to add/sub, as that is implicit with
+         * inc/dec.
+         */
+        bool atomic_add_replaced =
+                (instr->intrinsic == nir_intrinsic_image_deref_atomic_add &&
+                 (p2_unpacked.op == V3D_TMU_OP_WRITE_AND_READ_INC ||
+                  p2_unpacked.op == V3D_TMU_OP_WRITE_OR_READ_DEC));
 
         bool is_1d = false;
         switch (glsl_get_sampler_dim(sampler_type)) {
@@ -366,7 +373,8 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                 vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);
 
         /* Emit the data writes for atomics or image store. */
-        if (instr->intrinsic != nir_intrinsic_image_deref_load) {
+        if (instr->intrinsic != nir_intrinsic_image_deref_load &&
+            !atomic_add_replaced) {
                 /* Vector for stores, or first atomic argument */
                 struct qreg src[4];
                 for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
@@ -384,9 +392,21 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
                 }
         }
 
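+        /* Under non-uniform control flow only the active channels may
+         * execute the store/atomic. Set the flags from c->execute (zero
+         * means the channel is active) so the TMUSF write that triggers the
+         * operation can be predicated below.
+         */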
+        if (vir_in_nonuniform_control_flow(c) &&
+            instr->intrinsic != nir_intrinsic_image_deref_load) {
+                vir_set_pf(vir_MOV_dest(c, vir_nop_reg(), c->execute),
+                           V3D_QPU_PF_PUSHZ);
+        }
+
         vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, ntq_get_src(c, instr->src[1], 0),
                       &tmu_writes);
 
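+        /* Predicate the TMUSF write we just emitted on those flags, so that
+         * inactive channels don't trigger the TMU operation.
+         */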
+        if (vir_in_nonuniform_control_flow(c) &&
+            instr->intrinsic != nir_intrinsic_image_deref_load) {
+                struct qinst *last_inst =
+                        (struct qinst *)c->cur_block->instructions.prev;
+                vir_set_cond(last_inst, V3D_QPU_COND_IFA);
+        }
+
         vir_emit_thrsw(c);
 
         /* The input FIFO has 16 slots across all threads, so make sure we
@@ -402,4 +422,7 @@ v3d40_vir_emit_image_load_store(struct v3d_compile *c,
 
         if (nir_intrinsic_dest_components(instr) == 0)
                 vir_TMUWT(c);
+
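+        /* Stores and atomics write memory through the TMU, so flag the
+         * shader as TMU-dirty for the RCL.
+         */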
+        if (instr->intrinsic != nir_intrinsic_image_deref_load)
+                c->tmu_dirty_rcl = true;
 }