freedreno/ir3: set even bit for f2f16_rtne
[mesa.git] / src / freedreno / ir3 / ir3_nir_lower_io_offsets.c
index f0b4218fcb477ff9e3500459dee8286b2ae816d7..456e331aba7800fdbfb58ff9dafab9a63d943a16 100644 (file)
@@ -84,9 +84,8 @@ get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
 }
 
 static nir_ssa_def *
-check_and_propagate_bit_shift32(nir_builder *b, nir_ssa_def *offset,
-                                                               nir_alu_instr *alu_instr, int32_t direction,
-                                                               int32_t shift)
+check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
+                                                               int32_t direction, int32_t shift)
 {
        debug_assert(alu_instr->src[1].src.is_ssa);
        nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
@@ -113,8 +112,6 @@ check_and_propagate_bit_shift32(nir_builder *b, nir_ssa_def *offset,
        if (new_shift < -31 || new_shift > 31)
                return NULL;
 
-       b->cursor = nir_before_instr(&alu_instr->instr);
-
       /* Add or subtract shift depending on the final direction (SHR vs. SHL). */
        if (shift * direction < 0)
                shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
@@ -135,21 +132,29 @@ ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shi
        nir_ssa_def *shift_ssa;
        nir_ssa_def *new_offset = NULL;
 
+       b->cursor = nir_after_instr(&alu->instr);
+
+       /* the first src could be something like ssa_18.x, but we only want
+        * the single component.  Otherwise the ishl/ishr/ushr could turn
+        * into a vec4 operation:
+        */
+       nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);
+
        switch (alu->op) {
        case nir_op_ishl:
-               shift_ssa = check_and_propagate_bit_shift32(b, offset, alu, 1, shift);
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
                if (shift_ssa)
-                       new_offset = nir_ishl(b, alu->src[0].src.ssa, shift_ssa);
+                       new_offset = nir_ishl(b, src0, shift_ssa);
                break;
        case nir_op_ishr:
-               shift_ssa = check_and_propagate_bit_shift32(b, offset, alu, -1, shift);
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
                if (shift_ssa)
-                       new_offset = nir_ishr(b, alu->src[0].src.ssa, shift_ssa);
+                       new_offset = nir_ishr(b, src0, shift_ssa);
                break;
        case nir_op_ushr:
-               shift_ssa = check_and_propagate_bit_shift32(b, offset, alu, -1, shift);
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
                if (shift_ssa)
-                       new_offset = nir_ushr(b, alu->src[0].src.ssa, shift_ssa);
+                       new_offset = nir_ushr(b, src0, shift_ssa);
                break;
        default:
                return NULL;
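
The switch above relies on two constant shifts of the same value folding into one. A minimal standalone sketch of that identity, using hypothetical helper names (not part of the pass) and assuming the offset is small enough that no bits are shifted out of a 32-bit word:

#include <assert.h>
#include <stdint.h>

/* Dividing a byte offset by 4 is a right shift by 2, i.e. a shift of -2 in
 * the SHR-is-negative convention used by this pass.  If the offset was
 * already produced by "x << 4", the two shifts fold into a single "x << 2".
 */
static uint32_t dword_offset_naive(uint32_t x)  { return (x << 4) >> 2; }
static uint32_t dword_offset_folded(uint32_t x) { return x << (4 - 2); }

int main(void)
{
        for (uint32_t x = 0; x < 1024; x++)
                assert(dword_offset_naive(x) == dword_offset_folded(x));
        return 0;
}
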
@@ -163,10 +168,16 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                                          unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
 {
        unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
+       int shift = 2;
 
        bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
        nir_ssa_def *new_dest = NULL;
 
+       /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
+       if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
+               (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
+               shift = 1;
+
        /* Here we create a new intrinsic and copy over all contents from the old one. */
 
        nir_intrinsic_instr *new_intrinsic;
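
For reference, the shift chosen above maps a byte offset to an element offset in the unit the SSBO instruction expects: dwords for 32-bit accesses, halfwords for 16-bit ones. A tiny sketch of that mapping (hypothetical helper, not from the pass):

/* shift = 2 divides by 4 (dword offset), shift = 1 divides by 2 (halfword
 * offset); e.g. byte offset 12 -> dword 3, or halfword 6 for 16-bit access.
 */
static unsigned
ssbo_element_offset(unsigned byte_offset, unsigned bit_size)
{
        unsigned shift = (bit_size == 16) ? 1 : 2;
        return byte_offset >> shift;
}
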
@@ -187,7 +198,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
         * Here we use the convention that shifting right is negative while shifting
         * left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
         */
-       nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -2);
+       nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
 
        /* The new source that will hold the dword-offset is always the last
         * one for every intrinsic.
@@ -219,7 +230,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
        if (new_offset)
                offset = new_offset;
        else
-               offset = nir_ushr(b, offset, nir_imm_int(b, 2));
+               offset = nir_ushr(b, offset, nir_imm_int(b, shift));
 
        /* Insert the new intrinsic right before the old one. */
        nir_builder_instr_insert(b, &new_intrinsic->instr);
@@ -245,17 +256,101 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
        return true;
 }
 
+static bool
+lower_offset_for_ubo(nir_intrinsic_instr *intrinsic, nir_builder *b)
+{
+       /* We only need to lower the offset if using LDC. Currently we only use
+        * LDC in bindless mode. LDC was introduced on A6xx, but for now we only
+        * use bindless in turnip, which is A6xx-only.
+        *
+        * TODO: We should be using LDC always on A6xx+.
+        */
+       if (!ir3_bindless_resource(intrinsic->src[0]))
+               return false;
+
+       /* TODO handle other bitsizes, including non-dword-aligned loads */
+       assert(intrinsic->dest.ssa.bit_size == 32);
+
+       b->cursor = nir_before_instr(&intrinsic->instr);
+
+       nir_intrinsic_instr *new_intrinsic =
+               nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo_ir3);
+
+       debug_assert(intrinsic->dest.is_ssa);
+       new_intrinsic->src[0] = nir_src_for_ssa(intrinsic->src[0].ssa);
+
+       nir_ssa_def *offset = intrinsic->src[1].ssa;
+       nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -4);
+
+       if (!new_offset)
+               new_offset = nir_ushr(b, offset, nir_imm_int(b, 4));
+
+       new_intrinsic->src[1] = nir_src_for_ssa(new_offset);
+
+       unsigned align_mul = nir_intrinsic_align_mul(intrinsic);
+       unsigned align_offset = nir_intrinsic_align_offset(intrinsic);
+
+       unsigned components = intrinsic->num_components;
+
+       if (align_mul % 16 != 0)
+               components = 4;
+
+       new_intrinsic->num_components = components;
+
+       nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
+                                         components, 32, NULL);
+
+       nir_builder_instr_insert(b, &new_intrinsic->instr);
+
+       nir_ssa_def *new_dest;
+       if (align_mul % 16 == 0) {
+               /* We know that the low 4 bits of the offset are constant and equal to
+                * align_offset. Use the component offset.
+                */
+               unsigned component = align_offset / 4;
+               nir_intrinsic_set_base(new_intrinsic, component);
+               new_dest = &new_intrinsic->dest.ssa;
+       } else {
+               /* We have to assume it isn't aligned, and extract the components
+                * dynamically.
+                */
+               nir_intrinsic_set_base(new_intrinsic, 0);
+               nir_ssa_def *component =
+                       nir_iand(b, nir_ushr(b, offset, nir_imm_int(b, 2)), nir_imm_int(b, 3));
+               nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
+               for (unsigned i = 0; i < intrinsic->num_components; i++) {
+                       nir_ssa_def *idx = nir_iadd(b, nir_imm_int(b, i), component);
+                       channels[i] = nir_vector_extract(b, &new_intrinsic->dest.ssa, idx);
+               }
+
+               new_dest = nir_vec(b, channels, intrinsic->num_components);
+       }
+
+       nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
+                                                        nir_src_for_ssa(new_dest));
+
+       nir_instr_remove(&intrinsic->instr);
+
+       return true;
+}
+
 static bool
 lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
 {
        bool progress = false;
 
-       nir_foreach_instr_safe(instr, block) {
+       nir_foreach_instr_safe (instr, block) {
                if (instr->type != nir_instr_type_intrinsic)
                        continue;
 
                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
 
+               /* UBO */
+               if (intr->intrinsic == nir_intrinsic_load_ubo) {
+                       progress |= lower_offset_for_ubo(intr, b);
+                       continue;
+               }
+
                /* SSBO */
                int ir3_intrinsic;
                uint8_t offset_src_idx;
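
The UBO path above targets LDC's vec4-granular addressing: the byte offset is split into a 16-byte slot index (the new src[1]) and a starting dword component, which is either baked into the intrinsic base when align_mul guarantees it, or computed at run time as (offset >> 2) & 3. A standalone sketch of that split, with hypothetical names (not part of the pass):

struct ubo_addr {
        unsigned vec4_index;  /* becomes src[1] of load_ubo_ir3 */
        unsigned component;   /* static "base", or extracted dynamically */
};

/* e.g. byte offset 28 -> vec4_index 1, component 3 */
static struct ubo_addr
split_ubo_byte_offset(unsigned byte_offset)
{
        struct ubo_addr a = {
                .vec4_index = byte_offset >> 4,        /* which 16-byte slot */
                .component  = (byte_offset >> 2) & 3,  /* which dword within it */
        };
        return a;
}
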
@@ -278,7 +373,7 @@ lower_io_offsets_func(nir_function_impl *impl)
        nir_builder_init(&b, impl);
 
        bool progress = false;
-       nir_foreach_block_safe(block, impl) {
+       nir_foreach_block_safe (block, impl) {
                progress |= lower_io_offsets_block(block, &b, mem_ctx);
        }
 
@@ -295,7 +390,7 @@ ir3_nir_lower_io_offsets(nir_shader *shader)
 {
        bool progress = false;
 
-       nir_foreach_function(function, shader) {
+       nir_foreach_function (function, shader) {
                if (function->impl)
                        progress |= lower_io_offsets_func(function->impl);
        }