Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src / freedreno / ir3 / ir3_nir_lower_io_offsets.c
index 264f106454518628bacf883a79c8726a3f820be9..110197d93b9bcc0a95d82969cccc2be3ce206281 100644 (file)
@@ -83,20 +83,106 @@ get_ir3_intrinsic_for_ssbo_intrinsic(unsigned intrinsic,
        return -1;
 }
 
+static nir_ssa_def *
+check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
+                                                               int32_t direction, int32_t shift)
+{
+       debug_assert(alu_instr->src[1].src.is_ssa);
+       nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
+
+       /* Only propagate if the shift is a const value so we can check the
+        * value range statically.
+        */
+       nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
+       if (!const_val)
+               return NULL;
+
+       int32_t current_shift = const_val[0].i32 * direction;
+       int32_t new_shift = current_shift + shift;
+
+       /* If the merge would reverse the direction, bail out.
+        * e.g., 'x << 2' then 'x >> 4' is not 'x >> 2'.
+        */
+       if (current_shift * new_shift < 0)
+               return NULL;
+
+       /* If the merged shift would fall outside the range a 32-bit shift can
+        * encode, bail out as well, to be on the safe side.
+        */
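+       /* (Illustrative aside, not from the original patch: merging 'x << 30'
+        * with a further '<< 5' would need a shift of 35, which no 32-bit
+        * shift can express.)
+        */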
+       if (new_shift < -31 || new_shift > 31)
+               return NULL;
+
+       /* Add or subtract the shift depending on the final direction (SHR vs. SHL). */
+       if (shift * direction < 0)
+               shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
+       else
+               shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));
+
+       return shift_ssa;
+}
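+
+/* Worked example (an editorial sketch, not part of the original patch):
+ * for an offset defined as 'x << 4' (current_shift = +4), propagating
+ * shift = -2 gives new_shift = +2; since shift * direction = -2 < 0, we
+ * emit 'shift_ssa - 2' and the caller rebuilds the offset as 'x << 2'.
+ */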
+
+nir_ssa_def *
+ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
+{
+       nir_instr *offset_instr = offset->parent_instr;
+       if (offset_instr->type != nir_instr_type_alu)
+               return NULL;
+
+       nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
+       nir_ssa_def *shift_ssa;
+       nir_ssa_def *new_offset = NULL;
+
+       /* The first src could be something like ssa_18.x, but we only want
+        * the single component.  Otherwise the ishl/ishr/ushr could turn
+        * into a vec4 operation:
+        */
+       nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);
+
+       switch (alu->op) {
+       case nir_op_ishl:
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
+               if (shift_ssa)
+                       new_offset = nir_ishl(b, src0, shift_ssa);
+               break;
+       case nir_op_ishr:
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
+               if (shift_ssa)
+                       new_offset = nir_ishr(b, src0, shift_ssa);
+               break;
+       case nir_op_ushr:
+               shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
+               if (shift_ssa)
+                       new_offset = nir_ushr(b, src0, shift_ssa);
+               break;
+       default:
+               return NULL;
+       }
+
+       return new_offset;
+}
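+
+/* End-to-end sketch (illustrative, not from the original patch): a 32-bit
+ * SSBO load whose byte offset is 'ssa_2 = ishl ssa_1, 4' would otherwise
+ * need an extra 'ushr ssa_2, 2' to form the dword offset; the propagation
+ * above rebuilds the offset as 'ishl ssa_1, 2' instead, and no ushr is
+ * emitted.
+ */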
+
 static bool
 lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
                                          unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
 {
        unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
+       int shift = 2;
 
        bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
        nir_ssa_def *new_dest = NULL;
 
+       /* For 16-bit SSBO access, the offset is in 16-bit words instead of dwords. */
+       if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
+               (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
+               shift = 1;
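+       /* For example (an illustrative aside, not in the original patch):
+        * a byte offset of 24 becomes dword offset 24 >> 2 = 6 for a 32-bit
+        * access, but 16-bit-word offset 24 >> 1 = 12 for a 16-bit access.
+        */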
+
        /* Here we create a new intrinsic and copy over all contents from the old one. */
 
        nir_intrinsic_instr *new_intrinsic;
        nir_src *target_src;
 
+       b->cursor = nir_before_instr(&intrinsic->instr);
+
        /* 'offset_src_idx' holds the index of the source that represents the offset. */
        new_intrinsic =
                nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
@@ -104,6 +190,16 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
        debug_assert(intrinsic->src[offset_src_idx].is_ssa);
        nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
 
+       /* Since we don't have value range checking, we first try to propagate
+        * the division ('offset >> shift') into another bit-shift instruction
+        * that may define the offset. If that's the case, we emit a similar
+        * instruction that adjusts (merges) the shift value.
+        *
+        * Here we use the convention that shifting right is negative while
+        * shifting left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
+        */
+       nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
+
        /* The new source that will hold the dword-offset is always the last
         * one for every intrinsic.
         */
@@ -121,17 +217,19 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
        for (unsigned i = 0; i < num_srcs; i++)
                new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);
 
-       for (unsigned i = 0; i < NIR_INTRINSIC_MAX_CONST_INDEX; i++)
-               new_intrinsic->const_index[i] = intrinsic->const_index[i];
+       nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);
 
        new_intrinsic->num_components = intrinsic->num_components;
 
-       b->cursor = nir_before_instr(&intrinsic->instr);
-       nir_ssa_def *offset_div_4 = nir_ushr(b, offset, nir_imm_int(b, 2));
-       debug_assert(offset_div_4);
+       /* If we managed to propagate the division, just use the new offset
+        * and don't emit the SHR.
+        */
+       if (new_offset)
+               offset = new_offset;
+       else
+               offset = nir_ushr(b, offset, nir_imm_int(b, shift));
 
        /* Insert the new intrinsic right before the old one. */
-       b->cursor = nir_before_instr(&intrinsic->instr);
        nir_builder_instr_insert(b, &new_intrinsic->instr);
 
        /* Replace the last source of the new intrinsic by the result of
@@ -139,7 +237,7 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
         */
        nir_instr_rewrite_src(&new_intrinsic->instr,
                                                  target_src,
-                                                 nir_src_for_ssa(offset_div_4));
+                                                 nir_src_for_ssa(offset));
 
        if (has_dest) {
                /* Replace the uses of the original destination by that
@@ -156,11 +254,11 @@ lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
 }
 
 static bool
-lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
+lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx, int gpu_id)
 {
        bool progress = false;
 
-       nir_foreach_instr_safe(instr, block) {
+       nir_foreach_instr_safe (instr, block) {
                if (instr->type != nir_instr_type_intrinsic)
                        continue;
 
@@ -181,15 +279,15 @@ lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
 }
 
 static bool
-lower_io_offsets_func(nir_function_impl *impl)
+lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
 {
        void *mem_ctx = ralloc_parent(impl);
        nir_builder b;
        nir_builder_init(&b, impl);
 
        bool progress = false;
-       nir_foreach_block_safe(block, impl) {
-               progress |= lower_io_offsets_block(block, &b, mem_ctx);
+       nir_foreach_block_safe (block, impl) {
+               progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
        }
 
        if (progress) {
@@ -201,13 +299,13 @@ lower_io_offsets_func(nir_function_impl *impl)
 }
 
 bool
-ir3_nir_lower_io_offsets(nir_shader *shader)
+ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
 {
        bool progress = false;
 
-       nir_foreach_function(function, shader) {
+       nir_foreach_function (function, shader) {
                if (function->impl)
-                       progress |= lower_io_offsets_func(function->impl);
+                       progress |= lower_io_offsets_func(function->impl, gpu_id);
        }
 
        return progress;