return -1;
}
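+/* Check whether an extra shift of 'shift' (positive for SHL, negative for
+ * SHR, following the caller's convention) can be merged into 'alu_instr',
+ * a shift by a constant amount. 'direction' is 1 for ishl and -1 for
+ * ishr/ushr. Returns an SSA def holding the adjusted shift amount to use
+ * with the original opcode, or NULL if the merge isn't safe.
+ */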
+static nir_ssa_def *
+check_and_propagate_bit_shift32(nir_builder *b, nir_alu_instr *alu_instr,
+ int32_t direction, int32_t shift)
+{
+ debug_assert(alu_instr->src[1].src.is_ssa);
+ nir_ssa_def *shift_ssa = alu_instr->src[1].src.ssa;
+
+ /* Only propagate if the shift is a const value, so we can check the value
+ * range statically.
+ */
+ nir_const_value *const_val = nir_src_as_const_value(alu_instr->src[1].src);
+ if (!const_val)
+ return NULL;
+
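+ /* Normalize to the signed convention (positive = SHL, negative = SHR)
+ * so the two shift amounts can simply be added together.
+ */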
+ int32_t current_shift = const_val[0].i32 * direction;
+ int32_t new_shift = current_shift + shift;
+
+ /* If the merge would reverse the direction, bail out.
+ * E.g., 'x << 2' followed by 'x >> 4' is not 'x >> 2'.
+ */
+ if (current_shift * new_shift < 0)
+ return NULL;
+
+ /* If the propagation would overflow an int32_t, bail out too, to be on
+ * the safe side.
+ */
+ if (new_shift < -31 || new_shift > 31)
+ return NULL;
+
+ /* Add or subtract the extra shift depending on whether it goes in the
+ * same direction as the existing one (SHR vs. SHL).
+ */
+ if (shift * direction < 0)
+ shift_ssa = nir_isub(b, shift_ssa, nir_imm_int(b, abs(shift)));
+ else
+ shift_ssa = nir_iadd(b, shift_ssa, nir_imm_int(b, abs(shift)));
+
+ return shift_ssa;
+}
+
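+/* Try to fold a constant shift of 'shift' (negative for a right shift)
+ * into the instruction defining 'offset', if that instruction is itself
+ * an ishl/ishr/ushr by a constant. Returns the replacement offset def,
+ * or NULL if nothing could be propagated.
+ */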
+nir_ssa_def *
+ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift)
+{
+ nir_instr *offset_instr = offset->parent_instr;
+ if (offset_instr->type != nir_instr_type_alu)
+ return NULL;
+
+ nir_alu_instr *alu = nir_instr_as_alu(offset_instr);
+ nir_ssa_def *shift_ssa;
+ nir_ssa_def *new_offset = NULL;
+
+ /* The first src could be something like ssa_18.x, but we only want
+ * the single component. Otherwise the ishl/ishr/ushr could turn
+ * into a vec4 operation.
+ */
+ nir_ssa_def *src0 = nir_mov_alu(b, alu->src[0], 1);
+
+ switch (alu->op) {
+ case nir_op_ishl:
+ shift_ssa = check_and_propagate_bit_shift32(b, alu, 1, shift);
+ if (shift_ssa)
+ new_offset = nir_ishl(b, src0, shift_ssa);
+ break;
+ case nir_op_ishr:
+ shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
+ if (shift_ssa)
+ new_offset = nir_ishr(b, src0, shift_ssa);
+ break;
+ case nir_op_ushr:
+ shift_ssa = check_and_propagate_bit_shift32(b, alu, -1, shift);
+ if (shift_ssa)
+ new_offset = nir_ushr(b, src0, shift_ssa);
+ break;
+ default:
+ return NULL;
+ }
+
+ return new_offset;
+}
+
static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
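+ /* By default the byte offset is turned into a dword offset: 'bytes >> 2'. */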
+ int shift = 2;
bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
nir_ssa_def *new_dest = NULL;
+ /* For 16-bit SSBO access, the offset is in 16-bit words instead of dwords. */
+ if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
+ (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
+ shift = 1;
+
/* Here we create a new intrinsic and copy over all contents from the old one. */
nir_intrinsic_instr *new_intrinsic;
nir_src *target_src;
+ b->cursor = nir_before_instr(&intrinsic->instr);
+
/* 'offset_src_idx' holds the index of the source that represents the offset. */
new_intrinsic =
nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
debug_assert(intrinsic->src[offset_src_idx].is_ssa);
nir_ssa_def *offset = intrinsic->src[offset_src_idx].ssa;
+ /* Since we don't have value range checking, we first try to propagate
+ * the division ('offset >> shift') into another bit-shift instruction
+ * that possibly defines the offset. If that's the case, we emit a
+ * similar instruction with the adjusted (merged) shift value.
+ *
+ * Here we use the convention that shifting right is negative while
+ * shifting left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
+ */
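+ /* For example, when the offset comes from 'idx << 2' (a common array
+ * indexing pattern) and shift is 2, the '>> 2' folds away entirely:
+ * ishl(idx, 2) followed by ushr(..., 2) becomes ishl(idx, 0).
+ */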
+ nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
+
/* The new source that will hold the dword-offset is always the last
* one for every intrinsic.
*/
for (unsigned i = 0; i < num_srcs; i++)
new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);
- for (unsigned i = 0; i < NIR_INTRINSIC_MAX_CONST_INDEX; i++)
- new_intrinsic->const_index[i] = intrinsic->const_index[i];
+ nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);
new_intrinsic->num_components = intrinsic->num_components;
- b->cursor = nir_before_instr(&intrinsic->instr);
- nir_ssa_def *offset_div_4 = nir_ushr(b, offset, nir_imm_int(b, 2));
- debug_assert(offset_div_4);
+ /* If we managed to propagate the division, just use the new offset
+ * and don't emit the SHR ourselves.
+ */
+ if (new_offset)
+ offset = new_offset;
+ else
+ offset = nir_ushr(b, offset, nir_imm_int(b, shift));
/* Insert the new intrinsic right before the old one. */
- b->cursor = nir_before_instr(&intrinsic->instr);
nir_builder_instr_insert(b, &new_intrinsic->instr);
/* Replace the last source of the new intrinsic by the result of
 * the offset shift.
 */
nir_instr_rewrite_src(&new_intrinsic->instr,
target_src,
- nir_src_for_ssa(offset_div_4));
+ nir_src_for_ssa(offset));
if (has_dest) {
/* Replace the uses of the original destination by that
 * of the new intrinsic.
 */
}
static bool
-lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
+lower_offset_for_ubo(nir_intrinsic_instr *intrinsic, nir_builder *b, int gpu_id)
+{
+ /* We only need to lower the offset when using LDC, which takes an
+ * offset in vec4 units and has the start component baked into the
+ * instruction.
+ */
+ if (gpu_id < 600)
+ return false;
+
+ /* TODO handle other bitsizes, including non-dword-aligned loads */
+ assert(intrinsic->dest.ssa.bit_size == 32);
+
+ b->cursor = nir_before_instr(&intrinsic->instr);
+
+ nir_intrinsic_instr *new_intrinsic =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo_ir3);
+
+ debug_assert(intrinsic->dest.is_ssa);
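+ /* src[0], the UBO block index, is carried over unchanged. */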
+ new_intrinsic->src[0] = nir_src_for_ssa(intrinsic->src[0].ssa);
+
+ nir_ssa_def *offset = intrinsic->src[1].ssa;
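+ /* LDC wants the offset in vec4 (16-byte) units, so divide the byte
+ * offset by 16 ('offset >> 4'), folding the shift into a defining
+ * shift instruction when possible.
+ */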
+ nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -4);
+
+ if (!new_offset)
+ new_offset = nir_ushr(b, offset, nir_imm_int(b, 4));
+
+ new_intrinsic->src[1] = nir_src_for_ssa(new_offset);
+
+ unsigned align_mul = nir_intrinsic_align_mul(intrinsic);
+ unsigned align_offset = nir_intrinsic_align_offset(intrinsic);
+
+ unsigned components = intrinsic->num_components;
+
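+ /* If the load is not known to be vec4-aligned, we don't know at compile
+ * time which components of the fetched vec4 are needed, so load all four
+ * and extract the right ones dynamically below.
+ */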
+ if (align_mul % 16 != 0)
+ components = 4;
+
+ new_intrinsic->num_components = components;
+
+ nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
+ components, 32, NULL);
+
+ nir_builder_instr_insert(b, &new_intrinsic->instr);
+
+ nir_ssa_def *new_dest;
+ if (align_mul % 16 == 0) {
+ /* We know that the low 4 bits of the offset are constant and equal to
+ * align_offset % 16, so the component within the vec4 can be baked
+ * into the instruction.
+ */
+ unsigned component = (align_offset % 16) / 4;
+ nir_intrinsic_set_base(new_intrinsic, component);
+ new_dest = &new_intrinsic->dest.ssa;
+ } else {
+ /* We have to assume it isn't aligned, and extract the components
+ * dynamically.
+ */
+ nir_intrinsic_set_base(new_intrinsic, 0);
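+ /* Dword index of the first needed component within the vec4:
+ * (byte offset >> 2) & 3.
+ */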
+ nir_ssa_def *component =
+ nir_iand(b, nir_ushr(b, offset, nir_imm_int(b, 2)), nir_imm_int(b, 3));
+ nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < intrinsic->num_components; i++) {
+ nir_ssa_def *idx = nir_iadd(b, nir_imm_int(b, i), component);
+ channels[i] = nir_vector_extract(b, &new_intrinsic->dest.ssa, idx);
+ }
+
+ new_dest = nir_vec(b, channels, intrinsic->num_components);
+ }
+
+ nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
+ nir_src_for_ssa(new_dest));
+
+ nir_instr_remove(&intrinsic->instr);
+
+ return true;
+}
+
+static bool
+lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx, int gpu_id)
{
bool progress = false;
- nir_foreach_instr_safe(instr, block) {
+ nir_foreach_instr_safe (instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ /* UBO */
+ if (intr->intrinsic == nir_intrinsic_load_ubo) {
+ progress |= lower_offset_for_ubo(intr, b, gpu_id);
+ continue;
+ }
+
/* SSBO */
int ir3_intrinsic;
uint8_t offset_src_idx;
}
static bool
-lower_io_offsets_func(nir_function_impl *impl)
+lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
{
void *mem_ctx = ralloc_parent(impl);
nir_builder b;
nir_builder_init(&b, impl);
bool progress = false;
- nir_foreach_block_safe(block, impl) {
- progress |= lower_io_offsets_block(block, &b, mem_ctx);
+ nir_foreach_block_safe (block, impl) {
+ progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
}
if (progress) {
}
bool
-ir3_nir_lower_io_offsets(nir_shader *shader)
+ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
{
bool progress = false;
- nir_foreach_function(function, shader) {
+ nir_foreach_function (function, shader) {
if (function->impl)
- progress |= lower_io_offsets_func(function->impl);
+ progress |= lower_io_offsets_func(function->impl, gpu_id);
}
return progress;