nir_ssa_def *shift_ssa;
nir_ssa_def *new_offset = NULL;
- b->cursor = nir_after_instr(&alu->instr);
-
/* the first src could be something like ssa_18.x, but we only want
* the single component. Otherwise the ishl/ishr/ushr could turn
* into a vec4 operation.
*/

static bool
lower_offset_for_ssbo(nir_intrinsic_instr *intrinsic, nir_builder *b,
unsigned ir3_ssbo_opcode, uint8_t offset_src_idx)
{
unsigned num_srcs = nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
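+ /* shift that converts the byte offset into an element offset:
+ * 2 for 32-bit (dword) access, reduced to 1 for 16-bit below
+ */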
+ int shift = 2;
bool has_dest = nir_intrinsic_infos[intrinsic->intrinsic].has_dest;
nir_ssa_def *new_dest = NULL;
+ /* for 16-bit ssbo access, offset is in 16-bit words instead of dwords */
+ if ((has_dest && intrinsic->dest.ssa.bit_size == 16) ||
+ (!has_dest && intrinsic->src[0].ssa->bit_size == 16))
+ shift = 1;
+
/* Here we create a new intrinsic and copy over all contents from the old one. */
nir_intrinsic_instr *new_intrinsic;
nir_src *target_src;
+ b->cursor = nir_before_instr(&intrinsic->instr);
+
/* 'offset_src_idx' holds the index of the source that represents the offset. */
new_intrinsic =
nir_intrinsic_instr_create(b->shader, ir3_ssbo_opcode);
/* First, try to fold the division into a bit-shift instruction that already
* feeds the offset, instead of emitting a fresh shift.
* Here we use the convention that shifting right is negative while shifting
* left is positive. So 'x / 4' ~ 'x >> 2' or 'x << -2'.
*/
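+ /* E.g. propagating a shift of -2 into an offset of 'x << 4' yields
+ * 'x << 2' (x * 16 / 4), with no extra shift instruction emitted.
+ */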
- nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -2);
+ nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -shift);
/* The new source that will hold the dword-offset is always the last
* one for every intrinsic.
*/
for (unsigned i = 0; i < num_srcs; i++)
new_intrinsic->src[i] = nir_src_for_ssa(intrinsic->src[i].ssa);
- for (unsigned i = 0; i < NIR_INTRINSIC_MAX_CONST_INDEX; i++)
- new_intrinsic->const_index[i] = intrinsic->const_index[i];
+ nir_intrinsic_copy_const_indices(new_intrinsic, intrinsic);
new_intrinsic->num_components = intrinsic->num_components;
- b->cursor = nir_before_instr(&intrinsic->instr);
-
- /* If we managed to propagate the division by 4, just use the new offset
- * register and don't emit the SHR.
- */
+ /* If we managed to propagate the division (by 4, or by 2 for 16-bit
+ * access), just use the new offset register and don't emit the SHR.
+ */
if (new_offset)
offset = new_offset;
else
- offset = nir_ushr(b, offset, nir_imm_int(b, 2));
+ offset = nir_ushr(b, offset, nir_imm_int(b, shift));
/* Insert the new intrinsic right before the old one. */
nir_builder_instr_insert(b, &new_intrinsic->instr);
}
static bool
-lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx)
+lower_offset_for_ubo(nir_intrinsic_instr *intrinsic, nir_builder *b, int gpu_id)
+{
+ /* We only need to lower the offset if using LDC, which takes an offset in
+ * vec4 units and has the start component baked into the instruction.
+ */
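+ /* LDC is only available on a6xx (gpu_id 600+). */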
+ if (gpu_id < 600)
+ return false;
+
+ /* TODO handle other bitsizes, including non-dword-aligned loads */
+ assert(intrinsic->dest.ssa.bit_size == 32);
+
+ b->cursor = nir_before_instr(&intrinsic->instr);
+
+ nir_intrinsic_instr *new_intrinsic =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_ubo_ir3);
+
+ debug_assert(intrinsic->dest.is_ssa);
+ debug_assert(intrinsic->src[0].is_ssa);
+ new_intrinsic->src[0] = nir_src_for_ssa(intrinsic->src[0].ssa);
+
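+ /* LDC takes the offset in vec4 (16-byte) units: fold the '>> 4' into an
+ * existing shift when possible, otherwise emit a ushr.
+ */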
+ nir_ssa_def *offset = intrinsic->src[1].ssa;
+ nir_ssa_def *new_offset = ir3_nir_try_propagate_bit_shift(b, offset, -4);
+
+ if (!new_offset)
+ new_offset = nir_ushr(b, offset, nir_imm_int(b, 4));
+
+ new_intrinsic->src[1] = nir_src_for_ssa(new_offset);
+
+ unsigned align_mul = nir_intrinsic_align_mul(intrinsic);
+ unsigned align_offset = nir_intrinsic_align_offset(intrinsic);
+
+ unsigned components = intrinsic->num_components;
+
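+ /* If the load isn't known to be vec4-aligned, load the whole vec4 and
+ * select the components we need afterwards.
+ */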
+ if (align_mul % 16 != 0)
+ components = 4;
+
+ new_intrinsic->num_components = components;
+
+ nir_ssa_dest_init(&new_intrinsic->instr, &new_intrinsic->dest,
+ components, 32, NULL);
+
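+ /* Insert the new load right before the old intrinsic. */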
+ nir_builder_instr_insert(b, &new_intrinsic->instr);
+
+ nir_ssa_def *new_dest;
+ if (align_mul % 16 == 0) {
+ /* We know that the low 4 bits of the offset are constant and equal to
+ * align_offset. Use the component offset.
+ */
+ unsigned component = align_offset / 4;
+ nir_intrinsic_set_base(new_intrinsic, component);
+ new_dest = &new_intrinsic->dest.ssa;
+ } else {
+ /* We have to assume it isn't aligned, and extract the components
+ * dynamically.
+ */
+ nir_intrinsic_set_base(new_intrinsic, 0);
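+ /* (byte offset >> 2) & 3 is the dword index within the containing vec4. */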
+ nir_ssa_def *component =
+ nir_iand(b, nir_ushr(b, offset, nir_imm_int(b, 2)), nir_imm_int(b, 3));
+ nir_ssa_def *channels[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < intrinsic->num_components; i++) {
+ nir_ssa_def *idx = nir_iadd(b, nir_imm_int(b, i), component);
+ channels[i] = nir_vector_extract(b, &new_intrinsic->dest.ssa, idx);
+ }
+
+ new_dest = nir_vec(b, channels, intrinsic->num_components);
+ }
+
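+ /* Make all users of the old load consume the new value instead. */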
+ nir_ssa_def_rewrite_uses(&intrinsic->dest.ssa,
+ nir_src_for_ssa(new_dest));
+
+ nir_instr_remove(&intrinsic->instr);
+
+ return true;
+}
+
+static bool
+lower_io_offsets_block(nir_block *block, nir_builder *b, void *mem_ctx, int gpu_id)
{
bool progress = false;
- nir_foreach_instr_safe(instr, block) {
+ nir_foreach_instr_safe (instr, block) {
if (instr->type != nir_instr_type_intrinsic)
continue;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ /* UBO */
+ if (intr->intrinsic == nir_intrinsic_load_ubo) {
+ progress |= lower_offset_for_ubo(intr, b, gpu_id);
+ continue;
+ }
+
/* SSBO */
int ir3_intrinsic;
uint8_t offset_src_idx;
}
static bool
-lower_io_offsets_func(nir_function_impl *impl)
+lower_io_offsets_func(nir_function_impl *impl, int gpu_id)
{
void *mem_ctx = ralloc_parent(impl);
nir_builder b;
nir_builder_init(&b, impl);
bool progress = false;
- nir_foreach_block_safe(block, impl) {
- progress |= lower_io_offsets_block(block, &b, mem_ctx);
+ nir_foreach_block_safe (block, impl) {
+ progress |= lower_io_offsets_block(block, &b, mem_ctx, gpu_id);
}
if (progress) {
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
}
bool
-ir3_nir_lower_io_offsets(nir_shader *shader)
+ir3_nir_lower_io_offsets(nir_shader *shader, int gpu_id)
{
bool progress = false;
- nir_foreach_function(function, shader) {
+ nir_foreach_function (function, shader) {
if (function->impl)
- progress |= lower_io_offsets_func(function->impl);
+ progress |= lower_io_offsets_func(function->impl, gpu_id);
}
return progress;
}