}
static void
-rewrite_src_with_bti(nir_builder *b, nir_instr *instr,
- nir_src *src, uint32_t offset)
+rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
+ nir_instr *instr, nir_src *src,
+ enum iris_surface_group group)
{
- assert(offset != 0xd0d0d0d0);
+ assert(bt->offsets[group] != 0xd0d0d0d0);
b->cursor = nir_before_instr(instr);
nir_ssa_def *bti;
if (nir_src_is_const(*src)) {
- bti = nir_imm_intN_t(b, nir_src_as_uint(*src) + offset,
+ bti = nir_imm_intN_t(b, nir_src_as_uint(*src) + bt->offsets[group],
src->ssa->bit_size);
} else {
- bti = nir_iadd_imm(b, src->ssa, offset);
+ bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
}
nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
}
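The rewritten helper takes the binding table and a surface group instead of a raw offset, so every lookup goes through bt->offsets[]. As a point of reference, here is a minimal sketch of the definitions the new code assumes (illustrative only, not the verbatim iris headers; the enum ordering is a guess):

enum iris_surface_group {
   IRIS_SURFACE_GROUP_RENDER_TARGET,
   IRIS_SURFACE_GROUP_CS_WORK_GROUPS,
   IRIS_SURFACE_GROUP_TEXTURE,
   IRIS_SURFACE_GROUP_IMAGE,
   IRIS_SURFACE_GROUP_UBO,
   IRIS_SURFACE_GROUP_SSBO,
   IRIS_SURFACE_GROUP_COUNT,
};

struct iris_binding_table {
   uint32_t size_bytes;

   /* Offset (in binding table slots) where each group starts, or the
    * 0xd0d0d0d0 poison value if the shader does not use that group. */
   uint32_t offsets[IRIS_SURFACE_GROUP_COUNT];
};

Keeping the offsets in one enum-indexed array lets the setup code, the BTI rewriting pass, and the state upload code below share a single poison-value convention instead of duplicating it per field.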
const struct shader_info *info = &nir->info;
memset(bt, 0, sizeof(*bt));
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
+ bt->offsets[i] = 0xd0d0d0d0;
/* Calculate the initial binding table index for each group. */
uint32_t next_offset;
if (info->stage == MESA_SHADER_FRAGMENT) {
next_offset = num_render_targets;
+ bt->offsets[IRIS_SURFACE_GROUP_RENDER_TARGET] = 0;
} else if (info->stage == MESA_SHADER_COMPUTE) {
next_offset = 1;
+ bt->offsets[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 0;
} else {
next_offset = 0;
}
unsigned num_textures = util_last_bit(info->textures_used);
if (num_textures) {
- bt->texture_start = next_offset;
+ bt->offsets[IRIS_SURFACE_GROUP_TEXTURE] = next_offset;
next_offset += num_textures;
- } else {
- bt->texture_start = 0xd0d0d0d0;
}
if (info->num_images) {
- bt->image_start = next_offset;
+ bt->offsets[IRIS_SURFACE_GROUP_IMAGE] = next_offset;
next_offset += info->num_images;
- } else {
- bt->image_start = 0xd0d0d0d0;
}
/* Allocate a slot in the UBO section for NIR constants if present. */
if (num_cbufs) {
//assert(info->num_ubos <= BRW_MAX_UBO);
- bt->ubo_start = next_offset;
+ bt->offsets[IRIS_SURFACE_GROUP_UBO] = next_offset;
next_offset += num_cbufs;
- } else {
- bt->ubo_start = 0xd0d0d0d0;
}
if (info->num_ssbos || info->num_abos) {
- bt->ssbo_start = next_offset;
+ bt->offsets[IRIS_SURFACE_GROUP_SSBO] = next_offset;
// XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
next_offset += IRIS_MAX_ABOS + info->num_ssbos;
- } else {
- bt->ssbo_start = 0xd0d0d0d0;
}
bt->size_bytes = next_offset * 4;
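To make the layout concrete, take hypothetical counts for a fragment shader: 2 render targets, 3 textures, 1 image, 2 cbufs, and 1 SSBO (counts invented for illustration, and assuming IRIS_MAX_ABOS is 16 per the "wasting 16 binding table slots for ABOs" comment above). The groups then land at:

   IRIS_SURFACE_GROUP_RENDER_TARGET -> 0   (slots 0-1)
   IRIS_SURFACE_GROUP_TEXTURE       -> 2   (slots 2-4)
   IRIS_SURFACE_GROUP_IMAGE         -> 5   (slot 5)
   IRIS_SURFACE_GROUP_UBO           -> 6   (slots 6-7)
   IRIS_SURFACE_GROUP_SSBO          -> 8   (slots 8-24: 16 ABO slots + 1 SSBO)

   bt->size_bytes = 25 * 4 = 100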
nir_foreach_block (block, impl) {
nir_foreach_instr (instr, block) {
if (instr->type == nir_instr_type_tex) {
- assert(bt->texture_start != 0xd0d0d0d0);
- nir_instr_as_tex(instr)->texture_index += bt->texture_start;
+ assert(bt->offsets[IRIS_SURFACE_GROUP_TEXTURE] != 0xd0d0d0d0);
+ nir_instr_as_tex(instr)->texture_index +=
+ bt->offsets[IRIS_SURFACE_GROUP_TEXTURE];
continue;
}
case nir_intrinsic_image_atomic_comp_swap:
case nir_intrinsic_image_load_raw_intel:
case nir_intrinsic_image_store_raw_intel:
- rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->image_start);
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_IMAGE);
break;
case nir_intrinsic_load_ubo:
- rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->ubo_start);
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_UBO);
break;
case nir_intrinsic_store_ssbo:
- rewrite_src_with_bti(&b, instr, &intrin->src[1], bt->ssbo_start);
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
+ IRIS_SURFACE_GROUP_SSBO);
break;
case nir_intrinsic_get_buffer_size:
case nir_intrinsic_ssbo_atomic_fmax:
case nir_intrinsic_ssbo_atomic_fcomp_swap:
case nir_intrinsic_load_ssbo:
- rewrite_src_with_bti(&b, instr, &intrin->src[0], bt->ssbo_start);
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_SSBO);
break;
default:
if (!pin_only) bt_map[s++] = (addr) - binder_addr;
#define bt_assert(section, exists) \
- if (!pin_only) assert(shader->bt.section == (exists) ? s : 0xd0d0d0d0)
+ if (!pin_only) assert(shader->bt.offsets[section] == ((exists) ? s : 0xd0d0d0d0))
/**
 * Populate the binding table for a given shader stage.
 */
unsigned num_textures = util_last_bit(info->textures_used);
- bt_assert(texture_start, num_textures > 0);
+ bt_assert(IRIS_SURFACE_GROUP_TEXTURE, num_textures > 0);
for (int i = 0; i < num_textures; i++) {
struct iris_sampler_view *view = shs->textures[i];
push_bt_entry(addr);
}
- bt_assert(image_start, info->num_images > 0);
+ bt_assert(IRIS_SURFACE_GROUP_IMAGE, info->num_images > 0);
for (int i = 0; i < info->num_images; i++) {
uint32_t addr = use_image(batch, ice, shs, i);
push_bt_entry(addr);
}
- bt_assert(ubo_start, shader->num_cbufs > 0);
+ bt_assert(IRIS_SURFACE_GROUP_UBO, shader->num_cbufs > 0);
for (int i = 0; i < shader->num_cbufs; i++) {
uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
push_bt_entry(addr);
}
- bt_assert(ssbo_start, info->num_abos + info->num_ssbos > 0);
+ bt_assert(IRIS_SURFACE_GROUP_SSBO, info->num_abos + info->num_ssbos > 0);
/* XXX: st is wasting 16 binding table slots for ABOs. Should add a cap
* for changing nir_lower_atomics_to_ssbos setting and buffer_base offset
continue;
/* Range block is a binding table index, map back to UBO index. */
- unsigned block_index = range->block - shader->bt.ubo_start;
+ unsigned block_index =
+    range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
if (range->length > 0) {
/* Range block is a binding table index, map back to UBO index. */
- unsigned block_index = range->block - shader->bt.ubo_start;
+ unsigned block_index =
+    range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
continue;
/* Range block is a binding table index, map back to UBO index. */
- unsigned block_index = range->block - shader->bt.ubo_start;
+ unsigned block_index =
+    range->block - shader->bt.offsets[IRIS_SURFACE_GROUP_UBO];
struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
struct iris_resource *res = (void *) cbuf->buffer;
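Continuing the hypothetical layout above: with the UBO group starting at binding table slot 6, a range->block of 7 maps back to block_index 1, i.e. shs->constbuf[1].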