layout->image_dims.count;
layout->image_dims.count += 3; /* three consts per image */
break;
- case nir_intrinsic_load_ubo:
- if (nir_src_is_const(intr->src[0])) {
- layout->num_ubos = MAX2(layout->num_ubos,
- nir_src_as_uint(intr->src[0]) + 1);
- } else {
- layout->num_ubos = shader->info.num_ubos;
- }
- break;
case nir_intrinsic_load_base_vertex:
case nir_intrinsic_load_first_vertex:
layout->num_driver_params =
MAX2(layout->num_driver_params, IR3_DP_VTXCNT_MAX + 1);
}
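+ /* The UBO lowering pass keeps nir->info.num_ubos up to date with the
+ * UBOs that still need pointers uploaded, so take the count straight
+ * from NIR instead of re-scanning load_ubo intrinsics.
+ */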
+ const_state->num_ubos = nir->info.num_ubos;
+
/* num_driver_params is scalar, align to vec4: */
const_state->num_driver_params = align(const_state->num_driver_params, 4);
if (const_state->num_ubos > 0) {
const_state->offsets.ubo = constoff;
- constoff += align(nir->info.num_ubos * ptrsz, 4) / 4;
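+ /* One pointer of ptrsz dwords per UBO; constoff is in vec4 units, so
+ * round the total up to a whole number of vec4 const slots.
+ */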
+ constoff += align(const_state->num_ubos * ptrsz, 4) / 4;
}
if (const_state->ssbo_size.count > 0) {
}
static void
-lower_ubo_block_decrement(nir_intrinsic_instr *instr, nir_builder *b)
+lower_ubo_block_decrement(nir_intrinsic_instr *instr, nir_builder *b, int *num_ubos)
{
/* Skip shifting things for turnip's bindless resources. */
- if (ir3_bindless_resource(instr->src[0]))
+ if (ir3_bindless_resource(instr->src[0])) {
+ assert(!b->shader->info.first_ubo_is_default_ubo); /* only set for GL */
return;
+ }
/* Shift all GL nir_intrinsic_load_ubo UBO indices down by 1, because we
* have lowered block 0 off of load_ubo to constbuf and ir3_const only
- * uploads pointers for block 1-N.
+ * uploads pointers for block 1-N. This is also where we update the NIR
+ * num_ubos to count only the UBOs that remain in use after the others
+ * were lowered to constbuf access.
*/
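+ /* "block" is the index after the shift, so block + 1 pointers are
+ * needed to cover it (e.g. GL block 3 becomes block 2, needing 3 UBO
+ * pointers). An indirect index could address any block, so assume all
+ * remaining UBOs are live in that case.
+ */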
+ if (nir_src_is_const(instr->src[0])) {
+ int block = nir_src_as_uint(instr->src[0]) - 1;
+ *num_ubos = MAX2(*num_ubos, block + 1);
+ } else {
+ *num_ubos = b->shader->info.num_ubos - 1;
+ }
+
nir_ssa_def *old_idx = nir_ssa_for_src(b, instr->src[0], 1);
nir_ssa_def *new_idx = nir_iadd_imm(b, old_idx, -1);
nir_instr_rewrite_src(&instr->instr, &instr->src[0],
static void
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
- struct ir3_ubo_analysis_state *state)
+ struct ir3_ubo_analysis_state *state, int *num_ubos)
{
b->cursor = nir_before_instr(&instr->instr);
*/
struct ir3_ubo_range *range = get_existing_range(instr, state, false);
if (!range) {
- lower_ubo_block_decrement(instr, b);
+ lower_ubo_block_decrement(instr, b, num_ubos);
return;
}
* access, so for now just fall back to pulling.
*/
if (!nir_src_is_const(instr->src[1])) {
- lower_ubo_block_decrement(instr, b);
+ lower_ubo_block_decrement(instr, b, num_ubos);
return;
}
*/
const struct ir3_ubo_range r = get_ubo_load_range(instr);
if (!(range->start <= r.start && r.end <= range->end)) {
- lower_ubo_block_decrement(instr, b);
+ lower_ubo_block_decrement(instr, b, num_ubos);
return;
}
}
}
state->size = offset;
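+ /* Count the UBOs that still need pointers uploaded after lowering; the
+ * result is written back to nir->info.num_ubos for GL below.
+ */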
+ int num_ubos = 0;
nir_foreach_function (function, nir) {
if (function->impl) {
nir_builder builder;
nir_foreach_block (block, function->impl) {
nir_foreach_instr_safe (instr, block) {
if (instr_is_load_ubo(instr))
- lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr), &builder, state);
+ lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
+ &builder, state, &num_ubos);
}
}
nir_metadata_dominance);
}
}
-
- /* If we previously had UBO 0, it's been lowered off of load_ubo and all
- * the others were shifted down.
+ /* Update the num_ubos field for GL (first_ubo_is_default_ubo). With
+ * Vulkan's bindless, we don't use the num_ubos field, so we can leave it
+ * as-is.
*/
- if (nir->info.num_ubos >= 1 && nir->info.first_ubo_is_default_ubo)
- nir->info.num_ubos--;
+ if (nir->info.first_ubo_is_default_ubo)
+ nir->info.num_ubos = num_ubos;
return state->lower_count > 0;
}