/* We don't have a good way of determining the range of the dynamic
* access in general, so for now just fall back to pulling.
*/
- if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo))
+ if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo)) {
+ track_ubo_use(instr, b, num_ubos);
return false;
+ }
/* After gathering the UBO access ranges, we limit the total
 * upload. Don't lower if this load is outside the range.
 */
nir_intrinsic_op op = nir_instr_as_intrinsic(instr)->intrinsic;
- /* ir3_nir_lower_io_offsets happens after this pass. */
- assert(op != nir_intrinsic_load_ubo_ir3);
+ /* nir_lower_ubo_vec4 happens after this pass. */
+ assert(op != nir_intrinsic_load_ubo_vec4);
return op == nir_intrinsic_load_ubo;
}
*/
struct ir3_const_state worst_case_const_state = { };
ir3_setup_const_state(nir, v, &worst_case_const_state);
- const uint32_t max_upload = (compiler->max_const -
+ const uint32_t max_upload = (ir3_max_const(v) -
worst_case_const_state.offsets.immediate) * 16;
uint32_t offset = v->shader->num_reserved_user_consts * 16;