Since binning pass variants share the same const_state with their
draw-pass counterpart, we should re-use the draw-pass variant's ubo
range analysis. So split the existing pass into two parts: the UBO
range analysis and the load lowering.
Signed-off-by: Rob Clark <robdclark@chromium.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5526>
progress |= OPT(s, nir_lower_tex, &tex_options);
}
progress |= OPT(s, nir_lower_tex, &tex_options);
}
- progress |= OPT(s, ir3_nir_analyze_ubo_ranges, so);
+ if (!so->binning_pass)
+ OPT_V(s, ir3_nir_analyze_ubo_ranges, so);
+
+ progress |= OPT(s, ir3_nir_lower_ubo_loads, so);
/* UBO offset lowering has to come after we've decided what will
* be left as load_ubo
/* UBO offset lowering has to come after we've decided what will
* be left as load_ubo
void ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
struct ir3_const_state *const_state);
void ir3_setup_const_state(nir_shader *nir, struct ir3_shader_variant *v,
struct ir3_const_state *const_state);
-bool ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v);
+void ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v);
+bool ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v);
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift);
nir_ssa_def *
ir3_nir_try_propagate_bit_shift(nir_builder *b, nir_ssa_def *offset, int32_t shift);
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
lower_ubo_load_to_uniform(nir_intrinsic_instr *instr, nir_builder *b,
- struct ir3_ubo_analysis_state *state, int *num_ubos, uint32_t alignment)
+ const struct ir3_ubo_analysis_state *state,
+ int *num_ubos, uint32_t alignment)
{
b->cursor = nir_before_instr(&instr->instr);
{
b->cursor = nir_before_instr(&instr->instr);
const struct ir3_ubo_range *range = get_existing_range(instr, state);
if (!range) {
track_ubo_use(instr, b, num_ubos);
const struct ir3_ubo_range *range = get_existing_range(instr, state);
if (!range) {
track_ubo_use(instr, b, num_ubos);
}
/* We don't have a good way of determining the range of the dynamic
* access in general, so for now just fall back to pulling.
*/
if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo))
}
/* We don't have a good way of determining the range of the dynamic
* access in general, so for now just fall back to pulling.
*/
if (!nir_src_is_const(instr->src[1]) && !ubo_is_gl_uniforms(&range->ubo))
/* After gathering the UBO access ranges, we limit the total
* upload. Don't lower if this load is outside the range.
/* After gathering the UBO access ranges, we limit the total
* upload. Don't lower if this load is outside the range.
instr, alignment);
if (!(range->start <= r.start && r.end <= range->end)) {
track_ubo_use(instr, b, num_ubos);
instr, alignment);
if (!(range->start <= r.start && r.end <= range->end)) {
track_ubo_use(instr, b, num_ubos);
}
nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
}
nir_ssa_def *ubo_offset = nir_ssa_for_src(b, instr->src[1], 1);
nir_instr_remove(&instr->instr);
nir_instr_remove(&instr->instr);
return op == nir_intrinsic_load_ubo;
}
return op == nir_intrinsic_load_ubo;
}
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
struct ir3_const_state *const_state = ir3_const_state(v);
ir3_nir_analyze_ubo_ranges(nir_shader *nir, struct ir3_shader_variant *v)
{
struct ir3_const_state *const_state = ir3_const_state(v);
+}
+
+bool
+ir3_nir_lower_ubo_loads(nir_shader *nir, struct ir3_shader_variant *v)
+{
+ struct ir3_compiler *compiler = v->shader->compiler;
+ /* For the binning pass variant, we re-use the corresponding draw-pass
+ * variant's const_state and UBO state. To make this clear, in this
+ * pass they are const (read-only).
+ */
+ const struct ir3_const_state *const_state = ir3_const_state(v);
+ const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;
nir_foreach_function (function, nir) {
if (function->impl) {
nir_builder builder;
nir_builder_init(&builder, function->impl);
nir_foreach_block (block, function->impl) {
nir_foreach_instr_safe (instr, block) {
nir_foreach_function (function, nir) {
if (function->impl) {
nir_builder builder;
nir_builder_init(&builder, function->impl);
nir_foreach_block (block, function->impl) {
nir_foreach_instr_safe (instr, block) {
- if (instr_is_load_ubo(instr))
+ if (!instr_is_load_ubo(instr))
+ continue;
+ progress |=
lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
&builder, state, &num_ubos,
compiler->const_upload_unit);
lower_ubo_load_to_uniform(nir_instr_as_intrinsic(instr),
&builder, state, &num_ubos,
compiler->const_upload_unit);
if (nir->info.first_ubo_is_default_ubo)
nir->info.num_ubos = num_ubos;
if (nir->info.first_ubo_is_default_ubo)
nir->info.num_ubos = num_ubos;
- return state->lower_count > 0;
struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
uint32_t num_enabled;
uint32_t size;
struct ir3_ubo_range range[IR3_MAX_UBO_PUSH_RANGES];
uint32_t num_enabled;
uint32_t size;
uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};
uint32_t cmdstream_size; /* for per-gen backend to stash required cmdstream size */
};