   iris_batch_maybe_flush(batch, 1500);

-   if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
-      iris_update_compiled_compute_shader(ice);
+   iris_update_compiled_compute_shader(ice);

   iris_update_grid_size_resource(ice, grid);

   ice->shaders.last_vue_map = &vue_prog_data->vue_map;
}
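+
+/**
+ * Upload SURFACE_STATE for any bound UBOs that are still missing
+ * descriptors, but only for shaders that actually use the pull constant
+ * model.  If we create any new descriptors (or a sysval upload is still
+ * pending), the stage needs new binding table entries, so flag its
+ * bindings as dirty.
+ */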
+static void
+iris_update_pull_constant_descriptors(struct iris_context *ice,
+                                      gl_shader_stage stage)
+{
+   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+   if (!shader || !shader->prog_data->has_ubo_pull)
+      return;
+
+   struct iris_shader_state *shs = &ice->state.shaders[stage];
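+   /* System values are uploaded as an extra constant buffer at draw time;
+    * a pending sysval upload means we'll be getting a new descriptor too.
+    */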
+   bool any_new_descriptors =
+      shader->num_system_values > 0 && shs->sysvals_need_upload;
+
+   unsigned bound_cbufs = shs->bound_cbufs;
+
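+   /* Create surface states for any bound UBOs that still lack one. */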
+   while (bound_cbufs) {
+      const int i = u_bit_scan(&bound_cbufs);
+      struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
+      struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
+
+      if (!surf_state->res && cbuf->buffer) {
+         iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+         any_new_descriptors = true;
+      }
+   }
+
+   if (any_new_descriptors)
+      ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+}
+
/**
* Get the prog_data for a given stage, or NULL if the stage is disabled.
*/
         }
      }
   }
+
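+   /* Now that the shader variants are settled, re-create any UBO surface
+    * states that were invalidated while the stage's constants were dirty.
+    */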
+   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+      if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
+         iris_update_pull_constant_descriptors(ice, i);
+   }
}
static struct iris_compiled_shader *
   return shader;
}
-void
-iris_update_compiled_compute_shader(struct iris_context *ice)
+static void
+iris_update_compiled_cs(struct iris_context *ice)
{
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_uncompiled_shader *ish =
   }
}
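+
+/**
+ * Recompile the compute shader and refresh its pull constant descriptors,
+ * based on which dirty bits are set.
+ */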
+void
+iris_update_compiled_compute_shader(struct iris_context *ice)
+{
+   if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
+      iris_update_compiled_cs(ice);
+
+   if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
+      iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
+}
+
void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
                               uint32_t *dst)
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   struct pipe_shader_buffer *cbuf = &shs->constbuf[index];

+   /* TODO: Only do this if the buffer changes? */
+   pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
+
   if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
      shs->bound_cbufs |= 1u << index;

      struct iris_resource *res = (void *) cbuf->buffer;
      res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
      res->bind_stages |= 1 << stage;
-
-      iris_upload_ubo_ssbo_surf_state(ice, cbuf,
-                                      &shs->constbuf_surf_state[index],
-                                      false);
   } else {
      shs->bound_cbufs &= ~(1u << index);
      pipe_resource_reference(&cbuf->buffer, NULL);
-      pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
   }
   ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << stage;

-   // XXX: maybe not necessary all the time...?
-   // XXX: we need 3DS_BTP to commit these changes, and if we fell back to
-   // XXX: pull model we may need actual new bindings...
-   ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
}
static void
             struct iris_state_ref *surf_state,
             bool writable)
{
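+   /* A bound UBO may no longer have a SURFACE_STATE if the shader never
+    * pulls from it, so treat a missing descriptor like an unbound buffer.
+    */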
-   if (!buf->buffer)
+   if (!buf->buffer || !surf_state->res)
      return use_null_surface(batch, ice);

   iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable);
         struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];

         if (res->bo == iris_resource_bo(cbuf->buffer)) {
-            iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
-            ice->state.dirty |=
-               (IRIS_DIRTY_CONSTANTS_VS | IRIS_DIRTY_BINDINGS_VS) << s;
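+            /* Invalidate the stale descriptor; it will be re-created at
+             * draw time if the shader actually pulls from this buffer.
+             */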
+            pipe_resource_reference(&surf_state->res, NULL);
+            ice->state.dirty |= IRIS_DIRTY_CONSTANTS_VS << s;
         }
      }
   }