iris_populate_binding_table(struct iris_context *ice,
                            struct iris_batch *batch,
                            gl_shader_stage stage,
-                           bool pin_only)
+                           bool pin_only,
+                           struct iris_state_ref *grid_size_surf)
{
   const struct iris_binder *binder = &ice->state.binder;
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];
   if (!shader)
      return;
+ if (stage == MESA_SHADER_COMPUTE) {
+ /* surface for gl_NumWorkGroups */
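+      /* pin_only callers only re-pin existing buffers and may pass NULL;
+       * anyone actually (re)building the table must supply the surface.
+       */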
+ assert(grid_size_surf || pin_only);
+ if (grid_size_surf) {
+ struct iris_bo *bo = iris_resource_bo(grid_size_surf->res);
+ iris_use_pinned_bo(batch, bo, false);
+ push_bt_entry(grid_size_surf->offset);
+ }
+ }
+
if (stage == MESA_SHADER_FRAGMENT) {
struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
/* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (clean & (IRIS_DIRTY_BINDINGS_VS << stage)) {
/* Re-pin any buffers referred to by the binding table. */
- iris_populate_binding_table(ice, batch, stage, true);
+ iris_populate_binding_table(ice, batch, stage, true, NULL);
}
}
if (clean & IRIS_DIRTY_BINDINGS_CS) {
/* Re-pin any buffers referred to by the binding table. */
- iris_populate_binding_table(ice, batch, stage, true);
+ iris_populate_binding_table(ice, batch, stage, true, NULL);
}
struct pipe_resource *sampler_res = shs->sampler_table.res;
for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
if (dirty & (IRIS_DIRTY_BINDINGS_VS << stage)) {
- iris_populate_binding_table(ice, batch, stage, false);
+ iris_populate_binding_table(ice, batch, stage, false, NULL);
}
}
struct brw_stage_prog_data *prog_data = shader->prog_data;
struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
- if (dirty & IRIS_DIRTY_BINDINGS_CS)
- iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
+ struct pipe_resource *grid_size_res = NULL;
+ uint32_t grid_size_offset;
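+   /* Indirect dispatches already have the workgroup counts in a buffer
+    * provided by the application; for direct dispatches, upload the three
+    * counts from grid->grid.
+    */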
+ if (grid->indirect) {
+ grid_size_res = grid->indirect;
+ grid_size_offset = grid->indirect_offset;
+ } else {
+ uint32_t *grid_size_map =
+ stream_state(batch, ice->state.surface_uploader, &grid_size_res, 12, 4,
+ &grid_size_offset);
+ grid_size_map[0] = grid->grid[0];
+ grid_size_map[1] = grid->grid[1];
+ grid_size_map[2] = grid->grid[2];
+ struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
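+      /* Convert the streamed offset back to a BO-relative one; the surface
+       * address below is built from the BO's GTT address plus this offset.
+       */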
+ grid_size_offset -= iris_bo_offset_from_base_address(grid_size_bo);
+ }
+
+ struct iris_state_ref grid_size_surf;
+ memset(&grid_size_surf, 0, sizeof(grid_size_surf));
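+   /* Upload a SURFACE_STATE describing the workgroup counts as a raw buffer. */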
+ void *grid_surf_state_map =
+ upload_state(ice->state.surface_uploader,
+ &grid_size_surf,
+ 4 * GENX(RENDER_SURFACE_STATE_length), 64);
+ assert(grid_surf_state_map);
+ struct iris_bo *grid_size_bo = iris_resource_bo(grid_size_res);
+ iris_use_pinned_bo(batch, grid_size_bo, false);
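+   /* Make the offset relative to Surface State Base Address so
+    * iris_populate_binding_table() can push it as a binding table entry.
+    */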
+ grid_size_surf.offset +=
+ iris_bo_offset_from_base_address(iris_resource_bo(grid_size_surf.res));
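+   /* The surface covers just the 12 bytes of workgroup counts, in RAW format. */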
+ isl_buffer_fill_state(&screen->isl_dev, grid_surf_state_map,
+ .address =
+ grid_size_bo->gtt_offset + grid_size_offset,
+ .size_B = 12,
+ .format = ISL_FORMAT_RAW,
+ .stride_B = 1,
+ .mocs = MOCS_WB);
+
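+   /* grid_size_res is always set above, so the binding table (which now
+    * contains the workgroup-count surface) is re-emitted for every compute
+    * dispatch, not only when IRIS_DIRTY_BINDINGS_CS is dirty.
+    */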
+ if (dirty & IRIS_DIRTY_BINDINGS_CS || grid_size_res)
+ iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false,
+ &grid_size_surf);
iris_use_optional_res(batch, shs->sampler_table.res, false);
iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false);