+/* Compute-dispatch entry point (pipe_context::launch_grid).
+ *
+ * Runs the grid launch in its own batch rather than the current render
+ * batch: the current ctx->batch is saved, a freshly allocated batch is
+ * installed, all resources the compute job may touch are registered as
+ * batch dependencies, the per-generation backend hook emits the actual
+ * dispatch, and the batch is flushed before the saved batch is restored.
+ *
+ * NOTE(review): the `true` passed to fd_bc_alloc_batch() and the `false`
+ * passed to fd_batch_flush() are flags whose meaning is not visible in
+ * this hunk — presumably "nondraw batch" and "don't force sync"; confirm
+ * against their declarations.
+ */
+static void
+fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
+{
+ struct fd_context *ctx = fd_context(pctx);
+ struct fd_batch *batch, *save_batch = NULL;
+ unsigned i;
+
+ /* Swap in a dedicated batch for the compute job, keeping a reference
+  * to the current batch so it can be restored afterwards.
+  */
+ batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+ fd_batch_reference(&save_batch, ctx->batch);
+ fd_batch_reference(&ctx->batch, batch);
+ /* New batch has none of our state; mark everything dirty so it all
+  * gets re-emitted into this batch.
+  */
+ fd_context_all_dirty(ctx);
+
+ /* Screen lock held while registering the batch's resource
+  * dependencies below (NOTE(review): presumably it guards the
+  * batch-cache/resource tracking structures — confirm).
+  */
+ mtx_lock(&ctx->screen->lock);
+
+ /* Mark SSBOs as being written.. we don't actually know which ones are
+ * read vs written, so just assume the worst
+ */
+ foreach_bit(i, ctx->shaderbuf[PIPE_SHADER_COMPUTE].enabled_mask)
+ resource_written(batch, ctx->shaderbuf[PIPE_SHADER_COMPUTE].sb[i].buffer);
+
+ /* Images carry an access mask, so we can distinguish read vs write. */
+ foreach_bit(i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
+ struct pipe_image_view *img =
+ &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
+ if (img->access & PIPE_IMAGE_ACCESS_WRITE)
+ resource_written(batch, img->resource);
+ else
+ resource_read(batch, img->resource);
+ }
+
+ /* UBO's are read */
+ foreach_bit(i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
+ resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);
+
+ /* Mark textures as being read */
+ foreach_bit(i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
+ resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);
+
+ /* For global buffers, we don't really know if read or written, so assume
+ * the worst:
+ */
+ foreach_bit(i, ctx->global_bindings.enabled_mask)
+ resource_written(batch, ctx->global_bindings.buf[i]);
+
+ /* The indirect-dispatch parameter buffer is read by the GPU. */
+ if (info->indirect)
+ resource_read(batch, info->indirect);
+
+ mtx_unlock(&ctx->screen->lock);
+
+ /* Hand off to the per-generation backend to emit the dispatch
+  * commands, then flush this batch immediately.
+  */
+ batch->needs_flush = true;
+ ctx->launch_grid(ctx, info);
+
+ fd_batch_flush(batch, false);
+
+ /* Restore the previously-current batch and mark all state dirty again
+  * so it is re-emitted there, then drop our local references.
+  */
+ fd_batch_reference(&ctx->batch, save_batch);
+ fd_context_all_dirty(ctx);
+ fd_batch_reference(&save_batch, NULL);
+ fd_batch_reference(&batch, NULL);
+}
+