+static void
+nvc0_compute_validate_buffers(struct nvc0_context *nvc0)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   struct nvc0_screen *screen = nvc0->screen;
+   const int s = 5; /* compute shader stage */
+   int i;
+
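+   /* Bind the driver's auxiliary constant buffer for this stage and position
+    * the following inline upload at the shader buffer info area. */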
+   BEGIN_NVC0(push, NVC0_CP(CB_SIZE), 3);
+   PUSH_DATA (push, NVC0_CB_AUX_SIZE);
+   PUSH_DATAh(push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
+   PUSH_DATA (push, screen->uniform_bo->offset + NVC0_CB_AUX_INFO(s));
+   BEGIN_1IC0(push, NVC0_CP(CB_POS), 1 + 4 * NVC0_MAX_BUFFERS);
+   PUSH_DATA (push, NVC0_CB_AUX_BUF_INFO(0));
+
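+   /* Upload a 4-dword record (address low/high, size, padding) for each
+    * buffer slot; unbound slots get a zeroed record. */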
+   for (i = 0; i < NVC0_MAX_BUFFERS; i++) {
+      if (nvc0->buffers[s][i].buffer) {
+         struct nv04_resource *res =
+            nv04_resource(nvc0->buffers[s][i].buffer);
+         PUSH_DATA (push, res->address + nvc0->buffers[s][i].buffer_offset);
+         PUSH_DATAh(push, res->address + nvc0->buffers[s][i].buffer_offset);
+         PUSH_DATA (push, nvc0->buffers[s][i].buffer_size);
+         PUSH_DATA (push, 0);
+         BCTX_REFN(nvc0->bufctx_cp, CP_BUF, res, RDWR);
+         util_range_add(&res->valid_buffer_range,
+                        nvc0->buffers[s][i].buffer_offset,
+                        nvc0->buffers[s][i].buffer_offset +
+                        nvc0->buffers[s][i].buffer_size);
+      } else {
+         PUSH_DATA (push, 0);
+         PUSH_DATA (push, 0);
+         PUSH_DATA (push, 0);
+         PUSH_DATA (push, 0);
+      }
+   }
+}
+
+void
+nvc0_compute_validate_globals(struct nvc0_context *nvc0)
+{
+   unsigned i;
+
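+   /* Re-reference every resident global buffer so it is picked up again when
+    * the compute bufctx is validated. */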
+   for (i = 0; i < nvc0->global_residents.size / sizeof(struct pipe_resource *);
+        ++i) {
+      struct pipe_resource *res = *util_dynarray_element(
+         &nvc0->global_residents, struct pipe_resource *, i);
+      if (res)
+         nvc0_add_resident(nvc0->bufctx_cp, NVC0_BIND_CP_GLOBAL,
+                           nv04_resource(res), NOUVEAU_BO_RDWR);
+   }
+}
+
+static inline void
+nvc0_compute_invalidate_surfaces(struct nvc0_context *nvc0, const int s)
+{
+   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
+   int i;
+
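+   /* Unbind all image slots of the given stage by writing default (null)
+    * descriptors; s == 5 targets the compute class, other stages the 3D
+    * class. */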
+   for (i = 0; i < NVC0_MAX_IMAGES; ++i) {
+      if (s == 5)
+         BEGIN_NVC0(push, NVC0_CP(IMAGE(i)), 6);
+      else
+         BEGIN_NVC0(push, NVC0_3D(IMAGE(i)), 6);
+      PUSH_DATA(push, 0);
+      PUSH_DATA(push, 0);
+      PUSH_DATA(push, 0);
+      PUSH_DATA(push, 0);
+      PUSH_DATA(push, 0x14000);
+      PUSH_DATA(push, 0);
+   }
+}
+
+static void
+nvc0_compute_validate_surfaces(struct nvc0_context *nvc0)
+{
+   /* TODO: Invalidating both 3D and CP surfaces before validating surfaces
+    * for compute is probably not strictly necessary, but we haven't found a
+    * better solution so far. This fixes some invalidation issues when compute
+    * and fragment shaders are used inside the same context. In any case, we
+    * definitely have invalidation issues between 3D and CP for other
+    * resources like SSBOs and atomic counters. */
+   nvc0_compute_invalidate_surfaces(nvc0, 4); /* fragment */
+   nvc0_compute_invalidate_surfaces(nvc0, 5); /* compute */
+
+   nvc0_validate_suf(nvc0, 5);
+
+   /* Invalidate all FRAGMENT images because they are aliased with COMPUTE. */
+   nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_SUF);
+   nvc0->dirty_3d |= NVC0_NEW_3D_SURFACES;
+   nvc0->images_dirty[4] |= nvc0->images_valid[4];