}
}
-/* Will be removed once images are completely done. */
-#if 0
-static void
-nve4_compute_validate_surfaces(struct nvc0_context *nvc0)
-{
-   struct nvc0_screen *screen = nvc0->screen;
-   struct nouveau_pushbuf *push = nvc0->base.pushbuf;
-   struct nv50_surface *sf;
-   struct nv04_resource *res;
-   uint32_t mask;
-   unsigned i;
-   const unsigned t = 1;
-   uint64_t address;
-
-   address = screen->uniform_bo->offset + NVC0_CB_AUX_INFO(5);
-
-   mask = nvc0->surfaces_dirty[t];
-   while (mask) {
-      i = ffs(mask) - 1;
-      mask &= ~(1 << i);
-
-      /*
-       * NVE4's surface load/store instructions receive all the information
-       * directly instead of via binding points, so we have to supply them.
-       */
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_DST_ADDRESS_HIGH), 2);
-      PUSH_DATAh(push, address + NVC0_CB_AUX_BUF_INFO(i));
-      PUSH_DATA (push, address + NVC0_CB_AUX_BUF_INFO(i));
-      BEGIN_NVC0(push, NVE4_CP(UPLOAD_LINE_LENGTH_IN), 2);
-      PUSH_DATA (push, 64);
-      PUSH_DATA (push, 1);
-      BEGIN_1IC0(push, NVE4_CP(UPLOAD_EXEC), 17);
-      PUSH_DATA (push, NVE4_COMPUTE_UPLOAD_EXEC_LINEAR | (0x20 << 1));
-
-      nve4_set_surface_info(push, nvc0->surfaces[t][i], screen);
-
-      sf = nv50_surface(nvc0->surfaces[t][i]);
-      if (sf) {
-         res = nv04_resource(sf->base.texture);
-
-         if (sf->base.writable)
-            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
-         else
-            BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
-      }
-   }
-   if (nvc0->surfaces_dirty[t]) {
-      BEGIN_NVC0(push, NVE4_CP(FLUSH), 1);
-      PUSH_DATA (push, NVE4_COMPUTE_FLUSH_CB);
-   }
-
-   /* re-reference non-dirty surfaces */
-   mask = nvc0->surfaces_valid[t] & ~nvc0->surfaces_dirty[t];
-   while (mask) {
-      i = ffs(mask) - 1;
-      mask &= ~(1 << i);
-
-      sf = nv50_surface(nvc0->surfaces[t][i]);
-      res = nv04_resource(sf->base.texture);
-
-      if (sf->base.writable)
-         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RDWR);
-      else
-         BCTX_REFN(nvc0->bufctx_cp, CP_SUF, res, RD);
-   }
-
-   nvc0->surfaces_dirty[t] = 0;
-}
-#endif
-
/* Thankfully, textures with samplers follow the normal rules. */
static void
nve4_compute_validate_samplers(struct nvc0_context *nvc0)