}
}
+static void
+fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
+{
+ /* On devices that could sample from GMEM we could possibly do better.
+ * Or if we knew that we were doing GMEM bypass we could just emit a
+ * cache flush, perhaps? But we don't know if future draws would cause
+ * us to use GMEM, and a flush in bypass isn't the end of the world.
+ */
+ fd_context_flush(pctx, NULL, 0);
+}
+
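For context, a minimal usage sketch (not part of the patch): GL_ARB_texture_barrier lets an application sample from a texture it is also rendering to, provided the passes are split by glTextureBarrier(). In Mesa that call is routed to pctx->texture_barrier, i.e. the fd_texture_barrier() added above. Everything below is illustrative and assumed: the libepoxy include, the hypothetical draw_feedback_passes() helper, and the pre-created fbo/tex/prog objects.

/* Hypothetical sketch; assumes libepoxy, a current context exposing
 * GL_ARB_texture_barrier, and vertex state bound elsewhere.  'fbo' has
 * 'tex' attached as its color buffer, and 'prog' samples 'tex'. */
#include <epoxy/gl.h>

static void
draw_feedback_passes(GLuint fbo, GLuint tex, GLuint prog)
{
	glBindFramebuffer(GL_FRAMEBUFFER, fbo);   /* render target is 'tex' */
	glBindTexture(GL_TEXTURE_2D, tex);        /* also bound for sampling */
	glUseProgram(prog);

	glDrawArrays(GL_TRIANGLES, 0, 3);         /* pass 1: writes tex */

	/* Make pass 1's writes visible to pass 2's texture fetches.  On
	 * freedreno this reaches fd_texture_barrier(), which conservatively
	 * flushes, since the driver cannot yet know whether the frame will
	 * go through GMEM or bypass. */
	glTextureBarrier();

	glDrawArrays(GL_TRIANGLES, 0, 3);         /* pass 2: samples tex */
}

The full flush is the safe default; as the comment in fd_texture_barrier() notes, a plain cache flush would suffice if the driver knew the frame would stay in bypass.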
/**
* emit marker string as payload of a no-op packet, which can be
* decoded by cffdump.
pctx->set_debug_callback = fd_set_debug_callback;
pctx->create_fence_fd = fd_create_fence_fd;
pctx->fence_server_sync = fd_fence_server_sync;
+ pctx->texture_barrier = fd_texture_barrier;
pctx->stream_uploader = u_upload_create_default(pctx);
if (!pctx->stream_uploader)
case PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT:
case PIPE_CAP_STRING_MARKER:
case PIPE_CAP_MIXED_COLOR_DEPTH_BITS:
+ case PIPE_CAP_TEXTURE_BARRIER:
return 1;
case PIPE_CAP_VERTEXID_NOBASE:
case PIPE_CAP_TGSI_TEXCOORD:
case PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER:
case PIPE_CAP_TEXTURE_MULTISAMPLE:
- case PIPE_CAP_TEXTURE_BARRIER:
case PIPE_CAP_TEXTURE_MIRROR_CLAMP:
case PIPE_CAP_QUERY_MEMORY_INFO:
case PIPE_CAP_PCI_GROUP: