gallium/pipebuffer: Use persistent maps for slabs
author Thomas Hellstrom <thellstrom@vmware.com>
Wed, 22 Apr 2020 11:27:35 +0000 (13:27 +0200)
committer Marge Bot <eric+marge@anholt.net>
Wed, 29 Apr 2020 13:45:12 +0000 (13:45 +0000)
Instead of the ugly practice of relying on the provider to cache maps,
introduce and use persistent pipebuffer maps. Providers that can't handle
persistent maps can't use the slab manager.

The only current user is the svga drm winsys, which always maps
persistently.
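
Sketch of the resulting slab lifecycle (simplified from the patch below; error
handling and the surrounding slab bookkeeping are omitted): the slab's backing
buffer is mapped once with PB_USAGE_PERSISTENT when the slab is created, the
mapping stays valid across flushes, and it is dropped only when the last
sub-buffer is released and the slab itself is destroyed.

   /* At slab creation: map once, persistently, and keep the pointer. */
   slab->virtual = pb_map(slab->bo,
                          PB_USAGE_CPU_READ |
                          PB_USAGE_CPU_WRITE |
                          PB_USAGE_PERSISTENT, NULL);
   if (!slab->virtual)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* ... sub-buffer allocations are carved out of slab->virtual ... */

   /* At slab destruction: drop the persistent map before releasing the BO. */
   pb_unmap(slab->bo);
   pb_reference(&slab->bo, NULL);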

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Charmaine Lee <charmainel@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4804>

src/gallium/auxiliary/pipebuffer/pb_buffer.h
src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
src/gallium/winsys/svga/drm/vmw_buffer.c

index bd60eba1432cdd3bed488f6a1bc68d9019f04187..9c71fc86c9ccac7068f309e7c16c09662edc68f1 100644 (file)
@@ -66,6 +66,11 @@ enum pb_usage_flags {
    PB_USAGE_GPU_WRITE = (1 << 3),
    PB_USAGE_DONTBLOCK = (1 << 9),
    PB_USAGE_UNSYNCHRONIZED = (1 << 10),
+   /* Persistent mappings may remain valid across a flush. Note that unlike
+    * OpenGL persistent maps, there is no requirement at the pipebuffer API
+    * level to explicitly enforce coherency with barriers or range flushes.
+    */
+   PB_USAGE_PERSISTENT = (1 << 13)
 };
 
 /* For error checking elsewhere */
@@ -74,7 +79,8 @@ enum pb_usage_flags {
                       PB_USAGE_GPU_READ | \
                       PB_USAGE_GPU_WRITE | \
                       PB_USAGE_DONTBLOCK | \
-                      PB_USAGE_UNSYNCHRONIZED)
+                      PB_USAGE_UNSYNCHRONIZED | \
+                      PB_USAGE_PERSISTENT)
 
 #define PB_USAGE_CPU_READ_WRITE  (PB_USAGE_CPU_READ | PB_USAGE_CPU_WRITE)
 #define PB_USAGE_GPU_READ_WRITE  (PB_USAGE_GPU_READ | PB_USAGE_GPU_WRITE)
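
Since the slab manager now unconditionally requests PB_USAGE_PERSISTENT, a
provider that cannot keep CPU mappings alive across a flush is expected to
refuse such a request rather than silently ignore the flag. A minimal
hypothetical sketch of such a provider's map callback (my_provider_map,
my_buffer and my_os_map are made-up names for illustration, not part of this
patch):

   static void *
   my_provider_map(struct pb_buffer *buf, enum pb_usage_flags flags,
                   void *flush_ctx)
   {
      struct my_buffer *mybuf = my_buffer(buf);

      /* This backend cannot guarantee that a map survives a flush, so it
       * rejects persistent mappings; the slab manager can then not be
       * layered on top of it. */
      if (flags & PB_USAGE_PERSISTENT)
         return NULL;

      return my_os_map(mybuf);
   }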
index dd59733fe9b7e3491f5a6f753293b4d5a9d0b656..c936096210a518de16fb92f5f6a7601d98ddb6a1 100644 (file)
@@ -215,6 +215,7 @@ pb_slab_buffer_destroy(struct pb_buffer *_buf)
    if (slab->numFree == slab->numBuffers) {
       list = &slab->head;
       list_delinit(list);
+      pb_unmap(slab->bo);
       pb_reference(&slab->bo, NULL);
       FREE(slab->buffers);
       FREE(slab);
@@ -315,15 +316,16 @@ pb_slab_create(struct pb_slab_manager *mgr)
    }
 
    /* Note down the slab virtual address. All mappings are accessed directly 
-    * through this address so it is required that the buffer is pinned. */
+    * through this address, so it is required that the buffer is mapped
+    * persistently. */
    slab->virtual = pb_map(slab->bo, 
                           PB_USAGE_CPU_READ |
-                          PB_USAGE_CPU_WRITE, NULL);
+                          PB_USAGE_CPU_WRITE |
+                          PB_USAGE_PERSISTENT, NULL);
    if(!slab->virtual) {
       ret = PIPE_ERROR_OUT_OF_MEMORY;
       goto out_err1;
    }
-   pb_unmap(slab->bo);
 
    numBuffers = slab->bo->size / mgr->bufSize;
 
index 34c5e341782f0dc1edb65e10e4021aa7b1905888..e2ddf78ed2edfe430dd8ea88bed14ba51f36e6b3 100644 (file)
@@ -364,6 +364,8 @@ vmw_svga_winsys_buffer_map(struct svga_winsys_screen *sws,
                  (unsigned) PIPE_TRANSFER_DONTBLOCK);
    STATIC_ASSERT((unsigned) PB_USAGE_UNSYNCHRONIZED ==
                  (unsigned) PIPE_TRANSFER_UNSYNCHRONIZED);
+   STATIC_ASSERT((unsigned) PB_USAGE_PERSISTENT ==
+                 (unsigned) PIPE_TRANSFER_PERSISTENT);
 
    map = pb_map(vmw_pb_buffer(buf), flags & PB_USAGE_ALL, NULL);
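
The STATIC_ASSERTs above document the design choice that the PB_USAGE_* bits
share their numeric values with the corresponding PIPE_TRANSFER_* bits, so the
winsys can forward the driver's transfer flags into pb_map() with a simple
mask instead of translating them; the new assert extends that guarantee to the
persistent bit. A hypothetical call site, assuming the svga_winsys_screen
buffer_map interface takes the gallium transfer flags directly:

   map = sws->buffer_map(sws, buf,
                         PIPE_TRANSFER_WRITE | PIPE_TRANSFER_PERSISTENT);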