gallium/pb_bufmgr_cache: limit the size of cache
author Marek Olšák <marek.olsak@amd.com>
Wed, 20 Aug 2014 21:53:40 +0000 (23:53 +0200)
committer Marek Olšák <marek.olsak@amd.com>
Mon, 1 Sep 2014 18:17:48 +0000 (20:17 +0200)
This should make a machine that is running piglit more responsive at times.
e.g. streaming-texture-leak can easily eat 600 MB because of how fast it
creates new textures.

src/gallium/auxiliary/pipebuffer/pb_bufmgr.h
src/gallium/auxiliary/pipebuffer/pb_bufmgr_cache.c
src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
src/gallium/winsys/svga/drm/vmw_screen_pools.c

index d5b0ee2ac9677a42ea2d540bd3a96351676170e0..147ce39041cf6ceb7a444fa12c4d6c635c58d0b3 100644 (file)
@@ -163,7 +163,8 @@ struct pb_manager *
 pb_cache_manager_create(struct pb_manager *provider, 
                         unsigned usecs,
                         float size_factor,
-                        unsigned bypass_usage);
+                        unsigned bypass_usage,
+                        uint64_t maximum_cache_size);
 
 
 struct pb_fence_ops;
index 32a88754042e3898a0bc98cd545d5685fc1823ca..5eb8d06a09186e10a59e5b11e89946163178b693 100644 (file)
@@ -84,6 +84,7 @@ struct pb_cache_manager
    pb_size numDelayed;
    float size_factor;
    unsigned bypass_usage;
+   uint64_t cache_size, max_cache_size;
 };
 
 
@@ -114,6 +115,7 @@ _pb_cache_buffer_destroy(struct pb_cache_buffer *buf)
    LIST_DEL(&buf->head);
    assert(mgr->numDelayed);
    --mgr->numDelayed;
+   mgr->cache_size -= buf->base.size;
    assert(!pipe_is_referenced(&buf->base.reference));
    pb_reference(&buf->buffer, NULL);
    FREE(buf);
@@ -158,11 +160,20 @@ pb_cache_buffer_destroy(struct pb_buffer *_buf)
    assert(!pipe_is_referenced(&buf->base.reference));
    
    _pb_cache_buffer_list_check_free(mgr);
-   
+
+   /* Directly release any buffer that exceeds the limit. */
+   if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
+      pb_reference(&buf->buffer, NULL);
+      FREE(buf);
+      pipe_mutex_unlock(mgr->mutex);
+      return;
+   }
+
    buf->start = os_time_get();
    buf->end = buf->start + mgr->usecs;
    LIST_ADDTAIL(&buf->head, &mgr->delayed);
    ++mgr->numDelayed;
+   mgr->cache_size += buf->base.size;
    pipe_mutex_unlock(mgr->mutex);
 }
 
@@ -314,6 +325,7 @@ pb_cache_manager_create_buffer(struct pb_manager *_mgr,
    }
    
    if(buf) {
+      mgr->cache_size -= buf->base.size;
       LIST_DEL(&buf->head);
       --mgr->numDelayed;
       pipe_mutex_unlock(mgr->mutex);
@@ -400,12 +412,15 @@ pb_cache_manager_destroy(struct pb_manager *mgr)
  * the requested size as cache hits.
  * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
  * buffer allocation requests are redirected to the provider.
+ * @param maximum_cache_size  Maximum size of all unused buffers the cache can
+ * hold.
  */
 struct pb_manager *
 pb_cache_manager_create(struct pb_manager *provider, 
                         unsigned usecs,
                         float size_factor,
-                        unsigned bypass_usage)
+                        unsigned bypass_usage,
+                        uint64_t maximum_cache_size)
 {
    struct pb_cache_manager *mgr;
 
@@ -425,6 +440,7 @@ pb_cache_manager_create(struct pb_manager *provider,
    mgr->bypass_usage = bypass_usage;
    LIST_INITHEAD(&mgr->delayed);
    mgr->numDelayed = 0;
+   mgr->max_cache_size = maximum_cache_size;
    pipe_mutex_init(mgr->mutex);
       
    return &mgr->base;
index 820cc90cda0c539781d3da3337a46bda95b54b12..3b695f985342ad2a9c0cfa249e65b8476919a6b8 100644 (file)
@@ -671,16 +671,20 @@ radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
     ws->kman = radeon_bomgr_create(ws);
     if (!ws->kman)
         goto fail;
-    ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+    ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+                                            ws->info.vram_size / 8);
     if (!ws->cman_vram)
         goto fail;
-    ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+    ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+                                                   ws->info.vram_size / 8);
     if (!ws->cman_vram_gtt_wc)
         goto fail;
-    ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+    ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+                                           ws->info.gart_size / 8);
     if (!ws->cman_gtt)
         goto fail;
-    ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+    ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+                                              ws->info.gart_size / 8);
     if (!ws->cman_gtt_wc)
         goto fail;
 
index 50d2a81fdb0050422bf7b40ba8904bd534151c05..1815bfa67072770521c943571fdb5a47378acc33 100644 (file)
@@ -124,7 +124,8 @@ vmw_mob_pools_init(struct vmw_winsys_screen *vws)
 
    vws->pools.mob_cache = 
       pb_cache_manager_create(vws->pools.gmr, 100000, 2.0f,
-                              VMW_BUFFER_USAGE_SHARED);
+                              VMW_BUFFER_USAGE_SHARED,
+                              64 * 1024 * 1024);
    if (!vws->pools.mob_cache)
       return FALSE;