This should make a machine running piglit more responsive at times.
e.g. streaming-texture-leak can easily eat 600 MB because of how quickly it
creates new textures.
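
The idea, roughly, is shown in the minimal standalone sketch below (this is
not the pb_bufmgr_cache code itself; the struct and the cache_release helper
are made up for illustration): a buffer that becomes unused is only parked in
the cache while the total cached size stays under a fixed cap, otherwise it
is released immediately.

    #include <stdint.h>
    #include <stdlib.h>

    struct cache {
       uint64_t cache_size;      /* total size of all unused buffers held */
       uint64_t max_cache_size;  /* hard cap, e.g. vram_size / 8 */
    };

    /* Called when a buffer becomes unused. Returns 1 if the buffer was
     * kept in the cache, 0 if it was freed right away because keeping it
     * would push the cache over its size limit. */
    int cache_release(struct cache *c, void *buf, uint64_t size)
    {
       if (c->cache_size + size > c->max_cache_size) {
          free(buf);   /* stand-in for pb_reference(&buf->buffer, NULL) */
          return 0;
       }
       /* ...append buf to the delayed list here... */
       c->cache_size += size;
       return 1;
    }

The real patch does the same accounting on the manager's existing delayed
list, decrementing cache_size whenever a cached buffer is reused or freed.
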
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs,
float size_factor,
- unsigned bypass_usage);
+ unsigned bypass_usage,
+ uint64_t maximum_cache_size);
struct pb_fence_ops;
pb_size numDelayed;
float size_factor;
unsigned bypass_usage;
+ uint64_t cache_size, max_cache_size;
};
LIST_DEL(&buf->head);
assert(mgr->numDelayed);
--mgr->numDelayed;
+ mgr->cache_size -= buf->base.size;
assert(!pipe_is_referenced(&buf->base.reference));
pb_reference(&buf->buffer, NULL);
FREE(buf);
assert(!pipe_is_referenced(&buf->base.reference));
_pb_cache_buffer_list_check_free(mgr);
-
+
+ /* Directly release any buffer that exceeds the limit. */
+ if (mgr->cache_size + buf->base.size > mgr->max_cache_size) {
+ pb_reference(&buf->buffer, NULL);
+ FREE(buf);
+ pipe_mutex_unlock(mgr->mutex);
+ return;
+ }
+
buf->start = os_time_get();
buf->end = buf->start + mgr->usecs;
LIST_ADDTAIL(&buf->head, &mgr->delayed);
++mgr->numDelayed;
+ mgr->cache_size += buf->base.size;
pipe_mutex_unlock(mgr->mutex);
}
}
if(buf) {
+ mgr->cache_size -= buf->base.size;
LIST_DEL(&buf->head);
--mgr->numDelayed;
pipe_mutex_unlock(mgr->mutex);
* the requested size as cache hits.
* @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
* buffer allocation requests are redirected to the provider.
+ * @param maximum_cache_size Maximum size of all unused buffers the cache can
+ * hold.
*/
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
unsigned usecs,
float size_factor,
- unsigned bypass_usage)
+ unsigned bypass_usage,
+ uint64_t maximum_cache_size)
{
struct pb_cache_manager *mgr;
mgr->bypass_usage = bypass_usage;
LIST_INITHEAD(&mgr->delayed);
mgr->numDelayed = 0;
+ mgr->max_cache_size = maximum_cache_size;
pipe_mutex_init(mgr->mutex);
return &mgr->base;
ws->kman = radeon_bomgr_create(ws);
if (!ws->kman)
goto fail;
- ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ ws->cman_vram = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+ ws->info.vram_size / 8);
if (!ws->cman_vram)
goto fail;
- ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ ws->cman_vram_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+ ws->info.vram_size / 8);
if (!ws->cman_vram_gtt_wc)
goto fail;
- ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ ws->cman_gtt = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+ ws->info.gart_size / 8);
if (!ws->cman_gtt)
goto fail;
- ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0);
+ ws->cman_gtt_wc = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
+ ws->info.gart_size / 8);
if (!ws->cman_gtt_wc)
goto fail;
vws->pools.mob_cache =
pb_cache_manager_create(vws->pools.gmr, 100000, 2.0f,
- VMW_BUFFER_USAGE_SHARED);
+ VMW_BUFFER_USAGE_SHARED,
+ 64 * 1024 * 1024);
if (!vws->pools.mob_cache)
return FALSE;