#include "svga_winsys.h"
#include "svga_screen.h"
#include "svga_screen_cache.h"
-
+#include "svga_context.h"
+#include "svga_cmd.h"
#define SVGA_SURFACE_CACHE_ENABLED 1
/**
* Return the size of the surface described by the key (in bytes).
*/
-static unsigned
-surface_size(const struct svga_host_surface_cache_key *key)
+unsigned
+svga_surface_size(const struct svga_host_surface_cache_key *key)
{
unsigned bw, bh, bpb, total_size, i;
assert(key->numMipLevels > 0);
assert(key->numFaces > 0);
+ assert(key->arraySize > 0);
if (key->format == SVGA3D_BUFFER) {
/* Special case: we don't want to count vertex/index buffers
total_size += img_size;
}
- total_size *= key->numFaces;
+ total_size *= key->numFaces * key->arraySize * MAX2(1, key->sampleCount);
return total_size;
}
bucket = svga_screen_cache_bucket(key);
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
curr = cache->bucket[bucket].next;
next = curr->next;
entry->handle = NULL;
/* Remove from hash table */
- LIST_DEL(&entry->bucket_head);
+ list_del(&entry->bucket_head);
/* remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
/* Add the cache entry (but not the surface!) to the empty list */
- LIST_ADD(&entry->head, &cache->empty);
+ list_add(&entry->head, &cache->empty);
/* update the cache size */
- surf_size = surface_size(&entry->key);
+ surf_size = svga_surface_size(&entry->key);
assert(surf_size <= cache->total_size);
if (surf_size > cache->total_size)
cache->total_size = 0; /* should never happen, but be safe */
next = curr->next;
}
- pipe_mutex_unlock(cache->mutex);
+ mtx_unlock(&cache->mutex);
if (SVGA_DEBUG & DEBUG_DMA)
debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__,
if (entry->key.format != SVGA3D_BUFFER) {
/* we don't want to discard vertex/index buffers */
- cache->total_size -= surface_size(&entry->key);
+ cache->total_size -= svga_surface_size(&entry->key);
assert(entry->handle);
sws->surface_reference(sws, &entry->handle, NULL);
- LIST_DEL(&entry->bucket_head);
- LIST_DEL(&entry->head);
- LIST_ADD(&entry->head, &cache->empty);
+ list_del(&entry->bucket_head);
+ list_del(&entry->head);
+ list_add(&entry->head, &cache->empty);
if (cache->total_size <= target_size) {
/* all done */
if (!handle)
return;
- surf_size = surface_size(key);
+ surf_size = svga_surface_size(key);
*p_handle = NULL;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
if (surf_size >= SVGA_HOST_SURFACE_CACHE_BYTES) {
/* this surface is too large to cache, just free it */
sws->surface_reference(sws, &handle, NULL);
- pipe_mutex_unlock(cache->mutex);
+ mtx_unlock(&cache->mutex);
return;
}
* just discard this surface.
*/
sws->surface_reference(sws, &handle, NULL);
- pipe_mutex_unlock(cache->mutex);
+ mtx_unlock(&cache->mutex);
return;
}
}
- if (!LIST_IS_EMPTY(&cache->empty)) {
+ if (!list_is_empty(&cache->empty)) {
/* An empty entry has no surface associated with it.
* Use the first empty entry.
*/
cache->empty.next, head);
/* Remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
}
- else if (!LIST_IS_EMPTY(&cache->unused)) {
+ else if (!list_is_empty(&cache->unused)) {
/* free the last used buffer and reuse its entry */
entry = LIST_ENTRY(struct svga_host_surface_cache_entry,
cache->unused.prev, head);
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
"unref sid %p (make space)\n", entry->handle);
- cache->total_size -= surface_size(&entry->key);
+ cache->total_size -= svga_surface_size(&entry->key);
sws->surface_reference(sws, &entry->handle, NULL);
/* Remove from hash table */
- LIST_DEL(&entry->bucket_head);
+ list_del(&entry->bucket_head);
/* Remove from LRU list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
}
if (entry) {
SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
"cache sid %p\n", entry->handle);
- LIST_ADD(&entry->head, &cache->validated);
+
+ /* If we don't have gb objects, we don't need to invalidate. */
+ if (sws->have_gb_objects)
+ list_add(&entry->head, &cache->validated);
+ else
+ list_add(&entry->head, &cache->invalidated);
cache->total_size += surf_size;
}
sws->surface_reference(sws, &handle, NULL);
}
- pipe_mutex_unlock(cache->mutex);
+ mtx_unlock(&cache->mutex);
}
+/* Maximum number of invalidate surface commands in a command buffer */
+# define SVGA_MAX_SURFACE_TO_INVALIDATE 1000
+
/**
* Called during the screen flush to move all buffers not in a validate list
* into the unused list.
*/
void
svga_screen_cache_flush(struct svga_screen *svgascreen,
+ struct svga_context *svga,
struct pipe_fence_handle *fence)
{
struct svga_host_surface_cache *cache = &svgascreen->cache;
struct list_head *curr, *next;
unsigned bucket;
- pipe_mutex_lock(cache->mutex);
+ mtx_lock(&cache->mutex);
/* Loop over entries in the invalidated list */
curr = cache->invalidated.next;
if (sws->surface_is_flushed(sws, entry->handle)) {
/* remove entry from the invalidated list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
sws->fence_reference(sws, &entry->fence, fence);
/* Add entry to the unused list */
- LIST_ADD(&entry->head, &cache->unused);
+ list_add(&entry->head, &cache->unused);
/* Add entry to the hash table bucket */
bucket = svga_screen_cache_bucket(&entry->key);
- LIST_ADD(&entry->bucket_head, &cache->bucket[bucket]);
+ list_add(&entry->bucket_head, &cache->bucket[bucket]);
}
curr = next;
next = curr->next;
}
+ unsigned nsurf = 0;
curr = cache->validated.next;
next = curr->next;
while (curr != &cache->validated) {
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
assert(entry->handle);
+ assert(svga_have_gb_objects(svga));
if (sws->surface_is_flushed(sws, entry->handle)) {
/* remove entry from the validated list */
- LIST_DEL(&entry->head);
+ list_del(&entry->head);
- /* it is now safe to invalidate the surface content. */
- sws->surface_invalidate(sws, entry->handle);
+ /* It is now safe to invalidate the surface content.
+ * It will be done using the current context.
+ */
+ if (SVGA_TRY(SVGA3D_InvalidateGBSurface(svga->swc, entry->handle))
+ != PIPE_OK) {
+ ASSERTED enum pipe_error ret;
+
+ /* Even though surface invalidation here is done after the command
+ * buffer is flushed, it is still possible that it will
+ * fail because there might be just enough of these commands filling
+ * up the command buffer, so in this case we will call
+ * the winsys flush directly to flush the buffer.
+ * Note, we don't want to call svga_context_flush() here because
+ * this function itself is called inside svga_context_flush().
+ */
+ svga_retry_enter(svga);
+ svga->swc->flush(svga->swc, NULL);
+ nsurf = 0;
+ ret = SVGA3D_InvalidateGBSurface(svga->swc, entry->handle);
+ svga_retry_exit(svga);
+ assert(ret == PIPE_OK);
+ }
/* add the entry to the invalidated list */
- LIST_ADD(&entry->head, &cache->invalidated);
+
+ list_add(&entry->head, &cache->invalidated);
+ nsurf++;
}
curr = next;
next = curr->next;
}
- pipe_mutex_unlock(cache->mutex);
+ mtx_unlock(&cache->mutex);
+
+ /**
+ * In some rare cases (e.g. when running ARK: Survival Evolved), we hit the max number
+ * of surface relocations with invalidated surfaces during context flush.
+ * So if the number of invalidated surfaces exceeds a certain limit (1000),
+ * we'll do another winsys flush.
+ */
+ if (nsurf > SVGA_MAX_SURFACE_TO_INVALIDATE) {
+ svga->swc->flush(svga->swc, NULL);
+ }
}
"unref sid %p (shutdown)\n", cache->entries[i].handle);
sws->surface_reference(sws, &cache->entries[i].handle, NULL);
- cache->total_size -= surface_size(&cache->entries[i].key);
+ cache->total_size -= svga_surface_size(&cache->entries[i].key);
}
if (cache->entries[i].fence)
sws->fence_reference(sws, &cache->entries[i].fence, NULL);
}
- pipe_mutex_destroy(cache->mutex);
+ mtx_destroy(&cache->mutex);
}
assert(cache->total_size == 0);
- pipe_mutex_init(cache->mutex);
+ (void) mtx_init(&cache->mutex, mtx_plain);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_BUCKETS; ++i)
- LIST_INITHEAD(&cache->bucket[i]);
+ list_inithead(&cache->bucket[i]);
- LIST_INITHEAD(&cache->unused);
+ list_inithead(&cache->unused);
- LIST_INITHEAD(&cache->validated);
+ list_inithead(&cache->validated);
- LIST_INITHEAD(&cache->invalidated);
+ list_inithead(&cache->invalidated);
- LIST_INITHEAD(&cache->empty);
+ list_inithead(&cache->empty);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
- LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
+ list_addtail(&cache->entries[i].head, &cache->empty);
return PIPE_OK;
}
key->cachable);
if (cachable) {
+ /* Try to re-cycle a previously freed, cached surface */
if (key->format == SVGA3D_BUFFER) {
- SVGA3dSurfaceFlags hint_flag;
+ SVGA3dSurfaceAllFlags hint_flag;
/* For buffers, round the buffer size up to the nearest power
* of two to increase the probability of cache hits. Keep
}
if (!handle) {
+ /* Unable to recycle surface, allocate a new one */
unsigned usage = 0;
if (!key->cachable)
usage |= SVGA_SURFACE_USAGE_SHARED;
if (key->scanout)
usage |= SVGA_SURFACE_USAGE_SCANOUT;
+ if (key->coherent)
+ usage |= SVGA_SURFACE_USAGE_COHERENT;
handle = sws->surface_create(sws,
key->flags,