pipe_mutex_lock(cache->mutex);
- /* Loop over entries in the validated list */
- curr = cache->validated.next;
+ /* Loop over entries in the invalidated list */
+ curr = cache->invalidated.next;
next = curr->next;
- while (curr != &cache->validated) {
+ while (curr != &cache->invalidated) {
entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
assert(entry->handle);
if (sws->surface_is_flushed(sws, entry->handle)) {
- /* remove entry from LRU list */
+ /* remove entry from the invalidated list */
LIST_DEL(&entry->head);
svgascreen->sws->fence_reference(svgascreen->sws, &entry->fence, fence);
/* add the entry to the unused list */
LIST_ADD(&entry->head, &cache->unused);
}

curr = next;
next = curr->next;
}
+ curr = cache->validated.next;
+ next = curr->next;
+ while (curr != &cache->validated) {
+ entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
+
+ assert(entry->handle);
+
+ if (sws->surface_is_flushed(sws, entry->handle)) {
+ /* remove entry from the validated list */
+ LIST_DEL(&entry->head);
+
+ /* It is now safe to invalidate the surface content. */
+ sws->surface_invalidate(sws, entry->handle);
+
+ /* add the entry to the invalidated list */
+ LIST_ADD(&entry->head, &cache->invalidated);
+ }
+
+ curr = next;
+ next = curr->next;
+ }
+
pipe_mutex_unlock(cache->mutex);
}
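The two passes above retire entries along the lifecycle described in the header further down: flushed entries leave the invalidated list for the unused pool once they carry a fence, while flushed validated entries are invalidated and demoted to the invalidated list. Both walks depend on caching `next` before the current node can be unlinked. A minimal, self-contained sketch of that walk-while-unlinking idiom; the list helpers, `struct node`, `move_flushed`, and the `flushed` flag are illustrative stand-ins, not the driver's u_double_list macros:

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_inithead(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *i)
{
   i->prev->next = i->next;
   i->next->prev = i->prev;
}

static void list_add(struct list_head *i, struct list_head *h)
{
   i->next = h->next;
   i->prev = h;
   h->next->prev = i;
   h->next = i;
}

struct node { struct list_head head; int flushed; };

/* Move every flushed node from src to dst.  Caching next before curr
 * can be unlinked keeps the walk valid, exactly as in the loops above. */
static void move_flushed(struct list_head *src, struct list_head *dst)
{
   struct list_head *curr = src->next;
   struct list_head *next = curr->next;

   while (curr != src) {
      struct node *n = (struct node *) curr; /* head is the first member */

      if (n->flushed) {
         list_del(curr);
         list_add(curr, dst);
      }

      curr = next;
      next = curr->next;
   }
}

int main(void)
{
   struct list_head pending, done;
   struct node a = { .flushed = 1 }, b = { .flushed = 0 };

   list_inithead(&pending);
   list_inithead(&done);
   list_add(&a.head, &pending);
   list_add(&b.head, &pending);

   move_flushed(&pending, &done);
   assert(done.next == &a.head);    /* a moved */
   assert(pending.next == &b.head); /* b stayed */
   printf("ok\n");
   return 0;
}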
LIST_INITHEAD(&cache->validated);
+ LIST_INITHEAD(&cache->invalidated);
+
LIST_INITHEAD(&cache->empty);
for (i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
* that case.
*/
if (SVGA_SURFACE_CACHE_ENABLED && key->cachable) {
+
+ /* Invalidate the surface before putting it into the recycle pool */
+ if (key->format != SVGA3D_BUFFER)
+ sws->surface_invalidate(sws, *p_handle);
+
svga_screen_cache_add(svgascreen, key, p_handle);
}
else {
* A cache entry can be in the following stages:
* 1. empty (entry->handle = NULL)
* 2. holding a buffer in a validate list
- * 3. holding a flushed buffer (not in any validate list) with an active fence
- * 4. holding a flushed buffer with an expired fence
+ * 3. holding a buffer in an invalidate list
+ * 4. holding a flushed buffer (not in any validate list) with an active fence
+ * 5. holding a flushed buffer with an expired fence
*
- * An entry progresses from 1 -> 2 -> 3 -> 4. When we need an entry to put a
+ * An entry progresses from 1 -> 2 -> 3 -> 4 -> 5. When we need an entry to put a
* buffer into we preferentially take from 1, or from the least recently used
- * buffer from 3/4.
+ * buffer from 4/5.
*/
struct svga_host_surface_cache
{
- * (3 and 4) */
+ * (4 and 5) */
struct list_head unused;
- /* Entries with buffers still in validate lists (2) */
+ /* Entries with buffers still in a validate list (2) */
struct list_head validated;
+ /* Entries with buffers still in an invalidate list (3) */
+ struct list_head invalidated;
+
/** Empty entries (1) */
struct list_head empty;
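An entry's stage is never stored explicitly; it is encoded by which of these lists the entry is linked into. A hypothetical enum naming the progression (no such type exists in the driver, it is purely illustrative):

/* Hypothetical: the stage is implied by the containing list. */
enum svga_cache_entry_stage {
   STAGE_EMPTY = 1,    /* 1: entry->handle == NULL, on cache->empty  */
   STAGE_VALIDATED,    /* 2: on cache->validated, awaiting flush     */
   STAGE_INVALIDATED,  /* 3: on cache->invalidated, awaiting flush   */
   STAGE_FENCED,       /* 4: on cache->unused, fence still active    */
   STAGE_REUSABLE      /* 5: on cache->unused, fence expired         */
};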
uint32 numLayers,
uint32 numMipLevels);
+ /**
+ * Invalidate the content of this surface
+ */
+ void
+ (*surface_invalidate)(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface *surface);
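+ /* Note: core svga calls this hook from the surface-cache flush and
+ * destroy paths; a winsys that always uses the DMA path may make it
+ * a no-op. */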
+
/**
* Buffer management. Buffer attributes are mostly fixed over its lifetime.
*
}
+static void
+vmw_svga_winsys_surface_invalidate(struct svga_winsys_screen *sws,
+ struct svga_winsys_surface *surf)
+{
+ /* No-op: surface invalidation is only needed when the DMA path is not
+ * used. DMA is enabled when guest-backed surfaces are disabled or when
+ * guest-backed DMA is enabled, and this winsys enables guest-backed DMA
+ * whenever guest-backed surfaces are enabled. DMA is therefore always
+ * enabled, so there is nothing to invalidate here.
+ */
+}
+
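The argument in that comment can be restated as a predicate over the winsys capability flags. A sketch, assuming the have_gb_objects/have_gb_dma flags on svga_winsys_screen; the function itself is invented for illustration:

#include <stdbool.h>

/* Restates the no-op rationale: DMA is used unless guest-backed objects
 * are enabled without guest-backed DMA, and this winsys never does that,
 * so the predicate holds for every reachable combination. */
static bool dma_path_enabled(bool have_gb_objects, bool have_gb_dma)
{
   return !have_gb_objects || have_gb_dma;
}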
static boolean
vmw_svga_winsys_surface_is_flushed(struct svga_winsys_screen *sws,
struct svga_winsys_surface *surface)
vws->base.surface_is_flushed = vmw_svga_winsys_surface_is_flushed;
vws->base.surface_reference = vmw_svga_winsys_surface_ref;
vws->base.surface_can_create = vmw_svga_winsys_surface_can_create;
+ vws->base.surface_invalidate = vmw_svga_winsys_surface_invalidate;
vws->base.buffer_create = vmw_svga_winsys_buffer_create;
vws->base.buffer_map = vmw_svga_winsys_buffer_map;
vws->base.buffer_unmap = vmw_svga_winsys_buffer_unmap;