/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 **********************************************************/
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/u_hash.h"
30 #include "svga_debug.h"
31 #include "svga_format.h"
32 #include "svga_winsys.h"
33 #include "svga_screen.h"
34 #include "svga_screen_cache.h"
37 #define SVGA_SURFACE_CACHE_ENABLED 1
41 * Return the size of the surface described by the key (in bytes).
44 surface_size(const struct svga_host_surface_cache_key
*key
)
46 unsigned bw
, bh
, bpb
, total_size
, i
;
48 assert(key
->numMipLevels
> 0);
49 assert(key
->numFaces
> 0);
51 if (key
->format
== SVGA3D_BUFFER
) {
52 /* Special case: we don't want to count vertex/index buffers
53 * against the cache size limit, so view them as zero-sized.
58 svga_format_size(key
->format
, &bw
, &bh
, &bpb
);
62 for (i
= 0; i
< key
->numMipLevels
; i
++) {
63 unsigned w
= u_minify(key
->size
.width
, i
);
64 unsigned h
= u_minify(key
->size
.height
, i
);
65 unsigned d
= u_minify(key
->size
.depth
, i
);
66 unsigned img_size
= ((w
+ bw
- 1) / bw
) * ((h
+ bh
- 1) / bh
) * d
* bpb
;
67 total_size
+= img_size
;
70 total_size
*= key
->numFaces
;
77 * Compute the bucket for this key.
79 static inline unsigned
80 svga_screen_cache_bucket(const struct svga_host_surface_cache_key
*key
)
82 return util_hash_crc32(key
, sizeof *key
) % SVGA_HOST_SURFACE_CACHE_BUCKETS
;
87 * Search the cache for a surface that matches the key. If a match is
88 * found, remove it from the cache and return the surface pointer.
89 * Return NULL otherwise.
91 static struct svga_winsys_surface
*
92 svga_screen_cache_lookup(struct svga_screen
*svgascreen
,
93 const struct svga_host_surface_cache_key
*key
)
95 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
96 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
97 struct svga_host_surface_cache_entry
*entry
;
98 struct svga_winsys_surface
*handle
= NULL
;
99 struct list_head
*curr
, *next
;
103 assert(key
->cachable
);
105 bucket
= svga_screen_cache_bucket(key
);
107 pipe_mutex_lock(cache
->mutex
);
109 curr
= cache
->bucket
[bucket
].next
;
111 while (curr
!= &cache
->bucket
[bucket
]) {
114 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, bucket_head
);
116 assert(entry
->handle
);
118 /* If the key matches and the fence is signalled (the surface is no
119 * longer needed) the lookup was successful. We found a surface that
121 * We unlink the surface from the cache entry and we add the entry to
124 if (memcmp(&entry
->key
, key
, sizeof *key
) == 0 &&
125 sws
->fence_signalled(sws
, entry
->fence
, 0) == 0) {
128 assert(sws
->surface_is_flushed(sws
, entry
->handle
));
130 handle
= entry
->handle
; /* Reference is transfered here. */
131 entry
->handle
= NULL
;
133 /* Remove from hash table */
134 LIST_DEL(&entry
->bucket_head
);
136 /* remove from LRU list */
137 LIST_DEL(&entry
->head
);
139 /* Add the cache entry (but not the surface!) to the empty list */
140 LIST_ADD(&entry
->head
, &cache
->empty
);
142 /* update the cache size */
143 surf_size
= surface_size(&entry
->key
);
144 assert(surf_size
<= cache
->total_size
);
145 if (surf_size
> cache
->total_size
)
146 cache
->total_size
= 0; /* should never happen, but be safe */
148 cache
->total_size
-= surf_size
;
157 pipe_mutex_unlock(cache
->mutex
);
159 if (SVGA_DEBUG
& DEBUG_DMA
)
160 debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__
,
161 handle
? "hit" : "miss", tries
, bucket
);
168 * Free the least recently used entries in the surface cache until the
169 * cache size is <= the target size OR there are no unused entries left
170 * to discard. We don't do any flushing to try to free up additional
174 svga_screen_cache_shrink(struct svga_screen
*svgascreen
,
175 unsigned target_size
)
177 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
178 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
179 struct svga_host_surface_cache_entry
*entry
= NULL
, *next_entry
;
181 /* Walk over the list of unused buffers in reverse order: from oldest
184 LIST_FOR_EACH_ENTRY_SAFE_REV(entry
, next_entry
, &cache
->unused
, head
) {
185 if (entry
->key
.format
!= SVGA3D_BUFFER
) {
186 /* we don't want to discard vertex/index buffers */
188 cache
->total_size
-= surface_size(&entry
->key
);
190 assert(entry
->handle
);
191 sws
->surface_reference(sws
, &entry
->handle
, NULL
);
193 LIST_DEL(&entry
->bucket_head
);
194 LIST_DEL(&entry
->head
);
195 LIST_ADD(&entry
->head
, &cache
->empty
);
197 if (cache
->total_size
<= target_size
) {
207 * Add a surface to the cache. This is done when the driver deletes
208 * the surface. Note: transfers a handle reference.
211 svga_screen_cache_add(struct svga_screen
*svgascreen
,
212 const struct svga_host_surface_cache_key
*key
,
213 struct svga_winsys_surface
**p_handle
)
215 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
216 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
217 struct svga_host_surface_cache_entry
*entry
= NULL
;
218 struct svga_winsys_surface
*handle
= *p_handle
;
221 assert(key
->cachable
);
226 surf_size
= surface_size(key
);
229 pipe_mutex_lock(cache
->mutex
);
231 if (surf_size
>= SVGA_HOST_SURFACE_CACHE_BYTES
) {
232 /* this surface is too large to cache, just free it */
233 sws
->surface_reference(sws
, &handle
, NULL
);
234 pipe_mutex_unlock(cache
->mutex
);
238 if (cache
->total_size
+ surf_size
> SVGA_HOST_SURFACE_CACHE_BYTES
) {
239 /* Adding this surface would exceed the cache size.
240 * Try to discard least recently used entries until we hit the
241 * new target cache size.
243 unsigned target_size
= SVGA_HOST_SURFACE_CACHE_BYTES
- surf_size
;
245 svga_screen_cache_shrink(svgascreen
, target_size
);
247 if (cache
->total_size
> target_size
) {
248 /* we weren't able to shrink the cache as much as we wanted so
249 * just discard this surface.
251 sws
->surface_reference(sws
, &handle
, NULL
);
252 pipe_mutex_unlock(cache
->mutex
);
257 if (!LIST_IS_EMPTY(&cache
->empty
)) {
258 /* An empty entry has no surface associated with it.
259 * Use the first empty entry.
261 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
,
262 cache
->empty
.next
, head
);
264 /* Remove from LRU list */
265 LIST_DEL(&entry
->head
);
267 else if (!LIST_IS_EMPTY(&cache
->unused
)) {
268 /* free the last used buffer and reuse its entry */
269 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
,
270 cache
->unused
.prev
, head
);
271 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
272 "unref sid %p (make space)\n", entry
->handle
);
274 cache
->total_size
-= surface_size(&entry
->key
);
276 sws
->surface_reference(sws
, &entry
->handle
, NULL
);
278 /* Remove from hash table */
279 LIST_DEL(&entry
->bucket_head
);
281 /* Remove from LRU list */
282 LIST_DEL(&entry
->head
);
286 assert(entry
->handle
== NULL
);
287 entry
->handle
= handle
;
288 memcpy(&entry
->key
, key
, sizeof entry
->key
);
290 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
291 "cache sid %p\n", entry
->handle
);
292 LIST_ADD(&entry
->head
, &cache
->validated
);
294 cache
->total_size
+= surf_size
;
297 /* Couldn't cache the buffer -- this really shouldn't happen */
298 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
299 "unref sid %p (couldn't find space)\n", handle
);
300 sws
->surface_reference(sws
, &handle
, NULL
);
303 pipe_mutex_unlock(cache
->mutex
);
308 * Called during the screen flush to move all buffers not in a validate list
309 * into the unused list.
312 svga_screen_cache_flush(struct svga_screen
*svgascreen
,
313 struct pipe_fence_handle
*fence
)
315 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
316 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
317 struct svga_host_surface_cache_entry
*entry
;
318 struct list_head
*curr
, *next
;
321 pipe_mutex_lock(cache
->mutex
);
323 /* Loop over entries in the invalidated list */
324 curr
= cache
->invalidated
.next
;
326 while (curr
!= &cache
->invalidated
) {
327 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, head
);
329 assert(entry
->handle
);
331 if (sws
->surface_is_flushed(sws
, entry
->handle
)) {
332 /* remove entry from the invalidated list */
333 LIST_DEL(&entry
->head
);
335 sws
->fence_reference(sws
, &entry
->fence
, fence
);
337 /* Add entry to the unused list */
338 LIST_ADD(&entry
->head
, &cache
->unused
);
340 /* Add entry to the hash table bucket */
341 bucket
= svga_screen_cache_bucket(&entry
->key
);
342 LIST_ADD(&entry
->bucket_head
, &cache
->bucket
[bucket
]);
349 curr
= cache
->validated
.next
;
351 while (curr
!= &cache
->validated
) {
352 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, head
);
354 assert(entry
->handle
);
356 if (sws
->surface_is_flushed(sws
, entry
->handle
)) {
357 /* remove entry from the validated list */
358 LIST_DEL(&entry
->head
);
360 /* it is now safe to invalidate the surface content. */
361 sws
->surface_invalidate(sws
, entry
->handle
);
363 /* add the entry to the invalidated list */
364 LIST_ADD(&entry
->head
, &cache
->invalidated
);
371 pipe_mutex_unlock(cache
->mutex
);
376 * Free all the surfaces in the cache.
377 * Called when destroying the svga screen object.
380 svga_screen_cache_cleanup(struct svga_screen
*svgascreen
)
382 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
383 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
386 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
) {
387 if (cache
->entries
[i
].handle
) {
388 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
389 "unref sid %p (shutdown)\n", cache
->entries
[i
].handle
);
390 sws
->surface_reference(sws
, &cache
->entries
[i
].handle
, NULL
);
392 cache
->total_size
-= surface_size(&cache
->entries
[i
].key
);
395 if (cache
->entries
[i
].fence
)
396 sws
->fence_reference(sws
, &cache
->entries
[i
].fence
, NULL
);
399 pipe_mutex_destroy(cache
->mutex
);
404 svga_screen_cache_init(struct svga_screen
*svgascreen
)
406 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
409 assert(cache
->total_size
== 0);
411 pipe_mutex_init(cache
->mutex
);
413 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_BUCKETS
; ++i
)
414 LIST_INITHEAD(&cache
->bucket
[i
]);
416 LIST_INITHEAD(&cache
->unused
);
418 LIST_INITHEAD(&cache
->validated
);
420 LIST_INITHEAD(&cache
->invalidated
);
422 LIST_INITHEAD(&cache
->empty
);
423 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
)
424 LIST_ADDTAIL(&cache
->entries
[i
].head
, &cache
->empty
);
431 * Allocate a new host-side surface. If the surface is marked as cachable,
432 * first try re-using a surface in the cache of freed surfaces. Otherwise,
433 * allocate a new surface.
434 * \param bind_flags bitmask of PIPE_BIND_x flags
435 * \param usage one of PIPE_USAGE_x values
437 struct svga_winsys_surface
*
438 svga_screen_surface_create(struct svga_screen
*svgascreen
,
439 unsigned bind_flags
, enum pipe_resource_usage usage
,
440 struct svga_host_surface_cache_key
*key
)
442 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
443 struct svga_winsys_surface
*handle
= NULL
;
444 boolean cachable
= SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
;
446 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
447 "%s sz %dx%dx%d mips %d faces %d arraySize %d cachable %d\n",
458 if (key
->format
== SVGA3D_BUFFER
) {
459 SVGA3dSurfaceFlags hint_flag
;
461 /* For buffers, round the buffer size up to the nearest power
462 * of two to increase the probability of cache hits. Keep
463 * texture surface dimensions unchanged.
466 while (size
< key
->size
.width
)
468 key
->size
.width
= size
;
470 /* Determine whether the buffer is static or dynamic.
471 * This is a bit of a heuristic which can be tuned as needed.
473 if (usage
== PIPE_USAGE_DEFAULT
||
474 usage
== PIPE_USAGE_IMMUTABLE
) {
475 hint_flag
= SVGA3D_SURFACE_HINT_STATIC
;
477 else if (bind_flags
& PIPE_BIND_INDEX_BUFFER
) {
478 /* Index buffers don't change too often. Mark them as static.
480 hint_flag
= SVGA3D_SURFACE_HINT_STATIC
;
483 /* Since we're reusing buffers we're effectively transforming all
484 * of them into dynamic buffers.
486 * It would be nice to not cache long lived static buffers. But there
487 * is no way to detect the long lived from short lived ones yet. A
488 * good heuristic would be buffer size.
490 hint_flag
= SVGA3D_SURFACE_HINT_DYNAMIC
;
493 key
->flags
&= ~(SVGA3D_SURFACE_HINT_STATIC
|
494 SVGA3D_SURFACE_HINT_DYNAMIC
);
495 key
->flags
|= hint_flag
;
498 handle
= svga_screen_cache_lookup(svgascreen
, key
);
500 if (key
->format
== SVGA3D_BUFFER
)
501 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
502 "reuse sid %p sz %d (buffer)\n", handle
,
505 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
506 "reuse sid %p sz %dx%dx%d mips %d faces %d arraySize %d\n", handle
,
520 usage
|= SVGA_SURFACE_USAGE_SHARED
;
522 usage
|= SVGA_SURFACE_USAGE_SCANOUT
;
524 handle
= sws
->surface_create(sws
,
529 key
->numFaces
* key
->arraySize
,
533 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
534 " CREATE sid %p sz %dx%dx%d\n",
546 * Release a surface. We don't actually free the surface- we put
547 * it into the cache of freed surfaces (if it's cachable).
550 svga_screen_surface_destroy(struct svga_screen
*svgascreen
,
551 const struct svga_host_surface_cache_key
*key
,
552 struct svga_winsys_surface
**p_handle
)
554 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
556 /* We only set the cachable flag for surfaces of which we are the
557 * exclusive owner. So just hold onto our existing reference in
560 if (SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
) {
561 svga_screen_cache_add(svgascreen
, key
, p_handle
);
565 "unref sid %p (uncachable)\n", *p_handle
);
566 sws
->surface_reference(sws
, p_handle
, NULL
);
572 * Print/dump the contents of the screen cache. For debugging.
575 svga_screen_cache_dump(const struct svga_screen
*svgascreen
)
577 const struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
581 debug_printf("svga3d surface cache:\n");
582 for (bucket
= 0; bucket
< SVGA_HOST_SURFACE_CACHE_BUCKETS
; bucket
++) {
583 struct list_head
*curr
;
584 curr
= cache
->bucket
[bucket
].next
;
585 while (curr
&& curr
!= &cache
->bucket
[bucket
]) {
586 struct svga_host_surface_cache_entry
*entry
=
587 LIST_ENTRY(struct svga_host_surface_cache_entry
,
589 if (entry
->key
.format
== SVGA3D_BUFFER
) {
590 debug_printf(" %p: buffer %u bytes\n",
592 entry
->key
.size
.width
);
595 debug_printf(" %p: %u x %u x %u format %u\n",
597 entry
->key
.size
.width
,
598 entry
->key
.size
.height
,
599 entry
->key
.size
.depth
,
607 debug_printf("%u surfaces, %u bytes\n", count
, cache
->total_size
);