1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/u_hash.h"
30 #include "svga_debug.h"
31 #include "svga_format.h"
32 #include "svga_winsys.h"
33 #include "svga_screen.h"
34 #include "svga_screen_cache.h"
37 #define SVGA_SURFACE_CACHE_ENABLED 1
41 * Return the size of the surface described by the key (in bytes).
44 surface_size(const struct svga_host_surface_cache_key
*key
)
46 unsigned bw
, bh
, bpb
, total_size
, i
;
48 assert(key
->numMipLevels
> 0);
49 assert(key
->numFaces
> 0);
51 if (key
->format
== SVGA3D_BUFFER
) {
52 /* Special case: we don't want to count vertex/index buffers
53 * against the cache size limit, so view them as zero-sized.
58 svga_format_size(key
->format
, &bw
, &bh
, &bpb
);
62 for (i
= 0; i
< key
->numMipLevels
; i
++) {
63 unsigned w
= u_minify(key
->size
.width
, i
);
64 unsigned h
= u_minify(key
->size
.height
, i
);
65 unsigned d
= u_minify(key
->size
.depth
, i
);
66 unsigned img_size
= ((w
+ bw
- 1) / bw
) * ((h
+ bh
- 1) / bh
) * d
* bpb
;
67 total_size
+= img_size
;
70 total_size
*= key
->numFaces
;
77 * Compute the bucket for this key.
79 static INLINE
unsigned
80 svga_screen_cache_bucket(const struct svga_host_surface_cache_key
*key
)
82 return util_hash_crc32(key
, sizeof *key
) % SVGA_HOST_SURFACE_CACHE_BUCKETS
;
87 * Search the cache for a surface that matches the key. If a match is
88 * found, remove it from the cache and return the surface pointer.
89 * Return NULL otherwise.
91 static INLINE
struct svga_winsys_surface
*
92 svga_screen_cache_lookup(struct svga_screen
*svgascreen
,
93 const struct svga_host_surface_cache_key
*key
)
95 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
96 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
97 struct svga_host_surface_cache_entry
*entry
;
98 struct svga_winsys_surface
*handle
= NULL
;
99 struct list_head
*curr
, *next
;
103 assert(key
->cachable
);
105 bucket
= svga_screen_cache_bucket(key
);
107 pipe_mutex_lock(cache
->mutex
);
109 curr
= cache
->bucket
[bucket
].next
;
111 while (curr
!= &cache
->bucket
[bucket
]) {
114 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, bucket_head
);
116 assert(entry
->handle
);
118 if (memcmp(&entry
->key
, key
, sizeof *key
) == 0 &&
119 sws
->fence_signalled(sws
, entry
->fence
, 0) == 0) {
122 assert(sws
->surface_is_flushed(sws
, entry
->handle
));
124 handle
= entry
->handle
; /* Reference is transfered here. */
125 entry
->handle
= NULL
;
127 LIST_DEL(&entry
->bucket_head
);
129 LIST_DEL(&entry
->head
);
131 LIST_ADD(&entry
->head
, &cache
->empty
);
133 /* update the cache size */
134 surf_size
= surface_size(&entry
->key
);
135 assert(surf_size
<= cache
->total_size
);
136 if (surf_size
> cache
->total_size
)
137 cache
->total_size
= 0; /* should never happen, but be safe */
139 cache
->total_size
-= surf_size
;
148 pipe_mutex_unlock(cache
->mutex
);
150 if (SVGA_DEBUG
& DEBUG_DMA
)
151 debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__
,
152 handle
? "hit" : "miss", tries
, bucket
);
159 * Free the least recently used entries in the surface cache until the
160 * cache size is <= the target size OR there are no unused entries left
161 * to discard. We don't do any flushing to try to free up additional
165 svga_screen_cache_shrink(struct svga_screen
*svgascreen
,
166 unsigned target_size
)
168 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
169 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
170 struct svga_host_surface_cache_entry
*entry
= NULL
, *next_entry
;
172 /* Walk over the list of unused buffers in reverse order: from oldest
175 LIST_FOR_EACH_ENTRY_SAFE_REV(entry
, next_entry
, &cache
->unused
, head
) {
176 if (entry
->key
.format
!= SVGA3D_BUFFER
) {
177 /* we don't want to discard vertex/index buffers */
179 cache
->total_size
-= surface_size(&entry
->key
);
181 assert(entry
->handle
);
182 sws
->surface_reference(sws
, &entry
->handle
, NULL
);
184 LIST_DEL(&entry
->bucket_head
);
185 LIST_DEL(&entry
->head
);
186 LIST_ADD(&entry
->head
, &cache
->empty
);
188 if (cache
->total_size
<= target_size
) {
198 * Transfers a handle reference.
201 svga_screen_cache_add(struct svga_screen
*svgascreen
,
202 const struct svga_host_surface_cache_key
*key
,
203 struct svga_winsys_surface
**p_handle
)
205 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
206 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
207 struct svga_host_surface_cache_entry
*entry
= NULL
;
208 struct svga_winsys_surface
*handle
= *p_handle
;
211 assert(key
->cachable
);
217 surf_size
= surface_size(key
);
220 pipe_mutex_lock(cache
->mutex
);
222 if (surf_size
>= SVGA_HOST_SURFACE_CACHE_BYTES
) {
223 /* this surface is too large to cache, just free it */
224 sws
->surface_reference(sws
, &handle
, NULL
);
225 pipe_mutex_unlock(cache
->mutex
);
229 if (cache
->total_size
+ surf_size
> SVGA_HOST_SURFACE_CACHE_BYTES
) {
230 /* Adding this surface would exceed the cache size.
231 * Try to discard least recently used entries until we hit the
232 * new target cache size.
234 unsigned target_size
= SVGA_HOST_SURFACE_CACHE_BYTES
- surf_size
;
236 svga_screen_cache_shrink(svgascreen
, target_size
);
238 if (cache
->total_size
> target_size
) {
239 /* we weren't able to shrink the cache as much as we wanted so
240 * just discard this surface.
242 sws
->surface_reference(sws
, &handle
, NULL
);
243 pipe_mutex_unlock(cache
->mutex
);
248 if (!LIST_IS_EMPTY(&cache
->empty
)) {
249 /* use the first empty entry */
250 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
,
251 cache
->empty
.next
, head
);
253 LIST_DEL(&entry
->head
);
255 else if (!LIST_IS_EMPTY(&cache
->unused
)) {
256 /* free the last used buffer and reuse its entry */
257 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
,
258 cache
->unused
.prev
, head
);
259 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
260 "unref sid %p (make space)\n", entry
->handle
);
262 cache
->total_size
-= surface_size(&entry
->key
);
264 sws
->surface_reference(sws
, &entry
->handle
, NULL
);
266 LIST_DEL(&entry
->bucket_head
);
268 LIST_DEL(&entry
->head
);
272 entry
->handle
= handle
;
273 memcpy(&entry
->key
, key
, sizeof entry
->key
);
275 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
276 "cache sid %p\n", entry
->handle
);
277 LIST_ADD(&entry
->head
, &cache
->validated
);
279 cache
->total_size
+= surf_size
;
282 /* Couldn't cache the buffer -- this really shouldn't happen */
283 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
284 "unref sid %p (couldn't find space)\n", handle
);
285 sws
->surface_reference(sws
, &handle
, NULL
);
288 pipe_mutex_unlock(cache
->mutex
);
293 * Called during the screen flush to move all buffers not in a validate list
294 * into the unused list.
297 svga_screen_cache_flush(struct svga_screen
*svgascreen
,
298 struct pipe_fence_handle
*fence
)
300 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
301 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
302 struct svga_host_surface_cache_entry
*entry
;
303 struct list_head
*curr
, *next
;
306 pipe_mutex_lock(cache
->mutex
);
308 curr
= cache
->validated
.next
;
310 while (curr
!= &cache
->validated
) {
311 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, head
);
313 assert(entry
->handle
);
315 if (sws
->surface_is_flushed(sws
, entry
->handle
)) {
316 LIST_DEL(&entry
->head
);
318 svgascreen
->sws
->fence_reference(svgascreen
->sws
, &entry
->fence
, fence
);
320 LIST_ADD(&entry
->head
, &cache
->unused
);
322 bucket
= svga_screen_cache_bucket(&entry
->key
);
323 LIST_ADD(&entry
->bucket_head
, &cache
->bucket
[bucket
]);
330 pipe_mutex_unlock(cache
->mutex
);
335 svga_screen_cache_cleanup(struct svga_screen
*svgascreen
)
337 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
338 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
341 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
) {
342 if (cache
->entries
[i
].handle
) {
343 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
344 "unref sid %p (shutdown)\n", cache
->entries
[i
].handle
);
345 sws
->surface_reference(sws
, &cache
->entries
[i
].handle
, NULL
);
347 cache
->total_size
-= surface_size(&cache
->entries
[i
].key
);
350 if (cache
->entries
[i
].fence
)
351 svgascreen
->sws
->fence_reference(svgascreen
->sws
,
352 &cache
->entries
[i
].fence
, NULL
);
355 pipe_mutex_destroy(cache
->mutex
);
360 svga_screen_cache_init(struct svga_screen
*svgascreen
)
362 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
365 assert(cache
->total_size
== 0);
367 pipe_mutex_init(cache
->mutex
);
369 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_BUCKETS
; ++i
)
370 LIST_INITHEAD(&cache
->bucket
[i
]);
372 LIST_INITHEAD(&cache
->unused
);
374 LIST_INITHEAD(&cache
->validated
);
376 LIST_INITHEAD(&cache
->empty
);
377 for (i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
)
378 LIST_ADDTAIL(&cache
->entries
[i
].head
, &cache
->empty
);
384 struct svga_winsys_surface
*
385 svga_screen_surface_create(struct svga_screen
*svgascreen
,
386 struct svga_host_surface_cache_key
*key
)
388 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
389 struct svga_winsys_surface
*handle
= NULL
;
390 boolean cachable
= SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
;
392 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
393 "%s sz %dx%dx%d mips %d faces %d cachable %d\n",
403 if (key
->format
== SVGA3D_BUFFER
) {
404 /* For buffers, round the buffer size up to the nearest power
405 * of two to increase the probability of cache hits. Keep
406 * texture surface dimensions unchanged.
409 while (size
< key
->size
.width
)
411 key
->size
.width
= size
;
412 /* Since we're reusing buffers we're effectively transforming all
413 * of them into dynamic buffers.
415 * It would be nice to not cache long lived static buffers. But there
416 * is no way to detect the long lived from short lived ones yet. A
417 * good heuristic would be buffer size.
419 key
->flags
&= ~SVGA3D_SURFACE_HINT_STATIC
;
420 key
->flags
|= SVGA3D_SURFACE_HINT_DYNAMIC
;
423 handle
= svga_screen_cache_lookup(svgascreen
, key
);
425 if (key
->format
== SVGA3D_BUFFER
)
426 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
427 "reuse sid %p sz %d (buffer)\n", handle
,
430 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
431 "reuse sid %p sz %dx%dx%d mips %d faces %d\n", handle
,
441 handle
= sws
->surface_create(sws
,
448 SVGA_DBG(DEBUG_CACHE
|DEBUG_DMA
,
449 " CREATE sid %p sz %dx%dx%d\n",
461 svga_screen_surface_destroy(struct svga_screen
*svgascreen
,
462 const struct svga_host_surface_cache_key
*key
,
463 struct svga_winsys_surface
**p_handle
)
465 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
467 /* We only set the cachable flag for surfaces of which we are the
468 * exclusive owner. So just hold onto our existing reference in
471 if (SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
) {
472 svga_screen_cache_add(svgascreen
, key
, p_handle
);
476 "unref sid %p (uncachable)\n", *p_handle
);
477 sws
->surface_reference(sws
, p_handle
, NULL
);