1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
24 **********************************************************/
26 #include "util/u_memory.h"
27 #include "util/u_hash.h"
29 #include "svga_debug.h"
30 #include "svga_winsys.h"
31 #include "svga_screen.h"
32 #include "svga_screen_cache.h"
35 #define SVGA_SURFACE_CACHE_ENABLED 1
39 * Compute the bucket for this key.
41 static INLINE
unsigned
42 svga_screen_cache_bucket(const struct svga_host_surface_cache_key
*key
)
44 return util_hash_crc32( key
, sizeof key
) % SVGA_HOST_SURFACE_CACHE_BUCKETS
;
48 static INLINE
struct svga_winsys_surface
*
49 svga_screen_cache_lookup(struct svga_screen
*svgascreen
,
50 const struct svga_host_surface_cache_key
*key
)
52 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
53 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
54 struct svga_host_surface_cache_entry
*entry
;
55 struct svga_winsys_surface
*handle
= NULL
;
56 struct list_head
*curr
, *next
;
60 assert(key
->cachable
);
62 bucket
= svga_screen_cache_bucket(key
);
64 pipe_mutex_lock(cache
->mutex
);
66 curr
= cache
->bucket
[bucket
].next
;
68 while(curr
!= &cache
->bucket
[bucket
]) {
71 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, bucket_head
);
73 assert(entry
->handle
);
75 if(memcmp(&entry
->key
, key
, sizeof *key
) == 0 &&
76 sws
->fence_signalled( sws
, entry
->fence
, 0 ) == 0) {
77 assert(sws
->surface_is_flushed(sws
, entry
->handle
));
79 handle
= entry
->handle
; // Reference is transfered here.
82 LIST_DEL(&entry
->bucket_head
);
84 LIST_DEL(&entry
->head
);
86 LIST_ADD(&entry
->head
, &cache
->empty
);
95 pipe_mutex_unlock(cache
->mutex
);
97 if (SVGA_DEBUG
& DEBUG_DMA
)
98 debug_printf("%s: cache %s after %u tries\n", __FUNCTION__
,
99 handle
? "hit" : "miss", tries
);
106 * Transfers a handle reference.
110 svga_screen_cache_add(struct svga_screen
*svgascreen
,
111 const struct svga_host_surface_cache_key
*key
,
112 struct svga_winsys_surface
**p_handle
)
114 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
115 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
116 struct svga_host_surface_cache_entry
*entry
= NULL
;
117 struct svga_winsys_surface
*handle
= *p_handle
;
119 assert(key
->cachable
);
126 pipe_mutex_lock(cache
->mutex
);
128 if(!LIST_IS_EMPTY(&cache
->empty
)) {
129 /* use the first empty entry */
130 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, cache
->empty
.next
, head
);
132 LIST_DEL(&entry
->head
);
134 else if(!LIST_IS_EMPTY(&cache
->unused
)) {
135 /* free the last used buffer and reuse its entry */
136 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, cache
->unused
.prev
, head
);
137 SVGA_DBG(DEBUG_DMA
, "unref sid %p (make space)\n", entry
->handle
);
138 sws
->surface_reference(sws
, &entry
->handle
, NULL
);
140 LIST_DEL(&entry
->bucket_head
);
142 LIST_DEL(&entry
->head
);
146 entry
->handle
= handle
;
147 memcpy(&entry
->key
, key
, sizeof entry
->key
);
149 LIST_ADD(&entry
->head
, &cache
->validated
);
152 /* Couldn't cache the buffer -- this really shouldn't happen */
153 SVGA_DBG(DEBUG_DMA
, "unref sid %p (couldn't find space)\n", handle
);
154 sws
->surface_reference(sws
, &handle
, NULL
);
157 pipe_mutex_unlock(cache
->mutex
);
162 * Called during the screen flush to move all buffers not in a validate list
163 * into the unused list.
166 svga_screen_cache_flush(struct svga_screen
*svgascreen
,
167 struct pipe_fence_handle
*fence
)
169 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
170 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
171 struct svga_host_surface_cache_entry
*entry
;
172 struct list_head
*curr
, *next
;
175 pipe_mutex_lock(cache
->mutex
);
177 curr
= cache
->validated
.next
;
179 while(curr
!= &cache
->validated
) {
180 entry
= LIST_ENTRY(struct svga_host_surface_cache_entry
, curr
, head
);
182 assert(entry
->handle
);
184 if(sws
->surface_is_flushed(sws
, entry
->handle
)) {
185 LIST_DEL(&entry
->head
);
187 svgascreen
->sws
->fence_reference(svgascreen
->sws
, &entry
->fence
, fence
);
189 LIST_ADD(&entry
->head
, &cache
->unused
);
191 bucket
= svga_screen_cache_bucket(&entry
->key
);
192 LIST_ADD(&entry
->bucket_head
, &cache
->bucket
[bucket
]);
199 pipe_mutex_unlock(cache
->mutex
);
204 svga_screen_cache_cleanup(struct svga_screen
*svgascreen
)
206 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
207 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
210 for(i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
) {
211 if(cache
->entries
[i
].handle
) {
212 SVGA_DBG(DEBUG_DMA
, "unref sid %p (shutdown)\n", cache
->entries
[i
].handle
);
213 sws
->surface_reference(sws
, &cache
->entries
[i
].handle
, NULL
);
216 if(cache
->entries
[i
].fence
)
217 svgascreen
->sws
->fence_reference(svgascreen
->sws
, &cache
->entries
[i
].fence
, NULL
);
220 pipe_mutex_destroy(cache
->mutex
);
225 svga_screen_cache_init(struct svga_screen
*svgascreen
)
227 struct svga_host_surface_cache
*cache
= &svgascreen
->cache
;
230 pipe_mutex_init(cache
->mutex
);
232 for(i
= 0; i
< SVGA_HOST_SURFACE_CACHE_BUCKETS
; ++i
)
233 LIST_INITHEAD(&cache
->bucket
[i
]);
235 LIST_INITHEAD(&cache
->unused
);
237 LIST_INITHEAD(&cache
->validated
);
239 LIST_INITHEAD(&cache
->empty
);
240 for(i
= 0; i
< SVGA_HOST_SURFACE_CACHE_SIZE
; ++i
)
241 LIST_ADDTAIL(&cache
->entries
[i
].head
, &cache
->empty
);
247 struct svga_winsys_surface
*
248 svga_screen_surface_create(struct svga_screen
*svgascreen
,
249 struct svga_host_surface_cache_key
*key
)
251 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
252 struct svga_winsys_surface
*handle
= NULL
;
253 boolean cachable
= SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
;
255 SVGA_DBG(DEBUG_DMA
, "%s sz %dx%dx%d mips %d faces %d cachable %d\n",
265 if (key
->format
== SVGA3D_BUFFER
) {
266 /* For buffers, round the buffer size up to the nearest power
267 * of two to increase the probability of cache hits. Keep
268 * texture surface dimensions unchanged.
271 while(size
< key
->size
.width
)
273 key
->size
.width
= size
;
276 handle
= svga_screen_cache_lookup(svgascreen
, key
);
278 if (key
->format
== SVGA3D_BUFFER
)
279 SVGA_DBG(DEBUG_DMA
, " reuse sid %p sz %d (buffer)\n", handle
,
282 SVGA_DBG(DEBUG_DMA
, " reuse sid %p sz %dx%dx%d mips %d faces %d\n", handle
,
292 handle
= sws
->surface_create(sws
,
299 SVGA_DBG(DEBUG_DMA
, "create sid %p sz %d\n", handle
, key
->size
);
307 svga_screen_surface_destroy(struct svga_screen
*svgascreen
,
308 const struct svga_host_surface_cache_key
*key
,
309 struct svga_winsys_surface
**p_handle
)
311 struct svga_winsys_screen
*sws
= svgascreen
->sws
;
313 /* We only set the cachable flag for surfaces of which we are the
314 * exclusive owner. So just hold onto our existing reference in
317 if(SVGA_SURFACE_CACHE_ENABLED
&& key
->cachable
) {
318 svga_screen_cache_add(svgascreen
, key
, p_handle
);
321 SVGA_DBG(DEBUG_DMA
, "unref sid %p (uncachable)\n", *p_handle
);
322 sws
->surface_reference(sws
, p_handle
, NULL
);