Merge branch '7.8' into master
[mesa.git] / src / gallium / drivers / svga / svga_screen_cache.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "util/u_memory.h"
27 #include "util/u_hash.h"
28
29 #include "svga_debug.h"
30 #include "svga_winsys.h"
31 #include "svga_screen.h"
32 #include "svga_screen_cache.h"
33
34
35 #define SVGA_SURFACE_CACHE_ENABLED 1
36
37
38 /**
39 * Compute the bucket for this key.
40 */
41 static INLINE unsigned
42 svga_screen_cache_bucket(const struct svga_host_surface_cache_key *key)
43 {
44 return util_hash_crc32( key, sizeof *key ) % SVGA_HOST_SURFACE_CACHE_BUCKETS;
45 }
46
47
/**
 * Search the cache for an unused surface that exactly matches *key and
 * whose last fence has already signalled.
 *
 * On a hit, the entry's surface handle reference is transferred to the
 * caller, the entry is recycled onto the empty list, and the handle is
 * returned.  Returns NULL on a miss.  The bucket walk is protected by
 * cache->mutex.
 */
static INLINE struct svga_winsys_surface *
svga_screen_cache_lookup(struct svga_screen *svgascreen,
                         const struct svga_host_surface_cache_key *key)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry;
   struct svga_winsys_surface *handle = NULL;
   struct list_head *curr, *next;
   unsigned bucket;
   unsigned tries = 0;

   assert(key->cachable);

   bucket = svga_screen_cache_bucket(key);

   pipe_mutex_lock(cache->mutex);

   /* Fetch 'next' up front so the current entry can be unlinked
    * safely while iterating.
    */
   curr = cache->bucket[bucket].next;
   next = curr->next;
   while(curr != &cache->bucket[bucket]) {
      ++tries;

      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, bucket_head);

      assert(entry->handle);

      /* Reuse only if the key matches bit-for-bit and the hardware is
       * done with the surface (fence_signalled() == 0 means signalled).
       */
      if(memcmp(&entry->key, key, sizeof *key) == 0 &&
         sws->fence_signalled( sws, entry->fence, 0 ) == 0) {
         assert(sws->surface_is_flushed(sws, entry->handle));

         handle = entry->handle; /* Reference is transferred here. */
         entry->handle = NULL;

         /* Unlink from the hash bucket... */
         LIST_DEL(&entry->bucket_head);

         /* ...and from the unused list... */
         LIST_DEL(&entry->head);

         /* ...then make the entry itself available for reuse. */
         LIST_ADD(&entry->head, &cache->empty);

         break;
      }

      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(cache->mutex);

   if (SVGA_DEBUG & DEBUG_DMA)
      debug_printf("%s: cache %s after %u tries (bucket %d)\n", __FUNCTION__,
                   handle ? "hit" : "miss", tries, bucket);

   return handle;
}
103
104
/*
 * Add the surface to the cache, transferring the caller's handle
 * reference into the cache (*p_handle is set to NULL on any path).
 *
 * Entry selection policy: prefer a free entry from the empty list;
 * otherwise evict the least-recently-used cached surface and reuse its
 * entry; if neither is available (which really shouldn't happen) the
 * surface is simply unreferenced.  New entries go on the validated
 * list; the next screen flush moves them into a hash bucket.
 */

static INLINE void
svga_screen_cache_add(struct svga_screen *svgascreen,
                      const struct svga_host_surface_cache_key *key,
                      struct svga_winsys_surface **p_handle)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry = NULL;
   struct svga_winsys_surface *handle = *p_handle;

   assert(key->cachable);

   assert(handle);
   if(!handle)
      return;

   /* The caller's reference is ours from here on. */
   *p_handle = NULL;
   pipe_mutex_lock(cache->mutex);

   if(!LIST_IS_EMPTY(&cache->empty)) {
      /* use the first empty entry */
      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, cache->empty.next, head);

      LIST_DEL(&entry->head);
   }
   else if(!LIST_IS_EMPTY(&cache->unused)) {
      /* free the last used buffer and reuse its entry */
      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, cache->unused.prev, head);
      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "unref sid %p (make space)\n", entry->handle);
      sws->surface_reference(sws, &entry->handle, NULL);

      /* Unused entries also sit in a hash bucket; unlink from both lists. */
      LIST_DEL(&entry->bucket_head);

      LIST_DEL(&entry->head);
   }

   if(entry) {
      entry->handle = handle;
      memcpy(&entry->key, key, sizeof entry->key);

      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "cache sid %p\n", entry->handle);
      LIST_ADD(&entry->head, &cache->validated);
   }
   else {
      /* Couldn't cache the buffer -- this really shouldn't happen */
      SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
               "unref sid %p (couldn't find space)\n", handle);
      sws->surface_reference(sws, &handle, NULL);
   }

   pipe_mutex_unlock(cache->mutex);
}
163
164
165 /**
166 * Called during the screen flush to move all buffers not in a validate list
167 * into the unused list.
168 */
169 void
170 svga_screen_cache_flush(struct svga_screen *svgascreen,
171 struct pipe_fence_handle *fence)
172 {
173 struct svga_host_surface_cache *cache = &svgascreen->cache;
174 struct svga_winsys_screen *sws = svgascreen->sws;
175 struct svga_host_surface_cache_entry *entry;
176 struct list_head *curr, *next;
177 unsigned bucket;
178
179 pipe_mutex_lock(cache->mutex);
180
181 curr = cache->validated.next;
182 next = curr->next;
183 while(curr != &cache->validated) {
184 entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);
185
186 assert(entry->handle);
187
188 if(sws->surface_is_flushed(sws, entry->handle)) {
189 LIST_DEL(&entry->head);
190
191 svgascreen->sws->fence_reference(svgascreen->sws, &entry->fence, fence);
192
193 LIST_ADD(&entry->head, &cache->unused);
194
195 bucket = svga_screen_cache_bucket(&entry->key);
196 LIST_ADD(&entry->bucket_head, &cache->bucket[bucket]);
197 }
198
199 curr = next;
200 next = curr->next;
201 }
202
203 pipe_mutex_unlock(cache->mutex);
204 }
205
206
207 void
208 svga_screen_cache_cleanup(struct svga_screen *svgascreen)
209 {
210 struct svga_host_surface_cache *cache = &svgascreen->cache;
211 struct svga_winsys_screen *sws = svgascreen->sws;
212 unsigned i;
213
214 for(i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i) {
215 if(cache->entries[i].handle) {
216 SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
217 "unref sid %p (shutdown)\n", cache->entries[i].handle);
218 sws->surface_reference(sws, &cache->entries[i].handle, NULL);
219 }
220
221 if(cache->entries[i].fence)
222 svgascreen->sws->fence_reference(svgascreen->sws, &cache->entries[i].fence, NULL);
223 }
224
225 pipe_mutex_destroy(cache->mutex);
226 }
227
228
229 enum pipe_error
230 svga_screen_cache_init(struct svga_screen *svgascreen)
231 {
232 struct svga_host_surface_cache *cache = &svgascreen->cache;
233 unsigned i;
234
235 pipe_mutex_init(cache->mutex);
236
237 for(i = 0; i < SVGA_HOST_SURFACE_CACHE_BUCKETS; ++i)
238 LIST_INITHEAD(&cache->bucket[i]);
239
240 LIST_INITHEAD(&cache->unused);
241
242 LIST_INITHEAD(&cache->validated);
243
244 LIST_INITHEAD(&cache->empty);
245 for(i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
246 LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);
247
248 return PIPE_OK;
249 }
250
251
252 struct svga_winsys_surface *
253 svga_screen_surface_create(struct svga_screen *svgascreen,
254 struct svga_host_surface_cache_key *key)
255 {
256 struct svga_winsys_screen *sws = svgascreen->sws;
257 struct svga_winsys_surface *handle = NULL;
258 boolean cachable = SVGA_SURFACE_CACHE_ENABLED && key->cachable;
259
260 SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
261 "%s sz %dx%dx%d mips %d faces %d cachable %d\n",
262 __FUNCTION__,
263 key->size.width,
264 key->size.height,
265 key->size.depth,
266 key->numMipLevels,
267 key->numFaces,
268 key->cachable);
269
270 if (cachable) {
271 if (key->format == SVGA3D_BUFFER) {
272 /* For buffers, round the buffer size up to the nearest power
273 * of two to increase the probability of cache hits. Keep
274 * texture surface dimensions unchanged.
275 */
276 uint32_t size = 1;
277 while(size < key->size.width)
278 size <<= 1;
279 key->size.width = size;
280 /* Since we're reusing buffers we're effectively transforming all
281 * of them into dynamic buffers.
282 *
283 * It would be nice to not cache long lived static buffers. But there
284 * is no way to detect the long lived from short lived ones yet. A
285 * good heuristic would be buffer size.
286 */
287 key->flags &= ~SVGA3D_SURFACE_HINT_STATIC;
288 key->flags |= SVGA3D_SURFACE_HINT_DYNAMIC;
289 }
290
291 handle = svga_screen_cache_lookup(svgascreen, key);
292 if (handle) {
293 if (key->format == SVGA3D_BUFFER)
294 SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
295 "reuse sid %p sz %d (buffer)\n", handle,
296 key->size.width);
297 else
298 SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
299 "reuse sid %p sz %dx%dx%d mips %d faces %d\n", handle,
300 key->size.width,
301 key->size.height,
302 key->size.depth,
303 key->numMipLevels,
304 key->numFaces);
305 }
306 }
307
308 if (!handle) {
309 handle = sws->surface_create(sws,
310 key->flags,
311 key->format,
312 key->size,
313 key->numFaces,
314 key->numMipLevels);
315 if (handle)
316 SVGA_DBG(DEBUG_CACHE|DEBUG_DMA,
317 " CREATE sid %p sz %dx%dx%d\n",
318 handle,
319 key->size.width,
320 key->size.height,
321 key->size.depth);
322 }
323
324 return handle;
325 }
326
327
328 void
329 svga_screen_surface_destroy(struct svga_screen *svgascreen,
330 const struct svga_host_surface_cache_key *key,
331 struct svga_winsys_surface **p_handle)
332 {
333 struct svga_winsys_screen *sws = svgascreen->sws;
334
335 /* We only set the cachable flag for surfaces of which we are the
336 * exclusive owner. So just hold onto our existing reference in
337 * that case.
338 */
339 if(SVGA_SURFACE_CACHE_ENABLED && key->cachable) {
340 svga_screen_cache_add(svgascreen, key, p_handle);
341 }
342 else {
343 SVGA_DBG(DEBUG_DMA,
344 "unref sid %p (uncachable)\n", *p_handle);
345 sws->surface_reference(sws, p_handle, NULL);
346 }
347 }