svga: cache textures as well as buffers
src/gallium/drivers/svga/svga_screen_cache.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_memory.h"
#include "util/u_hash.h"

#include "svga_debug.h"
#include "svga_winsys.h"
#include "svga_screen.h"
#include "svga_screen_cache.h"


#define SVGA_SURFACE_CACHE_ENABLED 1


/**
 * Compute the bucket for this key.
 */
static INLINE unsigned
svga_screen_cache_bucket(const struct svga_host_surface_cache_key *key)
{
   return util_hash_crc32( key, sizeof *key ) % SVGA_HOST_SURFACE_CACHE_BUCKETS;
}


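/**
 * Search the cache for a surface that matches the given key and whose
 * last fence has already signalled.  On a hit, the entry's handle
 * reference is transferred to the caller and the entry is returned to
 * the empty list; on a miss, NULL is returned.
 */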
static INLINE struct svga_winsys_surface *
svga_screen_cache_lookup(struct svga_screen *svgascreen,
                         const struct svga_host_surface_cache_key *key)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry;
   struct svga_winsys_surface *handle = NULL;
   struct list_head *curr, *next;
   unsigned bucket;
   unsigned tries = 0;

   assert(key->cachable);

   bucket = svga_screen_cache_bucket(key);

   pipe_mutex_lock(cache->mutex);

   curr = cache->bucket[bucket].next;
   next = curr->next;
   while(curr != &cache->bucket[bucket]) {
      ++tries;

      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, bucket_head);

      assert(entry->handle);

      if(memcmp(&entry->key, key, sizeof *key) == 0 &&
         sws->fence_signalled( sws, entry->fence, 0 ) == 0) {
         assert(sws->surface_is_flushed(sws, entry->handle));

         handle = entry->handle; /* Reference is transferred here. */
         entry->handle = NULL;

         LIST_DEL(&entry->bucket_head);

         LIST_DEL(&entry->head);

         LIST_ADD(&entry->head, &cache->empty);

         break;
      }

      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(cache->mutex);

   if (SVGA_DEBUG & DEBUG_DMA)
      debug_printf("%s: cache %s after %u tries\n", __FUNCTION__,
                   handle ? "hit" : "miss", tries);

   return handle;
}


/**
 * Add the surface to the cache, transferring the caller's handle
 * reference to the cache entry.  If no entry can be recycled, the
 * surface is unreferenced instead of being cached.
 */
static INLINE void
svga_screen_cache_add(struct svga_screen *svgascreen,
                      const struct svga_host_surface_cache_key *key,
                      struct svga_winsys_surface **p_handle)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry = NULL;
   struct svga_winsys_surface *handle = *p_handle;

   assert(key->cachable);

   assert(handle);
   if(!handle)
      return;

   *p_handle = NULL;
   pipe_mutex_lock(cache->mutex);

   if(!LIST_IS_EMPTY(&cache->empty)) {
      /* use the first empty entry */
      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, cache->empty.next, head);

      LIST_DEL(&entry->head);
   }
   else if(!LIST_IS_EMPTY(&cache->unused)) {
      /* free the least recently used surface and reuse its entry */
      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, cache->unused.prev, head);
      SVGA_DBG(DEBUG_DMA, "unref sid %p (make space)\n", entry->handle);
      sws->surface_reference(sws, &entry->handle, NULL);

      LIST_DEL(&entry->bucket_head);

      LIST_DEL(&entry->head);
   }

   if(entry) {
      entry->handle = handle;
      memcpy(&entry->key, key, sizeof entry->key);

      LIST_ADD(&entry->head, &cache->validated);
   }
   else {
      /* Couldn't cache the surface -- this really shouldn't happen */
      SVGA_DBG(DEBUG_DMA, "unref sid %p (couldn't find space)\n", handle);
      sws->surface_reference(sws, &handle, NULL);
   }

   pipe_mutex_unlock(cache->mutex);
}


/**
 * Called during the screen flush to move all flushed surfaces from the
 * validated list into the unused list and the corresponding cache bucket,
 * recording the given fence so they are not reused before the hardware is
 * done with them.
 */
void
svga_screen_cache_flush(struct svga_screen *svgascreen,
                        struct pipe_fence_handle *fence)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_host_surface_cache_entry *entry;
   struct list_head *curr, *next;
   unsigned bucket;

   pipe_mutex_lock(cache->mutex);

   curr = cache->validated.next;
   next = curr->next;
   while(curr != &cache->validated) {
      entry = LIST_ENTRY(struct svga_host_surface_cache_entry, curr, head);

      assert(entry->handle);

      if(sws->surface_is_flushed(sws, entry->handle)) {
         LIST_DEL(&entry->head);

         svgascreen->sws->fence_reference(svgascreen->sws, &entry->fence, fence);

         LIST_ADD(&entry->head, &cache->unused);

         bucket = svga_screen_cache_bucket(&entry->key);
         LIST_ADD(&entry->bucket_head, &cache->bucket[bucket]);
      }

      curr = next;
      next = curr->next;
   }

   pipe_mutex_unlock(cache->mutex);
}


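/**
 * Free all cache entries at screen destruction time, releasing any
 * remaining surface and fence references.
 */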
void
svga_screen_cache_cleanup(struct svga_screen *svgascreen)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   struct svga_winsys_screen *sws = svgascreen->sws;
   unsigned i;

   for(i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i) {
      if(cache->entries[i].handle) {
         SVGA_DBG(DEBUG_DMA, "unref sid %p (shutdown)\n", cache->entries[i].handle);
         sws->surface_reference(sws, &cache->entries[i].handle, NULL);
      }

      if(cache->entries[i].fence)
         svgascreen->sws->fence_reference(svgascreen->sws, &cache->entries[i].fence, NULL);
   }

   pipe_mutex_destroy(cache->mutex);
}


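/**
 * Initialize the cache: create the mutex, initialize the bucket and
 * state lists, and place every entry on the empty list.
 */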
enum pipe_error
svga_screen_cache_init(struct svga_screen *svgascreen)
{
   struct svga_host_surface_cache *cache = &svgascreen->cache;
   unsigned i;

   pipe_mutex_init(cache->mutex);

   for(i = 0; i < SVGA_HOST_SURFACE_CACHE_BUCKETS; ++i)
      LIST_INITHEAD(&cache->bucket[i]);

   LIST_INITHEAD(&cache->unused);

   LIST_INITHEAD(&cache->validated);

   LIST_INITHEAD(&cache->empty);
   for(i = 0; i < SVGA_HOST_SURFACE_CACHE_SIZE; ++i)
      LIST_ADDTAIL(&cache->entries[i].head, &cache->empty);

   return PIPE_OK;
}


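/**
 * Create a host surface for the given key, preferably by reusing a
 * cached surface.  For cachable buffers, the requested width is rounded
 * up to the next power of two to improve the cache hit rate.
 */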
struct svga_winsys_surface *
svga_screen_surface_create(struct svga_screen *svgascreen,
                           struct svga_host_surface_cache_key *key)
{
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_surface *handle = NULL;
   boolean cachable = SVGA_SURFACE_CACHE_ENABLED && key->cachable;

   SVGA_DBG(DEBUG_DMA, "%s sz %dx%dx%d mips %d faces %d cachable %d\n",
            __FUNCTION__,
            key->size.width,
            key->size.height,
            key->size.depth,
            key->numMipLevels,
            key->numFaces,
            key->cachable);

   if (cachable) {
      if (key->format == SVGA3D_BUFFER) {
         /* For buffers, round the buffer size up to the nearest power
          * of two to increase the probability of cache hits. Keep
          * texture surface dimensions unchanged.
          */
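         /* e.g. a requested width of 1000 would be rounded up to 1024 here
          * (illustrative numbers only).
          */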
         uint32_t size = 1;
         while(size < key->size.width)
            size <<= 1;
         key->size.width = size;
      }

      handle = svga_screen_cache_lookup(svgascreen, key);
      if (handle) {
         if (key->format == SVGA3D_BUFFER)
            SVGA_DBG(DEBUG_DMA, " reuse sid %p sz %d (buffer)\n", handle,
                     key->size.width);
         else
            SVGA_DBG(DEBUG_DMA, " reuse sid %p sz %dx%dx%d mips %d faces %d\n", handle,
                     key->size.width,
                     key->size.height,
                     key->size.depth,
                     key->numMipLevels,
                     key->numFaces);
      }
   }

   if (!handle) {
      handle = sws->surface_create(sws,
                                   key->flags,
                                   key->format,
                                   key->size,
                                   key->numFaces,
                                   key->numMipLevels);
      if (handle)
         SVGA_DBG(DEBUG_DMA, "create sid %p sz %dx%dx%d\n", handle,
                  key->size.width, key->size.height, key->size.depth);
   }

   return handle;
}


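/**
 * Release a surface reference, returning cachable surfaces to the cache
 * and unreferencing the rest.
 */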
void
svga_screen_surface_destroy(struct svga_screen *svgascreen,
                            const struct svga_host_surface_cache_key *key,
                            struct svga_winsys_surface **p_handle)
{
   struct svga_winsys_screen *sws = svgascreen->sws;

   /* We only set the cachable flag for surfaces of which we are the
    * exclusive owner.  So just hold onto our existing reference in
    * that case.
    */
   if(SVGA_SURFACE_CACHE_ENABLED && key->cachable) {
      svga_screen_cache_add(svgascreen, key, p_handle);
   }
   else {
      SVGA_DBG(DEBUG_DMA, "unref sid %p (uncachable)\n", *p_handle);
      sws->surface_reference(sws, p_handle, NULL);
   }
}
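

/*
 * Usage sketch (illustrative only; the key field values below are
 * assumptions, not taken from a real caller in the driver):
 *
 *    struct svga_host_surface_cache_key key;
 *    struct svga_winsys_surface *handle;
 *
 *    memset(&key, 0, sizeof key);
 *    key.format = SVGA3D_BUFFER;
 *    key.size.width = 1000;      /* rounded up to 1024 by surface_create *​/
 *    key.size.height = 1;
 *    key.size.depth = 1;
 *    key.numFaces = 1;
 *    key.numMipLevels = 1;
 *    key.cachable = 1;
 *
 *    handle = svga_screen_surface_create(svgascreen, &key);
 *    ...
 *    /* Returns the surface to the cache instead of destroying it. *​/
 *    svga_screen_surface_destroy(svgascreen, &key, &handle);
 */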