/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  The
 * consumers can query the hash table of state using a cache_id, opaque
 * key data, and a list of buffers that will be used in relocations,
 * and receive the corresponding state buffer object (plus associated
 * auxiliary data) in return.
 *
 * The inner workings are a simple hash table based on a rotate-and-XOR
 * checksum of the key data.  The cache_id and relocation target buffers
 * associated with the state buffer are included as auxiliary key data,
 * but are not part of the hash value (this should be fixed, but will
 * likely be fixed instead by making consumers use structured keys).
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big, at a safe point (unlock) we throw out all of the cache data and
 * let it regenerate for the next rendering operation.
 *
 * The reloc_buf pointers need to be included as key data: the
 * non-unique offset values stuffed into the key data through
 * brw_cache_data() could otherwise produce a successful probe for a
 * state buffer even when the buffer being referenced doesn't match.
 * The result would be that the same state cache entry is used twice
 * for different buffers, only one of the two buffers referenced gets
 * put into the offset, and the incorrect program is run for the other
 * instance.
 */
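
/*
 * Example usage (an illustrative sketch only; the key struct, cache id,
 * and relocation list below are hypothetical stand-ins for what a real
 * state atom would pass in):
 *
 *    dri_bo *bo = brw_search_cache(&brw->cache, BRW_WM_UNIT,
 *                                  &key, sizeof(key),
 *                                  reloc_bufs, nr_reloc_bufs, NULL);
 *    if (bo == NULL) {
 *       bo = brw_upload_cache(&brw->cache, BRW_WM_UNIT,
 *                             &key, sizeof(key),
 *                             reloc_bufs, nr_reloc_bufs,
 *                             &unit_state, sizeof(unit_state));
 *    }
 *    ... emit relocations against bo ...
 *    dri_bo_unreference(bo);
 *
 * Both calls return a reference owned by the caller, which must be
 * released once the state has been emitted.
 */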

#include "main/imports.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "brw_wm.h"


static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
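      /* 32-bit rotate left by 5 bits */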
      hash = (hash << 5) | (hash >> 27);
   }

   /* Include the BO pointers as key data as well */
   ikey = (GLuint *)item->reloc_bufs;
   for (i = 0; i < item->nr_reloc_bufs * sizeof(drm_intel_bo *) / 4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}


/**
 * Marks a new buffer as being chosen for the given cache id.
 */
static void
update_cache_last(struct brw_cache *cache, enum brw_cache_id cache_id,
                  dri_bo *bo)
{
   if (bo == cache->last_bo[cache_id])
      return; /* no change */

   dri_bo_unreference(cache->last_bo[cache_id]);
   cache->last_bo[cache_id] = bo;
   dri_bo_reference(cache->last_bo[cache_id]);
   cache->brw->state.dirty.cache |= 1 << cache_id;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
      a->hash == b->hash &&
      a->key_size == b->key_size &&
      (memcmp(a->key, b->key, a->key_size) == 0) &&
      a->nr_reloc_bufs == b->nr_reloc_bufs &&
      (memcmp(a->reloc_bufs, b->reloc_bufs,
              a->nr_reloc_bufs * sizeof(dri_bo *)) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

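   /* Grow by 3x each time; insertion triggers a rehash once n_items
    * exceeds 1.5 * size, so this keeps the bucket chains short.
    */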
   size = cache->size * 3;
   items = (struct brw_cache_item**) calloc(1, size * sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   FREE(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Returns the buffer object matching cache_id and key, or NULL.
 *
 * The returned bo carries a reference owned by the caller, which must
 * be unreferenced when done.
 */
dri_bo *
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 dri_bo **reloc_bufs, GLuint nr_reloc_bufs,
                 void *aux_return)
{
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   lookup.reloc_bufs = reloc_bufs;
   lookup.nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return NULL;

   if (aux_return)
      *(void **)aux_return = (void *)((char *)item->key + item->key_size);

   update_cache_last(cache, cache_id, item->bo);

   dri_bo_reference(item->bo);
   return item->bo;
}


drm_intel_bo *
brw_upload_cache_with_auxdata(struct brw_cache *cache,
                              enum brw_cache_id cache_id,
                              const void *key,
                              GLuint key_size,
                              dri_bo **reloc_bufs,
                              GLuint nr_reloc_bufs,
                              const void *data,
                              GLuint data_size,
                              const void *aux,
                              GLuint aux_size,
                              void *aux_return)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   GLuint relocs_size = nr_reloc_bufs * sizeof(dri_bo *);
   void *tmp;
   dri_bo *bo;
   int i;

   item->cache_id = cache_id;
   item->key = key;
   item->key_size = key_size;
   item->reloc_bufs = reloc_bufs;
   item->nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(item);
   item->hash = hash;

   /* Create the buffer object to contain the data, 64-byte aligned */
   bo = dri_bo_alloc(cache->brw->intel.bufmgr,
                     cache->name[cache_id], data_size, 1 << 6);

   /* Set up the memory containing the key, aux_data, and reloc_bufs */
   tmp = malloc(key_size + aux_size + relocs_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);
   memcpy(tmp + key_size + aux_size, reloc_bufs, relocs_size);
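   /* Hold a reference on each relocation target for the lifetime of the
    * cache entry; brw_clear_cache()/brw_state_cache_bo_delete() drop them.
    */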
   for (i = 0; i < nr_reloc_bufs; i++) {
      if (reloc_bufs[i] != NULL)
         dri_bo_reference(reloc_bufs[i]);
   }

   item->key = tmp;
   item->reloc_bufs = tmp + key_size + aux_size;

   item->bo = bo;
   dri_bo_reference(bo);

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
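   /* Link the new item in at the head of its hash bucket's chain */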
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   if (aux_return) {
      *(void **)aux_return = (void *)((char *)item->key + item->key_size);
   }

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("upload %s: %d bytes to cache id %d\n",
             cache->name[cache_id],
             data_size, cache_id);

   /* Copy data to the buffer */
   dri_bo_subdata(bo, 0, data_size, data);

   update_cache_last(cache, cache_id, bo);

   return bo;
}

drm_intel_bo *
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 dri_bo **reloc_bufs,
                 GLuint nr_reloc_bufs,
                 const void *data,
                 GLuint data_size)
{
   return brw_upload_cache_with_auxdata(cache, cache_id,
                                        key, key_size,
                                        reloc_bufs, nr_reloc_bufs,
                                        data, data_size,
                                        NULL, 0,
                                        NULL);
}

/**
 * Caches the given data against the cache_id, using the data itself as
 * the key, and uploading it on a cache miss.
 *
 * If nr_reloc_bufs is nonzero, brw_search_cache()/brw_upload_cache() would be
 * better to use, as the potentially changing offsets in the data-used-as-key
 * will result in excessive cache misses.
 *
 * If aux data is involved, use search/upload instead.
 */
dri_bo *
brw_cache_data(struct brw_cache *cache,
               enum brw_cache_id cache_id,
               const void *data,
               GLuint data_size,
               dri_bo **reloc_bufs,
               GLuint nr_reloc_bufs)
{
   dri_bo *bo;
   struct brw_cache_item *item, lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = data;
   lookup.key_size = data_size;
   lookup.reloc_bufs = reloc_bufs;
   lookup.nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);
   if (item) {
      update_cache_last(cache, cache_id, item->bo);
      dri_bo_reference(item->bo);
      return item->bo;
   }

   bo = brw_upload_cache(cache, cache_id,
                         data, data_size,
                         reloc_bufs, nr_reloc_bufs,
                         data, data_size);

   return bo;
}

enum pool_type {
   DW_SURFACE_STATE,
   DW_GENERAL_STATE
};


static void
brw_init_cache_id(struct brw_cache *cache,
                  const char *name,
                  enum brw_cache_id id)
{
   cache->name[id] = strdup(name);
}


static void
brw_init_non_surface_cache(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items = (struct brw_cache_item **)
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   brw_init_cache_id(cache, "CC_VP", BRW_CC_VP);
   brw_init_cache_id(cache, "CC_UNIT", BRW_CC_UNIT);
   brw_init_cache_id(cache, "WM_PROG", BRW_WM_PROG);
   brw_init_cache_id(cache, "SAMPLER_DEFAULT_COLOR", BRW_SAMPLER_DEFAULT_COLOR);
   brw_init_cache_id(cache, "SAMPLER", BRW_SAMPLER);
   brw_init_cache_id(cache, "WM_UNIT", BRW_WM_UNIT);
   brw_init_cache_id(cache, "SF_PROG", BRW_SF_PROG);
   brw_init_cache_id(cache, "SF_VP", BRW_SF_VP);
   brw_init_cache_id(cache, "SF_UNIT", BRW_SF_UNIT);
   brw_init_cache_id(cache, "VS_UNIT", BRW_VS_UNIT);
   brw_init_cache_id(cache, "VS_PROG", BRW_VS_PROG);
   brw_init_cache_id(cache, "CLIP_UNIT", BRW_CLIP_UNIT);
   brw_init_cache_id(cache, "CLIP_PROG", BRW_CLIP_PROG);
   brw_init_cache_id(cache, "GS_UNIT", BRW_GS_UNIT);
   brw_init_cache_id(cache, "GS_PROG", BRW_GS_PROG);
   brw_init_cache_id(cache, "BLEND_STATE", BRW_BLEND_STATE);
}


static void
brw_init_surface_cache(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->surface_cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items = (struct brw_cache_item **)
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   brw_init_cache_id(cache, "SS_SURFACE", BRW_SS_SURFACE);
   brw_init_cache_id(cache, "SS_SURF_BIND", BRW_SS_SURF_BIND);
}


void
brw_init_caches(struct brw_context *brw)
{
   brw_init_non_surface_cache(brw);
   brw_init_surface_cache(brw);
}


static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         int j;

         next = c->next;
         for (j = 0; j < c->nr_reloc_bufs; j++)
            dri_bo_unreference(c->reloc_bufs[j]);
         dri_bo_unreference(c->bo);
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   if (brw->curbe.last_buf) {
      free(brw->curbe.last_buf);
      brw->curbe.last_buf = NULL;
   }

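   /* Everything was unreferenced and freed, so mark all state as dirty
    * to get it regenerated on the next draw.
    */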
   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;
}

/* Clear all entries from the cache that point to the given bo.
 *
 * This lets us release memory for reuse earlier for known-dead buffers,
 * at the cost of walking the entire hash table.
 */
void
brw_state_cache_bo_delete(struct brw_cache *cache, dri_bo *bo)
{
   struct brw_cache_item **prev;
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (prev = &cache->items[i]; *prev;) {
         struct brw_cache_item *c = *prev;

         if (drm_intel_bo_references(c->bo, bo)) {
            int j;

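            /* Unlink the dead item from its bucket chain */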
            *prev = c->next;

            for (j = 0; j < c->nr_reloc_bufs; j++)
               dri_bo_unreference(c->reloc_bufs[j]);
            dri_bo_unreference(c->bo);
            free((void *)c->key);
            free(c);
            cache->n_items--;
         } else {
            prev = &c->next;
         }
      }
   }
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s (n_items=%d)\n", __FUNCTION__, brw->cache.n_items);

   /* An un-tuned guess.  We've got around 20 state objects for a total of
    * around 32k, so 1000 of them is around 1.5MB.
    */
   if (brw->cache.n_items > 1000)
      brw_clear_cache(brw, &brw->cache);

   if (brw->surface_cache.n_items > 1000)
      brw_clear_cache(brw, &brw->surface_cache);
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   brw_clear_cache(brw, cache);
   for (i = 0; i < BRW_MAX_CACHE; i++) {
      dri_bo_unreference(cache->last_bo[i]);
      free(cache->name[i]);
   }
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
   brw_destroy_cache(brw, &brw->surface_cache);
}