i965: Convert the binding table to streamed indirect state.
[mesa.git] src/mesa/drivers/dri/i965/brw_state_cache.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  Consumers
 * query the hash table of state using a cache_id, opaque key data, and a
 * list of buffers that will be used in relocations, and receive the
 * corresponding state buffer object (plus associated auxiliary data) in
 * return.
 *
 * The inner workings are a simple hash table keyed on an xor/rotate hash
 * of the key data.  The cache_id and relocation target buffers associated
 * with the state buffer are included as auxiliary key data, but are not
 * part of the hash value (this should be fixed, but will likely be fixed
 * instead by making consumers use structured keys).
 *
 * Replacement is not implemented.  Instead, when the cache gets too big,
 * at a safe point (unlock) we throw out all of the cache data and let it
 * regenerate for the next rendering operation.
 *
 * The reloc_buf pointers need to be included as key data: the non-unique
 * offset values stuffed into the key data through brw_cache_data() could
 * otherwise produce a successful probe for a state buffer even when the
 * buffer being referenced doesn't match.  The result would be that the
 * same state cache entry is used twice for different buffers, only one of
 * the two buffers referenced gets put into the offset, and the incorrect
 * program is run for the other instance.
 */
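
/*
 * A rough usage sketch (illustrative only -- the wm_unit_key type, its
 * max_threads field, the unit_packet variable, and the reloc_bufs array
 * are hypothetical stand-ins): a consumer probes the cache first and
 * uploads on a miss.
 *
 *    struct wm_unit_key key;
 *    drm_intel_bo *bo;
 *
 *    memset(&key, 0, sizeof(key));
 *    key.max_threads = 32;   ...and everything else affecting the unit...
 *
 *    bo = brw_search_cache(&brw->cache, BRW_WM_UNIT,
 *                          &key, sizeof(key),
 *                          reloc_bufs, nr_reloc_bufs,
 *                          NULL);
 *    if (bo == NULL)
 *       bo = brw_upload_cache(&brw->cache, BRW_WM_UNIT,
 *                             &key, sizeof(key),
 *                             reloc_bufs, nr_reloc_bufs,
 *                             &unit_packet, sizeof(unit_packet));
 */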

#include "main/imports.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "brw_wm.h"

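/**
 * Computes the hash for a cache item.  The cache_id seeds the hash, and the
 * key data and the relocation BO pointers are then mixed in with an
 * xor/rotate step per 32-bit word.
 */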
static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   /* Include the BO pointers as key data as well */
   ikey = (GLuint *)item->reloc_bufs;
   for (i = 0; i < item->nr_reloc_bufs * sizeof(drm_intel_bo *) / 4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}


/**
 * Marks a new buffer as being chosen for the given cache id.
 */
static void
update_cache_last(struct brw_cache *cache, enum brw_cache_id cache_id,
                  drm_intel_bo *bo)
{
   if (bo == cache->last_bo[cache_id])
      return; /* no change */

   drm_intel_bo_unreference(cache->last_bo[cache_id]);
   cache->last_bo[cache_id] = bo;
   drm_intel_bo_reference(cache->last_bo[cache_id]);
   cache->brw->state.dirty.cache |= 1 << cache_id;
}

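/**
 * Returns nonzero if two cache items have the same cache_id, key data, and
 * relocation BO list.
 */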
static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
      a->hash == b->hash &&
      a->key_size == b->key_size &&
      (memcmp(a->key, b->key, a->key_size) == 0) &&
      a->nr_reloc_bufs == b->nr_reloc_bufs &&
      (memcmp(a->reloc_bufs, b->reloc_bufs,
              a->nr_reloc_bufs * sizeof(drm_intel_bo *)) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}

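/**
 * Triples the hash table's bucket count and re-chains every existing item
 * into the new buckets.
 */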
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = (struct brw_cache_item **) calloc(1, size * sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Returns the buffer object matching cache_id and key, or NULL.
 *
 * The returned BO carries an extra reference that the caller must release.
 */
drm_intel_bo *
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 drm_intel_bo **reloc_bufs, GLuint nr_reloc_bufs,
                 void *aux_return)
{
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   lookup.reloc_bufs = reloc_bufs;
   lookup.nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return NULL;

   if (aux_return)
      *(void **)aux_return = (void *)((char *)item->key + item->key_size);

   update_cache_last(cache, cache_id, item->bo);

   drm_intel_bo_reference(item->bo);
   return item->bo;
}

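/**
 * Uploads state data into a new buffer object, enters it into the cache,
 * and returns a reference to the new BO.  The aux data is stored right
 * after the key copy in the item, where brw_search_cache()'s aux_return
 * can find it again later.
 */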
drm_intel_bo *
brw_upload_cache_with_auxdata(struct brw_cache *cache,
                              enum brw_cache_id cache_id,
                              const void *key,
                              GLuint key_size,
                              drm_intel_bo **reloc_bufs,
                              GLuint nr_reloc_bufs,
                              const void *data,
                              GLuint data_size,
                              const void *aux,
                              GLuint aux_size,
                              void *aux_return)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   GLuint relocs_size = nr_reloc_bufs * sizeof(drm_intel_bo *);
   void *tmp;
   drm_intel_bo *bo;
   int i;

   item->cache_id = cache_id;
   item->key = key;
   item->key_size = key_size;
   item->reloc_bufs = reloc_bufs;
   item->nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(item);
   item->hash = hash;

   /* Create the buffer object to contain the data */
   bo = drm_intel_bo_alloc(cache->brw->intel.bufmgr,
                           cache->name[cache_id], data_size, 1 << 6);

   /* Set up the memory containing the key, aux_data, and reloc_bufs */
   tmp = malloc(key_size + aux_size + relocs_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);
   memcpy(tmp + key_size + aux_size, reloc_bufs, relocs_size);
   for (i = 0; i < nr_reloc_bufs; i++) {
      if (reloc_bufs[i] != NULL)
         drm_intel_bo_reference(reloc_bufs[i]);
   }

   item->key = tmp;
   item->reloc_bufs = tmp + key_size + aux_size;

   item->bo = bo;
   drm_intel_bo_reference(bo);

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   if (aux_return) {
      *(void **)aux_return = (void *)((char *)item->key + item->key_size);
   }

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("upload %s: %d bytes to cache id %d\n",
             cache->name[cache_id],
             data_size, cache_id);

   /* Copy data to the buffer */
   drm_intel_bo_subdata(bo, 0, data_size, data);

   update_cache_last(cache, cache_id, bo);

   return bo;
}

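/**
 * Wrapper around brw_upload_cache_with_auxdata() for the common case of
 * uploading with no aux data.
 */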
drm_intel_bo *
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 drm_intel_bo **reloc_bufs,
                 GLuint nr_reloc_bufs,
                 const void *data,
                 GLuint data_size)
{
   return brw_upload_cache_with_auxdata(cache, cache_id,
                                        key, key_size,
                                        reloc_bufs, nr_reloc_bufs,
                                        data, data_size,
                                        NULL, 0,
                                        NULL);
}

/**
 * Caches the given data, using the data itself as the cache key: searches
 * for an existing entry first and uploads a new one on a miss.
 *
 * If nr_reloc_bufs is nonzero, brw_search_cache()/brw_upload_cache() would be
 * better to use, as the potentially changing offsets in the data-used-as-key
 * will result in excessive cache misses.
 *
 * If aux data is involved, use search/upload instead.
 */
drm_intel_bo *
brw_cache_data(struct brw_cache *cache,
               enum brw_cache_id cache_id,
               const void *data,
               GLuint data_size,
               drm_intel_bo **reloc_bufs,
               GLuint nr_reloc_bufs)
{
   drm_intel_bo *bo;
   struct brw_cache_item *item, lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = data;
   lookup.key_size = data_size;
   lookup.reloc_bufs = reloc_bufs;
   lookup.nr_reloc_bufs = nr_reloc_bufs;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);
   if (item) {
      update_cache_last(cache, cache_id, item->bo);
      drm_intel_bo_reference(item->bo);
      return item->bo;
   }

   bo = brw_upload_cache(cache, cache_id,
                         data, data_size,
                         reloc_bufs, nr_reloc_bufs,
                         data, data_size);

   return bo;
}

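/*
 * A minimal usage sketch (illustrative only -- assumes a filled-in
 * struct brw_cc_viewport named vp, as the CC_VP state upload would have):
 *
 *    drm_intel_bo *bo = brw_cache_data(&brw->cache, BRW_CC_VP,
 *                                      &vp, sizeof(vp), NULL, 0);
 */
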
enum pool_type {
   DW_SURFACE_STATE,
   DW_GENERAL_STATE
};


static void
brw_init_cache_id(struct brw_cache *cache,
                  const char *name,
                  enum brw_cache_id id)
{
   cache->name[id] = strdup(name);
}


static void
brw_init_non_surface_cache(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items = (struct brw_cache_item **)
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   brw_init_cache_id(cache, "CC_VP", BRW_CC_VP);
   brw_init_cache_id(cache, "CC_UNIT", BRW_CC_UNIT);
   brw_init_cache_id(cache, "WM_PROG", BRW_WM_PROG);
   brw_init_cache_id(cache, "SAMPLER_DEFAULT_COLOR", BRW_SAMPLER_DEFAULT_COLOR);
   brw_init_cache_id(cache, "SAMPLER", BRW_SAMPLER);
   brw_init_cache_id(cache, "WM_UNIT", BRW_WM_UNIT);
   brw_init_cache_id(cache, "SF_PROG", BRW_SF_PROG);
   brw_init_cache_id(cache, "SF_VP", BRW_SF_VP);
   brw_init_cache_id(cache, "SF_UNIT", BRW_SF_UNIT);
   brw_init_cache_id(cache, "VS_UNIT", BRW_VS_UNIT);
   brw_init_cache_id(cache, "VS_PROG", BRW_VS_PROG);
   brw_init_cache_id(cache, "CLIP_UNIT", BRW_CLIP_UNIT);
   brw_init_cache_id(cache, "CLIP_PROG", BRW_CLIP_PROG);
   brw_init_cache_id(cache, "CLIP_VP", BRW_CLIP_VP);
   brw_init_cache_id(cache, "GS_UNIT", BRW_GS_UNIT);
   brw_init_cache_id(cache, "GS_PROG", BRW_GS_PROG);
   brw_init_cache_id(cache, "BLEND_STATE", BRW_BLEND_STATE);
   brw_init_cache_id(cache, "COLOR_CALC_STATE", BRW_COLOR_CALC_STATE);
   brw_init_cache_id(cache, "DEPTH_STENCIL_STATE", BRW_DEPTH_STENCIL_STATE);
}


static void
brw_init_surface_cache(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->surface_cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items = (struct brw_cache_item **)
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   brw_init_cache_id(cache, "SS_SURFACE", BRW_SS_SURFACE);
}


void
brw_init_caches(struct brw_context *brw)
{
   brw_init_non_surface_cache(brw);
   brw_init_surface_cache(brw);
}


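/**
 * Frees every item in the cache and flags all state as dirty so it gets
 * regenerated on the next rendering operation.
 */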
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         int j;

         next = c->next;
         for (j = 0; j < c->nr_reloc_bufs; j++)
            drm_intel_bo_unreference(c->reloc_bufs[j]);
         drm_intel_bo_unreference(c->bo);
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;
}

/* Clear all entries from the cache that point to the given bo.
 *
 * This lets us release memory for reuse earlier for known-dead buffers,
 * at the cost of walking the entire hash table.
 */
void
brw_state_cache_bo_delete(struct brw_cache *cache, drm_intel_bo *bo)
{
   struct brw_cache_item **prev;
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (prev = &cache->items[i]; *prev;) {
         struct brw_cache_item *c = *prev;

         if (drm_intel_bo_references(c->bo, bo)) {
            int j;

            *prev = c->next;

            for (j = 0; j < c->nr_reloc_bufs; j++)
               drm_intel_bo_unreference(c->reloc_bufs[j]);
            drm_intel_bo_unreference(c->bo);
            free((void *)c->key);
            free(c);
            cache->n_items--;
         } else {
            prev = &c->next;
         }
      }
   }
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s (n_items=%d)\n", __FUNCTION__, brw->cache.n_items);

   /* Un-tuned guess.  Each object is generally a page, so 1000 of them is
    * 4 MB of state cache.
    */
   if (brw->cache.n_items > 1000)
      brw_clear_cache(brw, &brw->cache);

   if (brw->surface_cache.n_items > 1000)
      brw_clear_cache(brw, &brw->surface_cache);
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   GLuint i;

   if (INTEL_DEBUG & DEBUG_STATE)
      printf("%s\n", __FUNCTION__);

   brw_clear_cache(brw, cache);
   for (i = 0; i < BRW_MAX_CACHE; i++) {
      drm_intel_bo_unreference(cache->last_bo[i]);
      free(cache->name[i]);
   }
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
   brw_destroy_cache(brw, &brw->surface_cache);
}