/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  The
 * consumers can query the hash table of state using a cache_id and
 * opaque key data, and receive the corresponding state buffer object
 * (plus associated auxiliary data) in return.  Objects in the cache
 * may not have relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a rotating-XOR
 * hash of the key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
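
/*
 * A minimal sketch of the intended consumer pattern, using VS-style names
 * as found in callers like brw_vs.c; the exact field names are
 * illustrative, not definitive.  Callers probe the cache first and only
 * compile and upload a program on a miss:
 *
 *    if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                          &key, sizeof(key),
 *                          &brw->vs.base.prog_offset, &brw->vs.prog_data)) {
 *       ...compile into (program, program_size, prog_data), then:
 *
 *       brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                        &key, sizeof(key),
 *                        program, program_size,
 *                        &prog_data, sizeof(prog_data),
 *                        &brw->vs.base.prog_offset, &brw->vs.prog_data);
 *    }
 */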

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* A simple word-at-a-time XOR with a 5-bit left rotation; I'm sure
    * this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}

/**
 * Searches the cache for an item matching cache_id and key.
 *
 * Returns true on a hit, storing the item's offset in *inout_offset and
 * a pointer to its aux data in *out_aux; returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}

static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an existing item in the cache with identical data and
 * aux data, so the new item can share its storage instead of uploading
 * a duplicate copy.
 */
static bool
brw_try_upload_using_copy(struct brw_cache *cache,
                          struct brw_cache_item *result_item,
                          const void *data,
                          const void *aux)
{
   struct brw_context *brw = cache->brw;
   int i;
   struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         const void *item_aux = item->key + item->key_size;
         int ret;

         if (item->cache_id != result_item->cache_id ||
             item->size != result_item->size ||
             item->aux_size != result_item->aux_size) {
            continue;
         }

         if (cache->aux_compare[result_item->cache_id]) {
            if (!cache->aux_compare[result_item->cache_id](item_aux, aux))
               continue;
         } else if (memcmp(item_aux, aux, item->aux_size) != 0) {
            continue;
         }

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         result_item->offset = item->offset;

         return true;
      }
   }

   return false;
}

static void
brw_upload_item_data(struct brw_cache *cache,
                     struct brw_cache_item *item,
                     const void *data)
{
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + item->size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + item->size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   item->offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(item->offset + item->size, 64);
}

void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog/prog_data combo in the cache
    * already, then reuse the existing stuff.  This will mean not
    * flagging CACHE_NEW_* when transitioning between the two
    * equivalent hash keys.  This is notably useful for programs
    * generating shaders at runtime, where multiple shaders may
    * compile to the same thing in our backend.
    */
   if (!brw_try_upload_using_copy(cache, item, data, aux)) {
      brw_upload_item_data(cache, item, data);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   /* Copy data to the buffer */
   if (brw->has_llc) {
      memcpy((char *) cache->bo->virtual + item->offset, data, data_size);
   } else {
      drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
   }

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}

void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);

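   /* The shader stages register aux compare/free callbacks because their
    * aux data (brw_*_prog_data) embeds pointers, such as the uniform param
    * arrays, so a plain byte-wise memcmp would not compare the pointed-to
    * data, and that storage has to be freed when an item is discarded.
    */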
   cache->aux_compare[BRW_CACHE_VS_PROG] = brw_vs_prog_data_compare;
   cache->aux_compare[BRW_CACHE_GS_PROG] = brw_gs_prog_data_compare;
   cache->aux_compare[BRW_CACHE_FS_PROG] = brw_wm_prog_data_compare;
   cache->aux_free[BRW_CACHE_VS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_GS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_FS_PROG] = brw_stage_prog_data_free;
}

static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState |= ~0;
   brw->ctx.NewDriverState |= ~0ull;
   intel_batchbuffer_flush(brw);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them
    * is 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit. Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}