i965: Move intel_context::perf_debug to brw_context.
src/mesa/drivers/dri/i965/brw_state_cache.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */
/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  Consumers
 * query the hash table of state using a cache_id and opaque key data,
 * and receive an offset into the cache's buffer object (plus associated
 * auxiliary data) in return.  Objects in the cache may not have
 * relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a CRC of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
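
/* Usage sketch (illustrative only, not part of this file): a state atom
 * first probes the cache and only compiles a new program on a miss.  The
 * key type, the populate_vs_key()/compile_vs_program() helpers and the
 * brw->vs.* field names below are stand-ins for whatever the real
 * consumer (e.g. brw_vs.c) actually uses; the compile helper would finish
 * by handing its assembly and prog_data to brw_upload_cache().
 *
 *    struct brw_vs_prog_key key;
 *
 *    populate_vs_key(brw, &key);
 *
 *    if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
 *                          &key, sizeof(key),
 *                          &brw->vs.prog_offset, &brw->vs.prog_data)) {
 *       compile_vs_program(brw, &key);
 *    }
 */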

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(1, size * sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Searches the cache for an item matching cache_id and key.
 *
 * Returns true on a hit, storing the item's offset in *inout_offset and a
 * pointer to its auxiliary data in *out_aux; returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->state.dirty.cache |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}

static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      drm_intel_bo_map(cache->bo, false);
      drm_intel_bo_subdata(new_bo, 0, cache->next_offset, cache->bo->virtual);
      drm_intel_bo_unmap(cache->bo);
   }

   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an item in the cache with identical data and aux
 * data, so the new item can reuse that item's storage in the cache BO.
 */
static bool
brw_try_upload_using_copy(struct brw_cache *cache,
                          struct brw_cache_item *result_item,
                          const void *data,
                          const void *aux)
{
   int i;
   struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         const void *item_aux = item->key + item->key_size;
         int ret;

         if (item->cache_id != result_item->cache_id ||
             item->size != result_item->size ||
             item->aux_size != result_item->aux_size) {
            continue;
         }

         if (cache->aux_compare[result_item->cache_id]) {
            if (!cache->aux_compare[result_item->cache_id](item_aux, aux,
                                                           item->aux_size,
                                                           item->key))
               continue;
         } else if (memcmp(item_aux, aux, item->aux_size) != 0) {
            continue;
         }

         drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         result_item->offset = item->offset;

         return true;
      }
   }

   return false;
}

static void
brw_upload_item_data(struct brw_cache *cache,
                     struct brw_cache_item *item,
                     const void *data)
{
   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + item->size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + item->size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (cache->bo_used_by_gpu) {
      brw_cache_new_bo(cache, cache->bo->size);
   }

   item->offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(item->offset + item->size, 64);
}

void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog/prog_data combo in the cache
    * already, then reuse the existing stuff.  This will mean not
    * flagging CACHE_NEW_* when transitioning between the two
    * equivalent hash keys.  This is notably useful for programs
    * generating shaders at runtime, where multiple shaders may
    * compile to the same thing in our backend.
    */
   if (!brw_try_upload_using_copy(cache, item, data, aux)) {
      brw_upload_item_data(cache, item, data);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   /* Copy data to the buffer */
   drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->state.dirty.cache |= 1 << cache_id;
}
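
/* Sketch of a typical brw_upload_cache() call (illustrative only, not part
 * of this file): a compile function, such as the hypothetical
 * compile_vs_program() sketched near the top of this file, passes its
 * lookup key, the generated assembly as "data" and its prog_data struct
 * as "aux".  The program/program_size/prog_data names are assumptions
 * about what such a backend keeps around.
 *
 *    brw_upload_cache(&brw->cache, BRW_VS_PROG,
 *                     &key, sizeof(key),
 *                     program, program_size,
 *                     &prog_data, sizeof(prog_data),
 *                     &brw->vs.prog_offset, &brw->vs.prog_data);
 */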

void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);

   cache->aux_compare[BRW_VS_PROG] = brw_vs_prog_data_compare;
   cache->aux_compare[BRW_WM_PROG] = brw_wm_prog_data_compare;
   cache->aux_free[BRW_VS_PROG] = brw_vs_prog_data_free;
   cache->aux_free[BRW_WM_PROG] = brw_wm_prog_data_free;
}

static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;
   intel_batchbuffer_flush(brw);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them is
    * 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __FUNCTION__);

   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}