/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  Consumers
 * query the hash table with a cache_id and opaque key data, and receive
 * the offset of the corresponding state within the cache's buffer object
 * (plus associated auxiliary data) in return.  Objects in the cache may
 * not have relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table keyed on a rotate-xor hash
 * of the key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
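
/*
 * A minimal, disabled sketch of the lookup-or-upload pattern consumers of
 * this cache follow (search first, compile and upload only on a miss).
 * The names `program`, `program_size`, and `local_prog_data` are
 * illustrative stand-ins for whatever the calling stage produces, not
 * fields defined here.
 */
#if 0
   struct brw_vs_prog_key key;          /* opaque key, multiple of 4 bytes */
   uint32_t offset;                     /* offset of the program in the BO */
   struct brw_vs_prog_data *prog_data;  /* aux data stored beside the key */

   if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
                         &key, sizeof(key), &offset, &prog_data)) {
      /* Miss: compile the program, then insert it and its aux data; the
       * out parameters are filled in just as they would be on a hit.
       */
      brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
                       &key, sizeof(key),
                       program, program_size,
                       &local_prog_data, sizeof(local_prog_data),
                       &offset, &prog_data);
   }
#endif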

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* Mix in each 32-bit word of the key; the shift pair is a 32-bit
    * rotate-left by 5.  This is a simple mixer and can surely be
    * improved on.
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


/* Grow the hash table threefold and redistribute the existing items into
 * the new buckets.  The items themselves are reused; only the bucket
 * chains are rebuilt.
 */
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Searches the cache for an item matching cache_id and key.
 *
 * Returns true on a hit, updating *inout_offset and *inout_aux; returns
 * false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *inout_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   void *aux = ((char *) item->key) + item->key_size;

   /* Only flag the state dirty when the offset or aux pointer actually
    * changed, to avoid redundant re-emission of unchanged state.
    */
   if (item->offset != *inout_offset || aux != *((void **) inout_aux)) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
      *((void **) inout_aux) = aux;
   }

   return true;
}

static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an item in the cache with identical data.
 */
static const struct brw_cache_item *
brw_lookup_prog(const struct brw_cache *cache,
                enum brw_cache_id cache_id,
                const void *data, unsigned data_size)
{
   const struct brw_context *brw = cache->brw;
   unsigned i;
   const struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         int ret;

         if (item->cache_id != cache_id || item->size != data_size)
            continue;

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         return item;
      }
   }

   return NULL;
}

static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }
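
   /* A worked example of the growth above: the cache BO starts at 4 KB
    * (see brw_init_caches), so a 9 KB program arriving in a fresh cache
    * doubles new_size to 8 KB and then 16 KB, and brw_cache_new_bo is
    * called once with the final 16 KB size.
    */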

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}

void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching program in the cache already, then reuse it
    * without creating a new copy in the underlying buffer object.  This is
    * notably useful for programs generating shaders at runtime, where
    * multiple shaders may compile to the same thing in our backend (see the
    * sketch after this function).
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      if (brw->has_llc) {
         memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
      } else {
         drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
      }
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}
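
/*
 * A small, disabled sketch of the deduplication brw_lookup_prog provides;
 * the keys, program data, and aux data here are illustrative stand-ins.
 * Two uploads with different keys but byte-identical program data end up
 * sharing a single copy of the program, and thus a single offset, in the
 * cache BO (two cache items are still created, one per key).
 */
#if 0
   uint32_t offset_a, offset_b;
   void *aux_a, *aux_b;

   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG, &key_a, sizeof(key_a),
                    program, program_size, &prog_data, sizeof(prog_data),
                    &offset_a, &aux_a);
   brw_upload_cache(&brw->cache, BRW_CACHE_FS_PROG, &key_b, sizeof(key_b),
                    program, program_size, &prog_data, sizeof(prog_data),
                    &offset_b, &aux_b);

   assert(offset_a == offset_b);   /* one copy of the program in the BO */
#endif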

void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);
}

static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_aux = c->key + c->key_size;
            brw_stage_prog_data_free(item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState |= ~0;
   brw->ctx.NewDriverState |= ~0ull;
   intel_batchbuffer_flush(brw);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* An un-tuned guess: each object is generally a page, so 2000 of them
    * is 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}