i965: Eliminate brw->gs.prog_data pointer.
[mesa.git] / src/mesa/drivers/dri/i965/brw_state_cache.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965. Consumers
 * can query the hash table of state using a cache_id and opaque key
 * data, and receive the corresponding state buffer object (plus
 * associated auxiliary data) in return. Objects in the cache may not
 * have relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a hash of the
 * key data.
 *
 * Replacement is not implemented. Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"
#include "brw_program.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

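/**
 * Hash the key data: each 32-bit word of the key is XORed into an
 * accumulator seeded with the cache_id, with a rotate-left-by-5 between
 * words.  Keys are required to be a multiple of 4 bytes.
 */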
static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

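/**
 * Compare two cache items: the cheap fields first (cache_id,
 * precomputed hash, key size), then a full memcmp of the key data.
 */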
static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

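/**
 * Walk the bucket chain for \p hash and return the item matching
 * \p lookup, or NULL if there is no such item.
 */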
static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


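/**
 * Grow the hash table to three times its current bucket count and
 * redistribute the existing items into the new buckets.
 */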
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Looks for an item matching \p cache_id and \p key.  On a hit, returns
 * true and updates *inout_offset and *inout_aux, flagging the state as
 * dirty if they changed; on a miss, returns false.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *inout_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   void *aux = ((char *) item->key) + item->key_size;

   if (item->offset != *inout_offset || aux != *((void **) inout_aux)) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
      *((void **) inout_aux) = aux;
   }

   return true;
}

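/**
 * Replace the cache's program BO with a freshly allocated one of
 * \p new_size bytes, copying over any programs already uploaded.  All
 * cached offsets now refer to the new buffer, so the units that depend
 * on it are flagged for re-emission below.
 */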
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
   brw->batch.state_base_address_emitted = false;
}

/**
 * Attempts to find an item in the cache with identical data.
 */
static const struct brw_cache_item *
brw_lookup_prog(const struct brw_cache *cache,
                enum brw_cache_id cache_id,
                const void *data, unsigned data_size)
{
   const struct brw_context *brw = cache->brw;
   unsigned i;
   const struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         int ret;

         if (item->cache_id != cache_id || item->size != data_size)
            continue;

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         return item;
      }
   }

   return NULL;
}

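/**
 * Reserve \p size bytes in the cache BO, growing the BO (doubling until
 * the allocation fits) when necessary.  Returns the 64-byte-aligned
 * offset of the reservation.
 */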
static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}

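/**
 * Upload a new program (or other state blob) into the cache.  Identical
 * data already present in the cache BO is reused rather than stored
 * twice; the key and aux data are copied into a single heap allocation
 * hanging off the hash table item.  Returns the BO offset in
 * *out_offset and a pointer to the stored aux data in *out_aux.
 */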
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog in the cache already, then reuse the
    * existing entry without creating a new copy in the underlying buffer
    * object.  This is notably useful for programs generating shaders at
    * runtime, where multiple shaders may compile to the same thing in our
    * backend.
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      if (brw->has_llc) {
         memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
      } else {
         drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
      }
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}

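/**
 * Initialize the program cache: a small hash table (7 buckets, grown on
 * demand by rehash()) and a 4 kB program BO, mapped unsynchronized up
 * front on LLC systems.
 */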
void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);
}

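/**
 * Throw out the entire cache: free every item (including the
 * stage prog_data stored as aux for the program caches), rewind
 * next_offset so the BO gets refilled from the start, and dirty all
 * state so the programs get regenerated.
 */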
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_TCS_PROG ||
             c->cache_id == BRW_CACHE_TES_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_aux = c->key + c->key_size;
            brw_stage_prog_data_free(item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;
   brw->state.pipelines[BRW_RENDER_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_RENDER_PIPELINE].brw = ~0ull;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].brw = ~0ull;

   /* Also, NULL out any stale program pointers. */
   brw->vs.base.prog_data = NULL;
   brw->tcs.base.prog_data = NULL;
   brw->tes.base.prog_data = NULL;
   brw->gs.base.prog_data = NULL;
   brw->wm.prog_data = NULL;
   brw->wm.base.prog_data = NULL;
   brw->cs.prog_data = NULL;
   brw->cs.base.prog_data = NULL;

   intel_batchbuffer_flush(brw);
}

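/**
 * Enforce a rough upper bound on the cache size.  Since replacement is
 * not implemented, the whole cache is cleared once it grows too large.
 */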
void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* An un-tuned guess.  Each object is generally a page, so 2000 of
    * them is 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


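/**
 * Unmap and release the program BO, then clear out the items and free
 * the bucket array itself.
 */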
static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


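/**
 * Tear down every cache owned by the context (currently just the one
 * program cache).
 */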
void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}