i965: Add brw_populate_default_key
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program_cache.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 /** @file brw_program_cache.c
33 *
34 * This file implements a simple program cache for 965. The consumers can
35 * query the hash table of programs using a cache_id and program key, and
36 * receive the corresponding program buffer object (plus associated auxiliary
37 * data) in return. Objects in the cache may not have relocations
38 * (pointers to other BOs) in them.
39 *
40 * The inner workings are a simple hash table based on a CRC of the
41 * key data.
42 *
43 * Replacement is not implemented. Instead, when the cache gets too
44 * big we throw out all of the cache data and let it get regenerated.
45 */
46
47 #include "main/imports.h"
48 #include "main/streaming-load-memcpy.h"
49 #include "x86/common_x86_asm.h"
50 #include "intel_batchbuffer.h"
51 #include "brw_state.h"
52 #include "brw_wm.h"
53 #include "brw_gs.h"
54 #include "brw_cs.h"
55 #include "brw_program.h"
56 #include "compiler/brw_eu.h"
57
58 #define FILE_DEBUG_FLAG DEBUG_STATE
59
struct brw_cache_item {
   /**
    * Effectively part of the key, cache_id identifies what kind of state
    * buffer is involved, and also which dirty flag should set.
    */
   enum brw_cache_id cache_id;

   /** 32-bit hash of the key data */
   GLuint hash;

   /** for variable-sized keys */
   GLuint key_size;
   GLuint prog_data_size;
   /* Malloc'd buffer holding the key immediately followed by the prog_data
    * (laid out by brw_upload_cache); freed in brw_clear_cache.
    */
   const void *key;

   /* Byte offset and size of the program assembly within the cache BO. */
   uint32_t offset;
   uint32_t size;

   /* Next item on the same hash-bucket chain. */
   struct brw_cache_item *next;
};
80
81 enum brw_cache_id
82 brw_stage_cache_id(gl_shader_stage stage)
83 {
84 static const enum brw_cache_id stage_ids[] = {
85 BRW_CACHE_VS_PROG,
86 BRW_CACHE_TCS_PROG,
87 BRW_CACHE_TES_PROG,
88 BRW_CACHE_GS_PROG,
89 BRW_CACHE_FS_PROG,
90 BRW_CACHE_CS_PROG,
91 };
92 assert((int)stage >= 0 && stage < ARRAY_SIZE(stage_ids));
93 return stage_ids[stage];
94 }
95
96 static unsigned
97 get_program_string_id(enum brw_cache_id cache_id, const void *key)
98 {
99 switch (cache_id) {
100 case BRW_CACHE_VS_PROG:
101 return ((struct brw_vs_prog_key *) key)->program_string_id;
102 case BRW_CACHE_TCS_PROG:
103 return ((struct brw_tcs_prog_key *) key)->program_string_id;
104 case BRW_CACHE_TES_PROG:
105 return ((struct brw_tes_prog_key *) key)->program_string_id;
106 case BRW_CACHE_GS_PROG:
107 return ((struct brw_gs_prog_key *) key)->program_string_id;
108 case BRW_CACHE_CS_PROG:
109 return ((struct brw_cs_prog_key *) key)->program_string_id;
110 case BRW_CACHE_FS_PROG:
111 return ((struct brw_wm_prog_key *) key)->program_string_id;
112 default:
113 unreachable("no program string id for this kind of program");
114 }
115 }
116
117 static GLuint
118 hash_key(struct brw_cache_item *item)
119 {
120 GLuint *ikey = (GLuint *)item->key;
121 GLuint hash = item->cache_id, i;
122
123 assert(item->key_size % 4 == 0);
124
125 /* I'm sure this can be improved on:
126 */
127 for (i = 0; i < item->key_size/4; i++) {
128 hash ^= ikey[i];
129 hash = (hash << 5) | (hash >> 27);
130 }
131
132 return hash;
133 }
134
135 static int
136 brw_cache_item_equals(const struct brw_cache_item *a,
137 const struct brw_cache_item *b)
138 {
139 return a->cache_id == b->cache_id &&
140 a->hash == b->hash &&
141 a->key_size == b->key_size &&
142 (memcmp(a->key, b->key, a->key_size) == 0);
143 }
144
145 static struct brw_cache_item *
146 search_cache(struct brw_cache *cache, GLuint hash,
147 struct brw_cache_item *lookup)
148 {
149 struct brw_cache_item *c;
150
151 #if 0
152 int bucketcount = 0;
153
154 for (c = cache->items[hash % cache->size]; c; c = c->next)
155 bucketcount++;
156
157 fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
158 cache->size, bucketcount, cache->n_items);
159 #endif
160
161 for (c = cache->items[hash % cache->size]; c; c = c->next) {
162 if (brw_cache_item_equals(lookup, c))
163 return c;
164 }
165
166 return NULL;
167 }
168
169
170 static void
171 rehash(struct brw_cache *cache)
172 {
173 struct brw_cache_item **items;
174 struct brw_cache_item *c, *next;
175 GLuint size, i;
176
177 size = cache->size * 3;
178 items = calloc(size, sizeof(*items));
179
180 for (i = 0; i < cache->size; i++)
181 for (c = cache->items[i]; c; c = next) {
182 next = c->next;
183 c->next = items[c->hash % size];
184 items[c->hash % size] = c;
185 }
186
187 free(cache->items);
188 cache->items = items;
189 cache->size = size;
190 }
191
192
193 /**
194 * Returns the buffer object matching cache_id and key, or NULL.
195 */
196 bool
197 brw_search_cache(struct brw_cache *cache,
198 enum brw_cache_id cache_id,
199 const void *key, GLuint key_size,
200 uint32_t *inout_offset, void *inout_prog_data)
201 {
202 struct brw_context *brw = cache->brw;
203 struct brw_cache_item *item;
204 struct brw_cache_item lookup;
205 GLuint hash;
206
207 lookup.cache_id = cache_id;
208 lookup.key = key;
209 lookup.key_size = key_size;
210 hash = hash_key(&lookup);
211 lookup.hash = hash;
212
213 item = search_cache(cache, hash, &lookup);
214
215 if (item == NULL)
216 return false;
217
218 void *prog_data = ((char *) item->key) + item->key_size;
219
220 if (item->offset != *inout_offset ||
221 prog_data != *((void **) inout_prog_data)) {
222 brw->ctx.NewDriverState |= (1 << cache_id);
223 *inout_offset = item->offset;
224 *((void **) inout_prog_data) = prog_data;
225 }
226
227 return true;
228 }
229
/**
 * Replace the program cache BO with one of new_size bytes, copying over any
 * already-uploaded programs, and flag the state that depends on the BO.
 */
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   struct brw_bo *new_bo;

   perf_debug("Copying to larger program cache: %u kB -> %u kB\n",
              (unsigned) cache->bo->size / 1024, new_size / 1024);

   new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size,
                         BRW_MEMZONE_SHADER);
   /* Request inclusion in kernel error-capture dumps when supported
    * (same flag as set on the original BO in brw_init_caches).
    */
   if (can_do_exec_capture(brw->screen))
      new_bo->kflags |= EXEC_OBJECT_CAPTURE;

   void *map = brw_bo_map(brw, new_bo, MAP_READ | MAP_WRITE |
                          MAP_ASYNC | MAP_PERSISTENT);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
#ifdef USE_SSE41
      /* NOTE(review): presumably the streaming-load path is the fast way to
       * read back a non-coherent (write-combined) mapping — confirm against
       * streaming-load-memcpy.h.
       */
      if (!cache->bo->cache_coherent && cpu_has_sse4_1)
         _mesa_streaming_load_memcpy(map, cache->map, cache->next_offset);
      else
#endif
         memcpy(map, cache->map, cache->next_offset);
   }

   brw_bo_unmap(cache->bo);
   brw_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->map = map;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
   brw->batch.state_base_address_emitted = false;
}
268
269 /**
270 * Attempts to find an item in the cache with identical data.
271 */
272 static const struct brw_cache_item *
273 brw_lookup_prog(const struct brw_cache *cache,
274 enum brw_cache_id cache_id,
275 const void *data, unsigned data_size)
276 {
277 unsigned i;
278 const struct brw_cache_item *item;
279
280 for (i = 0; i < cache->size; i++) {
281 for (item = cache->items[i]; item; item = item->next) {
282 if (item->cache_id != cache_id || item->size != data_size ||
283 memcmp(cache->map + item->offset, data, item->size) != 0)
284 continue;
285
286 return item;
287 }
288 }
289
290 return NULL;
291 }
292
293 static uint32_t
294 brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
295 {
296 uint32_t offset;
297
298 /* Allocate space in the cache BO for our new program. */
299 if (cache->next_offset + size > cache->bo->size) {
300 uint32_t new_size = cache->bo->size * 2;
301
302 while (cache->next_offset + size > new_size)
303 new_size *= 2;
304
305 brw_cache_new_bo(cache, new_size);
306 }
307
308 offset = cache->next_offset;
309
310 /* Programs are always 64-byte aligned, so set up the next one now */
311 cache->next_offset = ALIGN(offset + size, 64);
312
313 return offset;
314 }
315
316 const void *
317 brw_find_previous_compile(struct brw_cache *cache,
318 enum brw_cache_id cache_id,
319 unsigned program_string_id)
320 {
321 for (unsigned i = 0; i < cache->size; i++) {
322 for (struct brw_cache_item *c = cache->items[i]; c; c = c->next) {
323 if (c->cache_id == cache_id &&
324 get_program_string_id(cache_id, c->key) == program_string_id) {
325 return c->key;
326 }
327 }
328 }
329
330 return NULL;
331 }
332
333 void
334 brw_upload_cache(struct brw_cache *cache,
335 enum brw_cache_id cache_id,
336 const void *key,
337 GLuint key_size,
338 const void *data,
339 GLuint data_size,
340 const void *prog_data,
341 GLuint prog_data_size,
342 uint32_t *out_offset,
343 void *out_prog_data)
344 {
345 struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
346 const struct brw_cache_item *matching_data =
347 brw_lookup_prog(cache, cache_id, data, data_size);
348 GLuint hash;
349 void *tmp;
350
351 item->cache_id = cache_id;
352 item->size = data_size;
353 item->key = key;
354 item->key_size = key_size;
355 item->prog_data_size = prog_data_size;
356 hash = hash_key(item);
357 item->hash = hash;
358
359 /* If we can find a matching prog in the cache already, then reuse the
360 * existing stuff without creating new copy into the underlying buffer
361 * object. This is notably useful for programs generating shaders at
362 * runtime, where multiple shaders may compile to the same thing in our
363 * backend.
364 */
365 if (matching_data) {
366 item->offset = matching_data->offset;
367 } else {
368 item->offset = brw_alloc_item_data(cache, data_size);
369
370 /* Copy data to the buffer */
371 memcpy(cache->map + item->offset, data, data_size);
372 }
373
374 /* Set up the memory containing the key and prog_data */
375 tmp = malloc(key_size + prog_data_size);
376
377 memcpy(tmp, key, key_size);
378 memcpy(tmp + key_size, prog_data, prog_data_size);
379
380 item->key = tmp;
381
382 if (cache->n_items > cache->size * 1.5f)
383 rehash(cache);
384
385 hash %= cache->size;
386 item->next = cache->items[hash];
387 cache->items[hash] = item;
388 cache->n_items++;
389
390 *out_offset = item->offset;
391 *(void **)out_prog_data = (void *)((char *)item->key + item->key_size);
392 cache->brw->ctx.NewDriverState |= 1 << cache_id;
393 }
394
395 void
396 brw_init_caches(struct brw_context *brw)
397 {
398 struct brw_cache *cache = &brw->cache;
399
400 cache->brw = brw;
401
402 cache->size = 7;
403 cache->n_items = 0;
404 cache->items =
405 calloc(cache->size, sizeof(struct brw_cache_item *));
406
407 cache->bo = brw_bo_alloc(brw->bufmgr, "program cache", 16384,
408 BRW_MEMZONE_SHADER);
409 if (can_do_exec_capture(brw->screen))
410 cache->bo->kflags |= EXEC_OBJECT_CAPTURE;
411
412 cache->map = brw_bo_map(brw, cache->bo, MAP_READ | MAP_WRITE |
413 MAP_ASYNC | MAP_PERSISTENT);
414 }
415
/**
 * Empty the program cache: free every item (including its prog_data for
 * shader-stage programs), reset the BO upload offset, and dirty all state
 * so that stale program offsets get regenerated.  The BO itself is kept.
 */
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         /* Shader-stage programs store a prog_data immediately after the
          * key (laid out by brw_upload_cache); it owns further allocations
          * that brw_stage_prog_data_free releases.
          */
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_TCS_PROG ||
             c->cache_id == BRW_CACHE_TES_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_prog_data = c->key + c->key_size;
            brw_stage_prog_data_free(item_prog_data);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;
   brw->state.pipelines[BRW_RENDER_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_RENDER_PIPELINE].brw = ~0ull;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].brw = ~0ull;

   /* Also, NULL out any stale program pointers. */
   brw->vs.base.prog_data = NULL;
   brw->tcs.base.prog_data = NULL;
   brw->tes.base.prog_data = NULL;
   brw->gs.base.prog_data = NULL;
   brw->wm.base.prog_data = NULL;
   brw->cs.base.prog_data = NULL;

   /* Flush so no in-flight batch still references the now-freed programs. */
   intel_batchbuffer_flush(brw);
}
469
470 void
471 brw_program_cache_check_size(struct brw_context *brw)
472 {
473 /* un-tuned guess. Each object is generally a page, so 2000 of them is 8 MB of
474 * state cache.
475 */
476 if (brw->cache.n_items > 2000) {
477 perf_debug("Exceeded state cache size limit. Clearing the set "
478 "of compiled programs, which will trigger recompiles\n");
479 brw_clear_cache(brw, &brw->cache);
480 brw_cache_new_bo(&brw->cache, brw->cache.bo->size);
481 }
482 }
483
484
485 static void
486 brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
487 {
488
489 DBG("%s\n", __func__);
490
491 /* This can be NULL if context creation failed early on */
492 if (cache->bo) {
493 brw_bo_unmap(cache->bo);
494 brw_bo_unreference(cache->bo);
495 cache->bo = NULL;
496 cache->map = NULL;
497 }
498 brw_clear_cache(brw, cache);
499 free(cache->items);
500 cache->items = NULL;
501 cache->size = 0;
502 }
503
504
505 void
506 brw_destroy_caches(struct brw_context *brw)
507 {
508 brw_destroy_cache(brw, &brw->cache);
509 }
510
511 static const char *
512 cache_name(enum brw_cache_id cache_id)
513 {
514 switch (cache_id) {
515 case BRW_CACHE_VS_PROG:
516 return "VS kernel";
517 case BRW_CACHE_TCS_PROG:
518 return "TCS kernel";
519 case BRW_CACHE_TES_PROG:
520 return "TES kernel";
521 case BRW_CACHE_FF_GS_PROG:
522 return "Fixed-function GS kernel";
523 case BRW_CACHE_GS_PROG:
524 return "GS kernel";
525 case BRW_CACHE_CLIP_PROG:
526 return "CLIP kernel";
527 case BRW_CACHE_SF_PROG:
528 return "SF kernel";
529 case BRW_CACHE_FS_PROG:
530 return "FS kernel";
531 case BRW_CACHE_CS_PROG:
532 return "CS kernel";
533 default:
534 return "unknown";
535 }
536 }
537
538 void
539 brw_print_program_cache(struct brw_context *brw)
540 {
541 const struct brw_cache *cache = &brw->cache;
542 struct brw_cache_item *item;
543
544 for (unsigned i = 0; i < cache->size; i++) {
545 for (item = cache->items[i]; item; item = item->next) {
546 fprintf(stderr, "%s:\n", cache_name(i));
547 brw_disassemble(&brw->screen->devinfo, cache->map,
548 item->offset, item->size, stderr);
549 }
550 }
551 }