i965: Pass flags to brw_bo_map_*
[mesa.git] / src / mesa / drivers / dri / i965 / brw_program_cache.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keithw@vmware.com>
30 */
31
32 /** @file brw_program_cache.c
33 *
34 * This file implements a simple program cache for 965. The consumers can
35 * query the hash table of programs using a cache_id and program key, and
36 * receive the corresponding program buffer object (plus associated auxiliary
37 * data) in return. Objects in the cache may not have relocations
38 * (pointers to other BOs) in them.
39 *
40 * The inner workings are a simple hash table based on a CRC of the
41 * key data.
42 *
43 * Replacement is not implemented. Instead, when the cache gets too
44 * big we throw out all of the cache data and let it get regenerated.
45 */
46
47 #include "main/imports.h"
48 #include "intel_batchbuffer.h"
49 #include "brw_state.h"
50 #include "brw_wm.h"
51 #include "brw_gs.h"
52 #include "brw_cs.h"
53 #include "brw_program.h"
54 #include "compiler/brw_eu.h"
55
56 #define FILE_DEBUG_FLAG DEBUG_STATE
57
/**
 * One entry in the program cache hash table.
 *
 * After insertion (brw_upload_cache), \c key points at a single malloc'd
 * blob holding the key immediately followed by the aux data, so the aux
 * data for an item lives at key + key_size.
 */
struct brw_cache_item {
   /**
    * Effectively part of the key, cache_id identifies what kind of state
    * buffer is involved, and also which dirty flag should set.
    */
   enum brw_cache_id cache_id;

   /** 32-bit hash of the key data */
   GLuint hash;

   /** for variable-sized keys */
   GLuint key_size;
   GLuint aux_size;
   const void *key;

   /* Location of the program data within the cache's single BO. */
   uint32_t offset;
   uint32_t size;

   /* Next item in the same hash bucket (separate chaining). */
   struct brw_cache_item *next;
};
78
79 static unsigned
80 get_program_string_id(enum brw_cache_id cache_id, const void *key)
81 {
82 switch (cache_id) {
83 case BRW_CACHE_VS_PROG:
84 return ((struct brw_vs_prog_key *) key)->program_string_id;
85 case BRW_CACHE_TCS_PROG:
86 return ((struct brw_tcs_prog_key *) key)->program_string_id;
87 case BRW_CACHE_TES_PROG:
88 return ((struct brw_tes_prog_key *) key)->program_string_id;
89 case BRW_CACHE_GS_PROG:
90 return ((struct brw_gs_prog_key *) key)->program_string_id;
91 case BRW_CACHE_CS_PROG:
92 return ((struct brw_cs_prog_key *) key)->program_string_id;
93 case BRW_CACHE_FS_PROG:
94 return ((struct brw_wm_prog_key *) key)->program_string_id;
95 default:
96 unreachable("no program string id for this kind of program");
97 }
98 }
99
100 static GLuint
101 hash_key(struct brw_cache_item *item)
102 {
103 GLuint *ikey = (GLuint *)item->key;
104 GLuint hash = item->cache_id, i;
105
106 assert(item->key_size % 4 == 0);
107
108 /* I'm sure this can be improved on:
109 */
110 for (i = 0; i < item->key_size/4; i++) {
111 hash ^= ikey[i];
112 hash = (hash << 5) | (hash >> 27);
113 }
114
115 return hash;
116 }
117
118 static int
119 brw_cache_item_equals(const struct brw_cache_item *a,
120 const struct brw_cache_item *b)
121 {
122 return a->cache_id == b->cache_id &&
123 a->hash == b->hash &&
124 a->key_size == b->key_size &&
125 (memcmp(a->key, b->key, a->key_size) == 0);
126 }
127
128 static struct brw_cache_item *
129 search_cache(struct brw_cache *cache, GLuint hash,
130 struct brw_cache_item *lookup)
131 {
132 struct brw_cache_item *c;
133
134 #if 0
135 int bucketcount = 0;
136
137 for (c = cache->items[hash % cache->size]; c; c = c->next)
138 bucketcount++;
139
140 fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
141 cache->size, bucketcount, cache->n_items);
142 #endif
143
144 for (c = cache->items[hash % cache->size]; c; c = c->next) {
145 if (brw_cache_item_equals(lookup, c))
146 return c;
147 }
148
149 return NULL;
150 }
151
152
153 static void
154 rehash(struct brw_cache *cache)
155 {
156 struct brw_cache_item **items;
157 struct brw_cache_item *c, *next;
158 GLuint size, i;
159
160 size = cache->size * 3;
161 items = calloc(size, sizeof(*items));
162
163 for (i = 0; i < cache->size; i++)
164 for (c = cache->items[i]; c; c = next) {
165 next = c->next;
166 c->next = items[c->hash % size];
167 items[c->hash % size] = c;
168 }
169
170 free(cache->items);
171 cache->items = items;
172 cache->size = size;
173 }
174
175
176 /**
177 * Returns the buffer object matching cache_id and key, or NULL.
178 */
179 bool
180 brw_search_cache(struct brw_cache *cache,
181 enum brw_cache_id cache_id,
182 const void *key, GLuint key_size,
183 uint32_t *inout_offset, void *inout_aux)
184 {
185 struct brw_context *brw = cache->brw;
186 struct brw_cache_item *item;
187 struct brw_cache_item lookup;
188 GLuint hash;
189
190 lookup.cache_id = cache_id;
191 lookup.key = key;
192 lookup.key_size = key_size;
193 hash = hash_key(&lookup);
194 lookup.hash = hash;
195
196 item = search_cache(cache, hash, &lookup);
197
198 if (item == NULL)
199 return false;
200
201 void *aux = ((char *) item->key) + item->key_size;
202
203 if (item->offset != *inout_offset || aux != *((void **) inout_aux)) {
204 brw->ctx.NewDriverState |= (1 << cache_id);
205 *inout_offset = item->offset;
206 *((void **) inout_aux) = aux;
207 }
208
209 return true;
210 }
211
/**
 * Replace the cache's BO with a freshly allocated one of \p new_size bytes,
 * migrating any programs already uploaded.
 *
 * On LLC systems the new BO is kept persistently CPU-mapped in cache->map;
 * otherwise cache->map stays NULL and writes go through brw_bo_subdata.
 */
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   struct brw_bo *new_bo;
   void *llc_map; /* only assigned and read under brw->has_llc */

   new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   /* Ask the kernel to include this BO in error-state dumps. */
   if (can_do_exec_capture(brw->screen))
      new_bo->kflags = EXEC_OBJECT_CAPTURE;
   if (brw->has_llc)
      llc_map = brw_bo_map_unsynchronized(brw, new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(llc_map, cache->map, cache->next_offset);
      } else {
         void *map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
         brw_bo_subdata(new_bo, 0, cache->next_offset, map);
         brw_bo_unmap(cache->bo);
      }
   }

   /* Drop the old BO's persistent map (LLC only) and our reference. */
   if (brw->has_llc)
      brw_bo_unmap(cache->bo);
   brw_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->map = brw->has_llc ? llc_map : NULL;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
   brw->batch.state_base_address_emitted = false;
}
249
250 /**
251 * Attempts to find an item in the cache with identical data.
252 */
253 static const struct brw_cache_item *
254 brw_lookup_prog(const struct brw_cache *cache,
255 enum brw_cache_id cache_id,
256 const void *data, unsigned data_size)
257 {
258 struct brw_context *brw = cache->brw;
259 unsigned i;
260 const struct brw_cache_item *item;
261
262 for (i = 0; i < cache->size; i++) {
263 for (item = cache->items[i]; item; item = item->next) {
264 int ret;
265
266 if (item->cache_id != cache_id || item->size != data_size)
267 continue;
268
269 void *map;
270 if (!brw->has_llc)
271 map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
272 else
273 map = cache->map;
274
275 ret = memcmp(map + item->offset, data, item->size);
276 if (!brw->has_llc)
277 brw_bo_unmap(cache->bo);
278 if (ret)
279 continue;
280
281 return item;
282 }
283 }
284
285 return NULL;
286 }
287
288 static uint32_t
289 brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
290 {
291 uint32_t offset;
292 struct brw_context *brw = cache->brw;
293
294 /* Allocate space in the cache BO for our new program. */
295 if (cache->next_offset + size > cache->bo->size) {
296 uint32_t new_size = cache->bo->size * 2;
297
298 while (cache->next_offset + size > new_size)
299 new_size *= 2;
300
301 brw_cache_new_bo(cache, new_size);
302 }
303
304 /* If we would block on writing to an in-use program BO, just
305 * recreate it.
306 */
307 if (!brw->has_llc && cache->bo_used_by_gpu) {
308 perf_debug("Copying busy program cache buffer.\n");
309 brw_cache_new_bo(cache, cache->bo->size);
310 }
311
312 offset = cache->next_offset;
313
314 /* Programs are always 64-byte aligned, so set up the next one now */
315 cache->next_offset = ALIGN(offset + size, 64);
316
317 return offset;
318 }
319
320 const void *
321 brw_find_previous_compile(struct brw_cache *cache,
322 enum brw_cache_id cache_id,
323 unsigned program_string_id)
324 {
325 for (unsigned i = 0; i < cache->size; i++) {
326 for (struct brw_cache_item *c = cache->items[i]; c; c = c->next) {
327 if (c->cache_id == cache_id &&
328 get_program_string_id(cache_id, c->key) == program_string_id) {
329 return c->key;
330 }
331 }
332 }
333
334 return NULL;
335 }
336
/**
 * Upload a new program (and its key/aux data) into the cache.
 *
 * If byte-identical program data is already in the BO, the existing copy is
 * reused and only a new hash-table item is created.  On return, *out_offset
 * is the program's offset in the cache BO and *out_aux points at the item's
 * aux data; the cache_id dirty bit is raised unconditionally.
 */
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   /* item->key temporarily aliases the caller's key so hash_key() can read
    * it; it is repointed at our own copy (tmp) further down.
    */
   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog in the cache already, then reuse the
    * existing stuff without creating new copy into the underlying buffer
    * object.  This is notably useful for programs generating shaders at
    * runtime, where multiple shaders may compile to the same thing in our
    * backend.
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      if (brw->has_llc) {
         memcpy(cache->map + item->offset, data, data_size);
      } else {
         brw_bo_subdata(cache->bo, item->offset, data_size, data);
      }
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   /* Keep the average bucket chain short: grow once load factor passes 1.5. */
   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   /* Link the item at the head of its bucket. */
   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}
403
404 void
405 brw_init_caches(struct brw_context *brw)
406 {
407 struct brw_cache *cache = &brw->cache;
408
409 cache->brw = brw;
410
411 cache->size = 7;
412 cache->n_items = 0;
413 cache->items =
414 calloc(cache->size, sizeof(struct brw_cache_item *));
415
416 cache->bo = brw_bo_alloc(brw->bufmgr, "program cache", 4096, 64);
417 if (can_do_exec_capture(brw->screen))
418 cache->bo->kflags = EXEC_OBJECT_CAPTURE;
419 if (brw->has_llc)
420 cache->map = brw_bo_map_unsynchronized(brw, cache->bo);
421 }
422
/**
 * Throw away every cached program and reset the BO allocator.
 *
 * All cached offsets become invalid, so every dirty flag in both pipelines
 * is raised, all stale prog_data pointers are NULLed, and the batch is
 * flushed so nothing in flight references the recycled BO space.
 */
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         /* Shader-stage items own a brw_stage_prog_data in their aux blob
          * (stored right after the key) that needs its own teardown.
          */
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_TCS_PROG ||
             c->cache_id == BRW_CACHE_TES_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_aux = c->key + c->key_size;
            brw_stage_prog_data_free(item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;
   brw->state.pipelines[BRW_RENDER_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_RENDER_PIPELINE].brw = ~0ull;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].brw = ~0ull;

   /* Also, NULL out any stale program pointers. */
   brw->vs.base.prog_data = NULL;
   brw->tcs.base.prog_data = NULL;
   brw->tes.base.prog_data = NULL;
   brw->gs.base.prog_data = NULL;
   brw->wm.base.prog_data = NULL;
   brw->cs.base.prog_data = NULL;

   intel_batchbuffer_flush(brw);
}
476
477 void
478 brw_program_cache_check_size(struct brw_context *brw)
479 {
480 /* un-tuned guess. Each object is generally a page, so 2000 of them is 8 MB of
481 * state cache.
482 */
483 if (brw->cache.n_items > 2000) {
484 perf_debug("Exceeded state cache size limit. Clearing the set "
485 "of compiled programs, which will trigger recompiles\n");
486 brw_clear_cache(brw, &brw->cache);
487 }
488 }
489
490
491 static void
492 brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
493 {
494
495 DBG("%s\n", __func__);
496
497 /* This can be NULL if context creation failed early on */
498 if (cache->bo) {
499 if (brw->has_llc)
500 brw_bo_unmap(cache->bo);
501 brw_bo_unreference(cache->bo);
502 cache->bo = NULL;
503 cache->map = NULL;
504 }
505 brw_clear_cache(brw, cache);
506 free(cache->items);
507 cache->items = NULL;
508 cache->size = 0;
509 }
510
511
/** Tear down every program cache owned by the context (currently just one). */
void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}
517
518 static const char *
519 cache_name(enum brw_cache_id cache_id)
520 {
521 switch (cache_id) {
522 case BRW_CACHE_VS_PROG:
523 return "VS kernel";
524 case BRW_CACHE_TCS_PROG:
525 return "TCS kernel";
526 case BRW_CACHE_TES_PROG:
527 return "TES kernel";
528 case BRW_CACHE_FF_GS_PROG:
529 return "Fixed-function GS kernel";
530 case BRW_CACHE_GS_PROG:
531 return "GS kernel";
532 case BRW_CACHE_CLIP_PROG:
533 return "CLIP kernel";
534 case BRW_CACHE_SF_PROG:
535 return "SF kernel";
536 case BRW_CACHE_FS_PROG:
537 return "FS kernel";
538 case BRW_CACHE_CS_PROG:
539 return "CS kernel";
540 default:
541 return "unknown";
542 }
543 }
544
545 void
546 brw_print_program_cache(struct brw_context *brw)
547 {
548 const struct brw_cache *cache = &brw->cache;
549 struct brw_cache_item *item;
550 void *map;
551
552 if (!brw->has_llc)
553 map = brw_bo_map_cpu(brw, cache->bo, MAP_READ);
554 else
555 map = cache->map;
556
557 for (unsigned i = 0; i < cache->size; i++) {
558 for (item = cache->items[i]; item; item = item->next) {
559 fprintf(stderr, "%s:\n", cache_name(i));
560 brw_disassemble(&brw->screen->devinfo, map,
561 item->offset, item->size, stderr);
562 }
563 }
564
565 if (!brw->has_llc)
566 brw_bo_unmap(cache->bo);
567 }