/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

/** @file brw_program_cache.c
 *
 * This file implements a simple program cache for 965.  The consumers can
 * query the hash table of programs using a cache_id and program key, and
 * receive the corresponding program buffer object (plus associated auxiliary
 * data) in return.  Objects in the cache may not have relocations
 * (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a 32-bit hash of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
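
/* A sketch of the typical consumer flow (the key/program variables and the
 * compile_program() helper are illustrative placeholders, not functions in
 * this driver): look the key up first, and only compile and upload on a
 * miss.
 *
 *    uint32_t offset;
 *    struct brw_stage_prog_data *prog_data;
 *
 *    if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                          &key, sizeof(key), &offset, &prog_data)) {
 *       program = compile_program(&key, &program_size, &local_prog_data);
 *       brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                        &key, sizeof(key), program, program_size,
 *                        local_prog_data, prog_data_size,
 *                        &offset, &prog_data);
 *    }
 *
 * On a hit, brw_search_cache() rewrites *inout_offset and *inout_aux and
 * flags the cache_id's dirty bit only if either value actually changed.
 */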

#include "main/imports.h"
#include "main/streaming-load-memcpy.h"
#include "x86/common_x86_asm.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"
#include "brw_program.h"
#include "compiler/brw_eu.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

struct brw_cache_item {
   /**
    * Effectively part of the key, cache_id identifies what kind of state
    * buffer is involved, and also which dirty flag should be set.
    */
   enum brw_cache_id cache_id;

   /** 32-bit hash of the key data */
   GLuint hash;

   /** for variable-sized keys */
   GLuint key_size;
   GLuint aux_size;
   const void *key;

   uint32_t offset;
   uint32_t size;

   struct brw_cache_item *next;
};
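
/* For items inserted through brw_upload_cache(), item->key points at a
 * single malloc'd block laid out as the key bytes immediately followed by
 * the aux bytes, so the aux data is recovered as
 * (char *) item->key + item->key_size.
 */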

static unsigned
get_program_string_id(enum brw_cache_id cache_id, const void *key)
{
   switch (cache_id) {
   case BRW_CACHE_VS_PROG:
      return ((struct brw_vs_prog_key *) key)->program_string_id;
   case BRW_CACHE_TCS_PROG:
      return ((struct brw_tcs_prog_key *) key)->program_string_id;
   case BRW_CACHE_TES_PROG:
      return ((struct brw_tes_prog_key *) key)->program_string_id;
   case BRW_CACHE_GS_PROG:
      return ((struct brw_gs_prog_key *) key)->program_string_id;
   case BRW_CACHE_CS_PROG:
      return ((struct brw_cs_prog_key *) key)->program_string_id;
   case BRW_CACHE_FS_PROG:
      return ((struct brw_wm_prog_key *) key)->program_string_id;
   default:
      unreachable("no program string id for this kind of program");
   }
}

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
      a->hash == b->hash &&
      a->key_size == b->key_size &&
      (memcmp(a->key, b->key, a->key_size) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Looks up an item by cache_id and key.  On a hit, returns true and updates
 * *inout_offset and *inout_aux (flagging the state as dirty if either
 * changed); returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *inout_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   void *aux = ((char *) item->key) + item->key_size;

   if (item->offset != *inout_offset || aux != *((void **) inout_aux)) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
      *((void **) inout_aux) = aux;
   }

   return true;
}

static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   struct brw_bo *new_bo;

   perf_debug("Copying to larger program cache: %u kB -> %u kB\n",
              (unsigned) cache->bo->size / 1024, new_size / 1024);

   new_bo = brw_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (can_do_exec_capture(brw->screen))
      new_bo->kflags = EXEC_OBJECT_CAPTURE;

   void *map = brw_bo_map(brw, new_bo, MAP_READ | MAP_WRITE |
                                       MAP_ASYNC | MAP_PERSISTENT);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
#ifdef USE_SSE41
      if (!cache->bo->cache_coherent && cpu_has_sse4_1)
         _mesa_streaming_load_memcpy(map, cache->map, cache->next_offset);
      else
#endif
         memcpy(map, cache->map, cache->next_offset);
   }

   brw_bo_unmap(cache->bo);
   brw_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->map = map;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
   brw->batch.state_base_address_emitted = false;
}

/**
 * Attempts to find an item in the cache with identical data.
 */
static const struct brw_cache_item *
brw_lookup_prog(const struct brw_cache *cache,
                enum brw_cache_id cache_id,
                const void *data, unsigned data_size)
{
   unsigned i;
   const struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         if (item->cache_id != cache_id || item->size != data_size ||
             memcmp(cache->map + item->offset, data, item->size) != 0)
            continue;

         return item;
      }
   }

   return NULL;
}

static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}
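
/* Worked example for brw_alloc_item_data(): with next_offset == 0, a
 * 100-byte program is placed at offset 0 and next_offset advances to
 * ALIGN(0 + 100, 64) == 128, so the following program begins on the next
 * 64-byte boundary.
 */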

const void *
brw_find_previous_compile(struct brw_cache *cache,
                          enum brw_cache_id cache_id,
                          unsigned program_string_id)
{
   for (unsigned i = 0; i < cache->size; i++) {
      for (struct brw_cache_item *c = cache->items[i]; c; c = c->next) {
         if (c->cache_id == cache_id &&
             get_program_string_id(cache_id, c->key) == program_string_id) {
            return c->key;
         }
      }
   }

   return NULL;
}

void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog in the cache already, then reuse the
    * existing stuff without creating a new copy in the underlying buffer
    * object.  This is notably useful for programs generating shaders at
    * runtime, where multiple shaders may compile to the same thing in our
    * backend.
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      memcpy(cache->map + item->offset, data, data_size);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}

void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = brw_bo_alloc(brw->bufmgr, "program cache", 16384, 64);
   if (can_do_exec_capture(brw->screen))
      cache->bo->kflags = EXEC_OBJECT_CAPTURE;

   cache->map = brw_bo_map(brw, cache->bo, MAP_READ | MAP_WRITE |
                                           MAP_ASYNC | MAP_PERSISTENT);
}

static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_TCS_PROG ||
             c->cache_id == BRW_CACHE_TES_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_aux = c->key + c->key_size;
            brw_stage_prog_data_free(item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;
   brw->state.pipelines[BRW_RENDER_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_RENDER_PIPELINE].brw = ~0ull;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].mesa = ~0;
   brw->state.pipelines[BRW_COMPUTE_PIPELINE].brw = ~0ull;

   /* Also, NULL out any stale program pointers. */
   brw->vs.base.prog_data = NULL;
   brw->tcs.base.prog_data = NULL;
   brw->tes.base.prog_data = NULL;
   brw->gs.base.prog_data = NULL;
   brw->wm.base.prog_data = NULL;
   brw->cs.base.prog_data = NULL;

   intel_batchbuffer_flush(brw);
}

void
brw_program_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them is
    * 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{

   DBG("%s\n", __func__);

   /* This can be NULL if context creation failed early on */
   if (cache->bo) {
      brw_bo_unmap(cache->bo);
      brw_bo_unreference(cache->bo);
      cache->bo = NULL;
      cache->map = NULL;
   }
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}

static const char *
cache_name(enum brw_cache_id cache_id)
{
   switch (cache_id) {
   case BRW_CACHE_VS_PROG:
      return "VS kernel";
   case BRW_CACHE_TCS_PROG:
      return "TCS kernel";
   case BRW_CACHE_TES_PROG:
      return "TES kernel";
   case BRW_CACHE_FF_GS_PROG:
      return "Fixed-function GS kernel";
   case BRW_CACHE_GS_PROG:
      return "GS kernel";
   case BRW_CACHE_CLIP_PROG:
      return "CLIP kernel";
   case BRW_CACHE_SF_PROG:
      return "SF kernel";
   case BRW_CACHE_FS_PROG:
      return "FS kernel";
   case BRW_CACHE_CS_PROG:
      return "CS kernel";
   default:
      return "unknown";
   }
}

void
brw_print_program_cache(struct brw_context *brw)
{
   const struct brw_cache *cache = &brw->cache;
   struct brw_cache_item *item;

   for (unsigned i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         fprintf(stderr, "%s:\n", cache_name(item->cache_id));
         brw_disassemble(&brw->screen->devinfo, cache->map,
                         item->offset, item->offset + item->size, stderr);
      }
   }
}