i965: Push down inclusion of brw_program.h.
[mesa.git] src/mesa/drivers/dri/i965/brw_state_cache.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  The
 * consumers can query the hash table of state using a cache_id and
 * opaque key data, and receive the offset of the corresponding state
 * in the cache's buffer object (plus associated auxiliary data) in
 * return.  Objects in the cache may not have relocations (pointers
 * to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a hash of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
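
/* Usage note (illustrative, based on this driver's consumers): state
 * atoms generally call brw_search_cache() first and, on a miss, compile
 * the program and insert it with brw_upload_cache(); see the sketch
 * after brw_search_cache() below.
 */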

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"
#include "brw_program.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* A simple rotate-left-by-5/XOR over the 32-bit words of the key.
    * I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


/* Triple the number of hash buckets and relink every existing item
 * into the new table.
 */
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Looks for an item matching cache_id and key in the cache.
 *
 * Returns true on a hit, updating *inout_offset and *inout_aux to the
 * item's offset in the cache BO and its auxiliary data, and flagging
 * NewDriverState if they changed.  Returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *inout_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   void *aux = ((char *) item->key) + item->key_size;

   if (item->offset != *inout_offset || aux != *((void **) inout_aux)) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
      *((void **) inout_aux) = aux;
   }

   return true;
}
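
#if 0
/* Illustrative sketch, not part of the driver: the typical consumer
 * pattern, assuming a hypothetical compile_vs_prog() helper that
 * compiles the program and inserts it with brw_upload_cache().  The
 * real callers live in brw_vs.c and the other per-stage files.
 */
static void
example_get_vs_prog(struct brw_context *brw,
                    const struct brw_vs_prog_key *key)
{
   if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
                         key, sizeof(*key),
                         &brw->vs.base.prog_offset,
                         &brw->vs.base.prog_data)) {
      /* Miss: compile and upload; brw_upload_cache() fills in the
       * offset and aux pointer just as a successful search would.
       */
      compile_vs_prog(brw, key);
   }
}
#endif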

static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an item in the cache with identical data.
 */
static const struct brw_cache_item *
brw_lookup_prog(const struct brw_cache *cache,
                enum brw_cache_id cache_id,
                const void *data, unsigned data_size)
{
   const struct brw_context *brw = cache->brw;
   unsigned i;
   const struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         int ret;

         if (item->cache_id != cache_id || item->size != data_size)
            continue;

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         return item;
      }
   }

   return NULL;
}

static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}
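
/* For illustration: with next_offset == 128 and a 70-byte program, the
 * item lands at offset 128 and next_offset advances to
 * ALIGN(128 + 70, 64) == 256, keeping every program 64-byte aligned.
 */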

void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog in the cache already, then reuse the
    * existing program data without making a new copy in the underlying
    * buffer object.  This is notably useful for programs generating
    * shaders at runtime, where multiple shaders may compile to the same
    * thing in our backend.
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      if (brw->has_llc) {
         memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
      } else {
         drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
      }
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}
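
/* Note that, thanks to the brw_lookup_prog() check above, two distinct
 * keys whose compiled binaries happen to be byte-identical end up as
 * separate hash table items sharing a single offset in the cache BO.
 */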

void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);
}

static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (c->cache_id == BRW_CACHE_VS_PROG ||
             c->cache_id == BRW_CACHE_GS_PROG ||
             c->cache_id == BRW_CACHE_FS_PROG ||
             c->cache_id == BRW_CACHE_CS_PROG) {
            const void *item_aux = c->key + c->key_size;
            brw_stage_prog_data_free(item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState |= ~0;
   brw->ctx.NewDriverState |= ~0ull;
   intel_batchbuffer_flush(brw);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* An un-tuned guess: each object is generally a page, so 2000 of
    * them is 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}