i965: Make the param pointer arrays for the WM dynamically sized.
[mesa.git] / src/mesa/drivers/dri/i965/brw_state_cache.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  The
 * consumers can query the hash table of state using a cache_id and
 * opaque key data, and receive the corresponding state buffer object
 * (plus associated auxiliary data) in return.  Objects in the cache
 * may not have relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a hash of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
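
/*
 * A minimal sketch of the consumer flow (illustrative only; the key
 * struct, "example_prog_data" type, and variable names here are
 * placeholders, not a specific caller):
 *
 *    uint32_t offset = 0;
 *    struct example_prog_data *prog_data;
 *
 *    if (!brw_search_cache(&brw->cache, BRW_WM_PROG,
 *                          &key, sizeof(key), &offset, &prog_data)) {
 *       ... compile the program, then:
 *       brw_upload_cache(&brw->cache, BRW_WM_PROG,
 *                        &key, sizeof(key),
 *                        program, program_size,
 *                        &local_prog_data, sizeof(local_prog_data),
 *                        &offset, &prog_data);
 *    }
 */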

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

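/**
 * Hashes the key data: a simple XOR-and-rotate over the key's 32-bit
 * words, seeded with the cache_id.
 */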
static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

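/**
 * Walks the hash bucket for the given hash and returns the item
 * matching the lookup key, or NULL if there is none.
 */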
static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}


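/**
 * Grows the hash table (tripling the bucket count) and redistributes
 * the existing items into the new buckets.
 */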
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(1, size * sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}


/**
 * Looks for an item matching cache_id and key.  Returns true on a hit,
 * storing the item's offset in the cache BO in *inout_offset and a
 * pointer to its aux data in *out_aux; returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->state.dirty.cache |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}

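/**
 * Replaces the program cache BO with a new one of new_size bytes,
 * copying over any data already uploaded, and flags
 * BRW_NEW_PROGRAM_CACHE so state depending on the old BO's address
 * gets re-emitted.
 */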
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   struct intel_context *intel = &brw->intel;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(intel->bufmgr, "program cache", new_size, 64);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      drm_intel_bo_map(cache->bo, false);
      drm_intel_bo_subdata(new_bo, 0, cache->next_offset, cache->bo->virtual);
      drm_intel_bo_unmap(cache->bo);
   }

   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an item in the cache with identical data and aux
 * data to reuse, so the new item can share an existing offset instead
 * of being uploaded again.
 */
static bool
brw_try_upload_using_copy(struct brw_cache *cache,
                          struct brw_cache_item *result_item,
                          const void *data,
                          const void *aux)
{
   int i;
   struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         const void *item_aux = item->key + item->key_size;
         int ret;

         if (item->cache_id != result_item->cache_id ||
             item->size != result_item->size ||
             item->aux_size != result_item->aux_size) {
            continue;
         }

         if (cache->aux_compare[result_item->cache_id]) {
            if (!cache->aux_compare[result_item->cache_id](item_aux, aux,
                                                           item->aux_size,
                                                           item->key))
               continue;
         } else if (memcmp(item_aux, aux, item->aux_size) != 0) {
            continue;
         }

         drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         result_item->offset = item->offset;

         return true;
      }
   }

   return false;
}

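/**
 * Reserves space for the item in the cache BO, growing the BO (or
 * replacing it, if the GPU is still using it) as needed, and sets
 * item->offset to the 64-byte-aligned location the data will occupy.
 */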
static void
brw_upload_item_data(struct brw_cache *cache,
                     struct brw_cache_item *item,
                     const void *data)
{
   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + item->size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + item->size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (cache->bo_used_by_gpu) {
      brw_cache_new_bo(cache, cache->bo->size);
   }

   item->offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(item->offset + item->size, 64);
}

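/**
 * Adds a new item to the cache and uploads its data to the program
 * cache BO (or shares the offset of an identical existing upload).
 * The key and aux data are copied and kept for later lookups;
 * *out_offset and *out_aux return the item's offset and aux pointer,
 * and the cache_id is flagged dirty.
 */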
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog/prog_data combo in the cache
    * already, then reuse the existing stuff.  This will mean not
    * flagging CACHE_NEW_* when transitioning between the two
    * equivalent hash keys.  This is notably useful for programs
    * generating shaders at runtime, where multiple shaders may
    * compile to the same thing in our backend.
    */
   if (!brw_try_upload_using_copy(cache, item, data, aux)) {
      brw_upload_item_data(cache, item, data);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   /* Copy data to the buffer */
   drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->state.dirty.cache |= 1 << cache_id;
}

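/**
 * Sets up the hash table and the initial 4kB program cache BO, and
 * registers the aux compare/free callbacks for the program stages
 * that need them.
 */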
void
brw_init_caches(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(1, cache->size * sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(intel->bufmgr,
                                  "program cache",
                                  4096, 64);

   cache->aux_compare[BRW_VS_PROG] = brw_vs_prog_data_compare;
   cache->aux_compare[BRW_WM_PROG] = brw_wm_prog_data_compare;
   cache->aux_free[BRW_WM_PROG] = brw_wm_prog_data_free;
}

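/**
 * Throws out all of the cache contents: frees every item (calling any
 * registered aux_free callback), rewinds next_offset, flags all state
 * dirty so the programs get regenerated, and flushes the batchbuffer.
 */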
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct intel_context *intel = &brw->intel;
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0;
   brw->state.dirty.cache |= ~0;
   intel_batchbuffer_flush(intel);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them
    * is 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __FUNCTION__);

   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}