/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  Consumers
 * query the hash table using a cache_id and opaque key data, and
 * receive the corresponding state buffer object (plus associated
 * auxiliary data) in return.  Objects in the cache may not have
 * relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a rotate-and-XOR
 * hash of the key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
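/* A minimal sketch of the lookup-or-upload pattern a consumer follows
 * (hypothetical caller; the real callers live in the per-stage files
 * such as brw_vs.c):
 *
 *    uint32_t offset = 0;
 *    void *prog_data;
 *    if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                          &key, sizeof(key), &offset, &prog_data)) {
 *       ... compile the program, then ...
 *       brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                        &key, sizeof(key), assembly, assembly_size,
 *                        &stage_prog_data, sizeof(stage_prog_data),
 *                        &offset, &prog_data);
 *    }
 */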

#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

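/* Hash the key by XOR-ing it into an accumulator one dword at a time,
 * rotating the accumulator left by 5 bits between words.  The seed is
 * the cache_id, so identical keys of different types land in different
 * buckets.
 */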
static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}

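/* Items match only when the cache_id, precomputed hash, key size, and
 * key bytes all agree; checking the stored hash first makes most
 * mismatches cheap.
 */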
static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
          a->hash == b->hash &&
          a->key_size == b->key_size &&
          (memcmp(a->key, b->key, a->key_size) == 0);
}

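/* Walk the bucket for \p hash and return the first item equal to
 * \p lookup, or NULL if there is none.
 */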
static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}

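/* Triple the bucket count and relink every item into the new table.
 * The items themselves are reused; only the bucket chains change.
 */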
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}

/**
 * Looks up an item matching cache_id and key.  On a hit, returns true,
 * stores the item's offset in *inout_offset, and points *out_aux at the
 * auxiliary data; if the offset changed, the corresponding driver state
 * bit is flagged.  Returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}

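/* Replace the cache BO with a new one of new_size bytes, copying over
 * whatever has already been uploaded.  Item offsets stay valid, but the
 * state pointing at the BO itself must be re-emitted, hence
 * BRW_NEW_PROGRAM_CACHE.
 */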
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an existing item whose uploaded data is byte-for-byte
 * identical to the given program, so the copy in the BO can be shared.
 */
static const struct brw_cache_item *
brw_lookup_prog(const struct brw_cache *cache,
                enum brw_cache_id cache_id,
                const void *data, unsigned data_size)
{
   const struct brw_context *brw = cache->brw;
   unsigned i;
   const struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         int ret;

         if (item->cache_id != cache_id || item->size != data_size)
            continue;

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         return item;
      }
   }

   return NULL;
}

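/* Reserve size bytes in the cache BO, doubling the BO (or replacing a
 * busy one on non-LLC systems) as needed.  Returns the offset of the
 * reservation; next_offset stays 64-byte aligned for the next program.
 */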
static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}

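/* Upload a program (and its auxiliary data) into the cache.  If an
 * identical copy of the data is already in the BO it is shared rather
 * than duplicated.  The key and aux data are stored together in one
 * malloc'd block, so *out_aux remains valid for the item's lifetime.
 */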
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   const struct brw_cache_item *matching_data =
      brw_lookup_prog(cache, cache_id, data, data_size);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog in the cache already, then reuse the
    * existing program data without creating a new copy in the underlying
    * buffer object.  This is notably useful for programs generating
    * shaders at runtime, where multiple shaders may compile to the same
    * thing in our backend.
    */
   if (matching_data) {
      item->offset = matching_data->offset;
   } else {
      item->offset = brw_alloc_item_data(cache, data_size);

      /* Copy data to the buffer */
      if (brw->has_llc) {
         memcpy((char *)cache->bo->virtual + item->offset, data, data_size);
      } else {
         drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
      }
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}

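/* Set up the cache structure with a small bucket array (rehash() grows
 * it as needed) and an initial 4 kB program BO.  The aux_free hooks let
 * the program stages release their prog_data when items are freed.
 */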
void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);

   cache->aux_free[BRW_CACHE_VS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_GS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_FS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_CS_PROG] = brw_stage_prog_data_free;
}

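/* Free every item and start allocating from offset 0 again.  All
 * previously returned offsets become invalid, so force a full state
 * re-emit and flush the batch that may still reference the old
 * contents.
 */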
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState |= ~0;
   brw->ctx.NewDriverState |= ~0ull;
   intel_batchbuffer_flush(brw);
}

void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* Un-tuned guess: each object is generally a page, so 2000 of them
    * is about 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}


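/* Tear down the cache: release the BO, free every item via
 * brw_clear_cache(), and free the bucket array itself.
 */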
static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}


void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}