/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  Consumers
 * query the hash table of state using a cache_id and opaque key data,
 * and receive the corresponding state buffer object (plus associated
 * auxiliary data) in return.  Objects in the cache may not have
 * relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a CRC of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
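
/*
 * Typical consumer usage, as an illustrative sketch only (not taken from
 * any one unit; `key`, `assembly`, `assembly_size`, and `prog_data` are
 * hypothetical stand-ins for a stage's real key and compile results):
 *
 *    uint32_t offset = 0;
 *    struct brw_stage_prog_data *prog_data;
 *
 *    if (!brw_search_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                          &key, sizeof(key), &offset, &prog_data)) {
 *       // Cache miss: compile the program, then insert it so the next
 *       // lookup with an equivalent key hits.
 *       brw_upload_cache(&brw->cache, BRW_CACHE_VS_PROG,
 *                        &key, sizeof(key),
 *                        assembly, assembly_size,
 *                        prog_data, sizeof(*prog_data),
 *                        &offset, &prog_data);
 *    }
 */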
#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_gs.h"
#include "brw_cs.h"

#define FILE_DEBUG_FLAG DEBUG_STATE

static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size/4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}
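
/* Two items match when they share a cache_id, hash, and key size, and
 * their key contents are bytewise identical.
 */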
static bool
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
      a->hash == b->hash &&
      a->key_size == b->key_size &&
      (memcmp(a->key, b->key, a->key_size) == 0);
}
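
/* Scan the bucket for this hash value and return the first item equal to
 * the lookup template, or NULL on a miss.
 */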
static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}
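
/* Triple the number of hash buckets and redistribute all existing items
 * into the new table.
 */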
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}

/**
 * Searches the cache for an item matching cache_id and key.
 *
 * Returns true on a hit, storing the item's offset in *inout_offset and
 * a pointer to its aux data in *out_aux; returns false on a miss.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->ctx.NewDriverState |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}
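
/* Swap the program cache BO for a new allocation of new_size bytes,
 * copying over any programs already uploaded.
 */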
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(new_bo);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      if (brw->has_llc) {
         memcpy(new_bo->virtual, cache->bo->virtual, cache->next_offset);
      } else {
         drm_intel_bo_map(cache->bo, false);
         drm_intel_bo_subdata(new_bo, 0, cache->next_offset,
                              cache->bo->virtual);
         drm_intel_bo_unmap(cache->bo);
      }
   }

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->ctx.NewDriverState |= BRW_NEW_PROGRAM_CACHE;
}

/**
 * Attempts to find an item in the cache with identical data and aux
 * data to use, so that the new item can point at the existing program
 * instead of uploading a second copy.
 */
static bool
brw_try_upload_using_copy(struct brw_cache *cache,
                          struct brw_cache_item *result_item,
                          const void *data,
                          const void *aux)
{
   struct brw_context *brw = cache->brw;
   int i;
   struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         const void *item_aux = item->key + item->key_size;
         int ret;

         if (item->cache_id != result_item->cache_id ||
             item->size != result_item->size ||
             item->aux_size != result_item->aux_size) {
            continue;
         }

         if (cache->aux_compare[result_item->cache_id]) {
            if (!cache->aux_compare[result_item->cache_id](item_aux, aux))
               continue;
         } else if (memcmp(item_aux, aux, item->aux_size) != 0) {
            continue;
         }

         if (!brw->has_llc)
            drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         if (!brw->has_llc)
            drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         result_item->offset = item->offset;

         return true;
      }
   }

   return false;
}
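
/* Reserve 64-byte-aligned space for a program of the given size in the
 * cache BO, growing (or replacing) the BO as needed, and return the
 * offset of the reserved space.
 */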
static uint32_t
brw_alloc_item_data(struct brw_cache *cache, uint32_t size)
{
   uint32_t offset;
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (!brw->has_llc && cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(offset + size, 64);

   return offset;
}
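
/* Upload a new program (and its key/aux data) into the cache, reusing an
 * identical program's storage if one already exists, and hand the offset
 * and aux pointer back to the caller.
 */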
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog/prog_data combo in the cache
    * already, then reuse the existing stuff.  This will mean not
    * flagging CACHE_NEW_* when transitioning between the two
    * equivalent hash keys.  This is notably useful for programs
    * generating shaders at runtime, where multiple shaders may
    * compile to the same thing in our backend.
    */
   if (!brw_try_upload_using_copy(cache, item, data, aux)) {
      item->offset = brw_alloc_item_data(cache, data_size);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5f)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   /* Copy data to the buffer */
   if (brw->has_llc) {
      memcpy((char *) cache->bo->virtual + item->offset, data, data_size);
   } else {
      drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);
   }

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->ctx.NewDriverState |= 1 << cache_id;
}
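
/* Set up the hash table and the initial program BO, and register the
 * per-stage aux compare/free callbacks.
 */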
void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);
   if (brw->has_llc)
      drm_intel_gem_bo_map_unsynchronized(cache->bo);

   cache->aux_compare[BRW_CACHE_VS_PROG] = brw_vs_prog_data_compare;
   cache->aux_compare[BRW_CACHE_GS_PROG] = brw_gs_prog_data_compare;
   cache->aux_compare[BRW_CACHE_FS_PROG] = brw_wm_prog_data_compare;
   cache->aux_compare[BRW_CACHE_CS_PROG] = brw_cs_prog_data_compare;
   cache->aux_free[BRW_CACHE_VS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_GS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_FS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_CACHE_CS_PROG] = brw_stage_prog_data_free;
}
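
/* Drop every cached item and rewind the BO allocator; all programs will
 * be regenerated on their next use.
 */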
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __func__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->NewGLState |= ~0;
   brw->ctx.NewDriverState |= ~0ull;
   intel_batchbuffer_flush(brw);
}
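
/* Replacement is not implemented, so the only bound on cache growth is to
 * throw everything away once the item count gets too large.
 */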
void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them is
    * 8 MB of state.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit.  Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}

static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{

   DBG("%s\n", __func__);

   if (brw->has_llc)
      drm_intel_bo_unmap(cache->bo);
   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}
void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}