/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */
/** @file brw_state_cache.c
 *
 * This file implements a simple static state cache for 965.  The
 * consumers can query the hash table of state using a cache_id and
 * opaque key data, and receive the corresponding state buffer object
 * (plus associated auxiliary data) in return.  Objects in the cache
 * may not have relocations (pointers to other BOs) in them.
 *
 * The inner workings are a simple hash table based on a CRC of the
 * key data.
 *
 * Replacement is not implemented.  Instead, when the cache gets too
 * big we throw out all of the cache data and let it get regenerated.
 */
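/*
 * A rough sketch of the consumer pattern (illustrative only; the
 * stage_state/prog_data names below stand in for whatever state the
 * caller actually tracks):
 *
 *    if (!brw_search_cache(&brw->cache, BRW_VS_PROG,
 *                          &key, sizeof(key),
 *                          &stage_state.prog_offset, &prog_data)) {
 *       ...compile the program, then...
 *       brw_upload_cache(&brw->cache, BRW_VS_PROG, &key, sizeof(key),
 *                        program, program_size,
 *                        &prog_data_in, sizeof(prog_data_in),
 *                        &stage_state.prog_offset, &prog_data);
 *    }
 */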
#include "main/imports.h"
#include "intel_batchbuffer.h"
#include "brw_state.h"
#include "brw_vs.h"
#include "brw_wm.h"
#include "brw_vec4_gs.h"

#define FILE_DEBUG_FLAG DEBUG_STATE
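/**
 * Computes the hash for an item by folding each 32-bit word of the key
 * into a rotate-and-XOR accumulator seeded with the cache_id.
 */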
static GLuint
hash_key(struct brw_cache_item *item)
{
   GLuint *ikey = (GLuint *)item->key;
   GLuint hash = item->cache_id, i;

   assert(item->key_size % 4 == 0);

   /* I'm sure this can be improved on:
    */
   for (i = 0; i < item->key_size / 4; i++) {
      hash ^= ikey[i];
      hash = (hash << 5) | (hash >> 27);
   }

   return hash;
}
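/**
 * Returns nonzero when two items have the same cache_id, hash, and key
 * contents.
 */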
static int
brw_cache_item_equals(const struct brw_cache_item *a,
                      const struct brw_cache_item *b)
{
   return a->cache_id == b->cache_id &&
      a->hash == b->hash &&
      a->key_size == b->key_size &&
      (memcmp(a->key, b->key, a->key_size) == 0);
}
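/**
 * Walks the hash bucket for the given hash value and returns the
 * matching item, or NULL if none is present.
 */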
static struct brw_cache_item *
search_cache(struct brw_cache *cache, GLuint hash,
             struct brw_cache_item *lookup)
{
   struct brw_cache_item *c;

#if 0
   int bucketcount = 0;

   for (c = cache->items[hash % cache->size]; c; c = c->next)
      bucketcount++;

   fprintf(stderr, "bucket %d/%d = %d/%d items\n", hash % cache->size,
           cache->size, bucketcount, cache->n_items);
#endif

   for (c = cache->items[hash % cache->size]; c; c = c->next) {
      if (brw_cache_item_equals(lookup, c))
         return c;
   }

   return NULL;
}
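/**
 * Triples the number of hash buckets and redistributes the existing
 * items among them.
 */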
static void
rehash(struct brw_cache *cache)
{
   struct brw_cache_item **items;
   struct brw_cache_item *c, *next;
   GLuint size, i;

   size = cache->size * 3;
   items = calloc(size, sizeof(*items));

   for (i = 0; i < cache->size; i++)
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         c->next = items[c->hash % size];
         items[c->hash % size] = c;
      }

   free(cache->items);
   cache->items = items;
   cache->size = size;
}
/**
 * Returns true if the cache holds a previously uploaded item matching
 * cache_id and key, storing its offset in *inout_offset and a pointer
 * to its aux data in *out_aux.
 */
bool
brw_search_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key, GLuint key_size,
                 uint32_t *inout_offset, void *out_aux)
{
   struct brw_context *brw = cache->brw;
   struct brw_cache_item *item;
   struct brw_cache_item lookup;
   GLuint hash;

   lookup.cache_id = cache_id;
   lookup.key = key;
   lookup.key_size = key_size;
   hash = hash_key(&lookup);
   lookup.hash = hash;

   item = search_cache(cache, hash, &lookup);

   if (item == NULL)
      return false;

   *(void **)out_aux = ((char *)item->key + item->key_size);

   if (item->offset != *inout_offset) {
      brw->state.dirty.cache |= (1 << cache_id);
      *inout_offset = item->offset;
   }

   return true;
}
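/**
 * Replaces the program cache BO with a freshly allocated one of
 * new_size bytes, preserving any bytes already handed out.
 */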
static void
brw_cache_new_bo(struct brw_cache *cache, uint32_t new_size)
{
   struct brw_context *brw = cache->brw;
   drm_intel_bo *new_bo;

   new_bo = drm_intel_bo_alloc(brw->bufmgr, "program cache", new_size, 64);

   /* Copy any existing data that needs to be saved. */
   if (cache->next_offset != 0) {
      drm_intel_bo_map(cache->bo, false);
      drm_intel_bo_subdata(new_bo, 0, cache->next_offset, cache->bo->virtual);
      drm_intel_bo_unmap(cache->bo);
   }

   drm_intel_bo_unreference(cache->bo);
   cache->bo = new_bo;
   cache->bo_used_by_gpu = false;

   /* Since we have a new BO in place, we need to signal the units
    * that depend on it (state base address on gen5+, or unit state before).
    */
   brw->state.dirty.brw |= BRW_NEW_PROGRAM_CACHE;
}
/**
 * Attempts to find an item in the cache with identical data and aux
 * data, so the new item can reuse its storage.
 */
static bool
brw_try_upload_using_copy(struct brw_cache *cache,
                          struct brw_cache_item *result_item,
                          const void *data,
                          const void *aux)
{
   int i;
   struct brw_cache_item *item;

   for (i = 0; i < cache->size; i++) {
      for (item = cache->items[i]; item; item = item->next) {
         const void *item_aux = item->key + item->key_size;
         int ret;

         if (item->cache_id != result_item->cache_id ||
             item->size != result_item->size ||
             item->aux_size != result_item->aux_size) {
            continue;
         }

         if (cache->aux_compare[result_item->cache_id]) {
            if (!cache->aux_compare[result_item->cache_id](item_aux, aux))
               continue;
         } else if (memcmp(item_aux, aux, item->aux_size) != 0) {
            continue;
         }

         drm_intel_bo_map(cache->bo, false);
         ret = memcmp(cache->bo->virtual + item->offset, data, item->size);
         drm_intel_bo_unmap(cache->bo);
         if (ret)
            continue;

         result_item->offset = item->offset;

         return true;
      }
   }

   return false;
}
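/**
 * Reserves space for the item's data in the cache BO, growing (or
 * replacing a GPU-busy) BO first when necessary.
 */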
static void
brw_upload_item_data(struct brw_cache *cache,
                     struct brw_cache_item *item,
                     const void *data)
{
   struct brw_context *brw = cache->brw;

   /* Allocate space in the cache BO for our new program. */
   if (cache->next_offset + item->size > cache->bo->size) {
      uint32_t new_size = cache->bo->size * 2;

      while (cache->next_offset + item->size > new_size)
         new_size *= 2;

      brw_cache_new_bo(cache, new_size);
   }

   /* If we would block on writing to an in-use program BO, just
    * recreate it.
    */
   if (cache->bo_used_by_gpu) {
      perf_debug("Copying busy program cache buffer.\n");
      brw_cache_new_bo(cache, cache->bo->size);
   }

   item->offset = cache->next_offset;

   /* Programs are always 64-byte aligned, so set up the next one now */
   cache->next_offset = ALIGN(item->offset + item->size, 64);
}
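/**
 * Adds a new item to the cache: stores its program data in the cache
 * BO (reusing an identical existing copy when possible) and links the
 * key/aux data into the hash table.
 */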
void
brw_upload_cache(struct brw_cache *cache,
                 enum brw_cache_id cache_id,
                 const void *key,
                 GLuint key_size,
                 const void *data,
                 GLuint data_size,
                 const void *aux,
                 GLuint aux_size,
                 uint32_t *out_offset,
                 void *out_aux)
{
   struct brw_cache_item *item = CALLOC_STRUCT(brw_cache_item);
   GLuint hash;
   void *tmp;

   item->cache_id = cache_id;
   item->size = data_size;
   item->key = key;
   item->key_size = key_size;
   item->aux_size = aux_size;
   hash = hash_key(item);
   item->hash = hash;

   /* If we can find a matching prog/prog_data combo in the cache
    * already, then reuse the existing stuff.  This will mean not
    * flagging CACHE_NEW_* when transitioning between the two
    * equivalent hash keys.  This is notably useful for programs
    * generating shaders at runtime, where multiple shaders may
    * compile to the same thing in our backend.
    */
   if (!brw_try_upload_using_copy(cache, item, data, aux)) {
      brw_upload_item_data(cache, item, data);
   }

   /* Set up the memory containing the key and aux_data */
   tmp = malloc(key_size + aux_size);

   memcpy(tmp, key, key_size);
   memcpy(tmp + key_size, aux, aux_size);

   item->key = tmp;

   if (cache->n_items > cache->size * 1.5)
      rehash(cache);

   hash %= cache->size;
   item->next = cache->items[hash];
   cache->items[hash] = item;
   cache->n_items++;

   /* Copy data to the buffer */
   drm_intel_bo_subdata(cache->bo, item->offset, data_size, data);

   *out_offset = item->offset;
   *(void **)out_aux = (void *)((char *)item->key + item->key_size);
   cache->brw->state.dirty.cache |= 1 << cache_id;
}
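/**
 * Sets up the cache: a small empty hash table, an initial program BO,
 * and the per-stage aux compare/free callbacks.
 */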
void
brw_init_caches(struct brw_context *brw)
{
   struct brw_cache *cache = &brw->cache;

   cache->brw = brw;

   cache->size = 7;
   cache->n_items = 0;
   cache->items =
      calloc(cache->size, sizeof(struct brw_cache_item *));

   cache->bo = drm_intel_bo_alloc(brw->bufmgr,
                                  "program cache",
                                  4096, 64);

   cache->aux_compare[BRW_VS_PROG] = brw_vs_prog_data_compare;
   cache->aux_compare[BRW_GS_PROG] = brw_gs_prog_data_compare;
   cache->aux_compare[BRW_WM_PROG] = brw_wm_prog_data_compare;
   cache->aux_free[BRW_VS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_GS_PROG] = brw_stage_prog_data_free;
   cache->aux_free[BRW_WM_PROG] = brw_stage_prog_data_free;
}
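/**
 * Frees every item and resets the BO allocation pointer, flagging all
 * state dirty so programs get regenerated.
 */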
static void
brw_clear_cache(struct brw_context *brw, struct brw_cache *cache)
{
   struct brw_cache_item *c, *next;
   GLuint i;

   DBG("%s\n", __FUNCTION__);

   for (i = 0; i < cache->size; i++) {
      for (c = cache->items[i]; c; c = next) {
         next = c->next;
         if (cache->aux_free[c->cache_id]) {
            const void *item_aux = c->key + c->key_size;
            cache->aux_free[c->cache_id](item_aux);
         }
         free((void *)c->key);
         free(c);
      }
      cache->items[i] = NULL;
   }

   cache->n_items = 0;

   /* Start putting programs into the start of the BO again, since
    * we'll never find the old results.
    */
   cache->next_offset = 0;

   /* We need to make sure that the programs get regenerated, since
    * any offsets leftover in brw_context will no longer be valid.
    */
   brw->state.dirty.mesa |= ~0;
   brw->state.dirty.brw |= ~0ull;
   brw->state.dirty.cache |= ~0;
   intel_batchbuffer_flush(brw);
}
void
brw_state_cache_check_size(struct brw_context *brw)
{
   /* un-tuned guess.  Each object is generally a page, so 2000 of them is
    * 8 MB of state cache.
    */
   if (brw->cache.n_items > 2000) {
      perf_debug("Exceeded state cache size limit. Clearing the set "
                 "of compiled programs, which will trigger recompiles\n");
      brw_clear_cache(brw, &brw->cache);
   }
}
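/**
 * Releases the cache BO and all items at context teardown.
 */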
static void
brw_destroy_cache(struct brw_context *brw, struct brw_cache *cache)
{
   DBG("%s\n", __FUNCTION__);

   drm_intel_bo_unreference(cache->bo);
   cache->bo = NULL;
   brw_clear_cache(brw, cache);
   free(cache->items);
   cache->items = NULL;
   cache->size = 0;
}
void
brw_destroy_caches(struct brw_context *brw)
{
   brw_destroy_cache(brw, &brw->cache);
}