/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/hash_table.h"
#include "util/set.h"
#include "util/list.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_batch_cache.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
/*
 * The batch cache provides lookup for mapping pipe_framebuffer_state
 * to a batch.
 *
 * It does this via a hashtable, with a key that roughly matches the
 * pipe_framebuffer_state, as described below.
 *
 * Batch Cache hashtable key:
 *
 *    To serialize the key, and to avoid dealing with holding a reference
 *    to pipe_surface's (which hold a reference to pipe_resource and
 *    complicate the whole refcnting thing), the key is variable length
 *    and inlines the pertinent details of the pipe_surface.
 *
 * Batch:
 *
 *    Each batch needs to hold a reference to each resource it depends on
 *    (ie. anything that needs a mem2gmem), and a weak reference to the
 *    resources it renders to.  (If both src[n] and dst[n] are not NULL
 *    then they are the same.)
 *
 *    When a resource is destroyed, we need to remove entries in the batch
 *    cache that reference the resource, to avoid dangling pointer issues.
 *    So each resource holds a hashset of batches which reference it in
 *    their hashtable key.
 *
 *    When a batch holds weak references to no more resources (ie. all the
 *    surfaces it rendered to are destroyed) the batch can be destroyed.
 *    This could happen in an app that renders and never uses the result.
 *    The more common scenario, I think, is that some, but not all, of the
 *    surfaces are destroyed before the batch is submitted.
 *
 *    If (for example) a batch writes to the zsbuf but that surface is
 *    destroyed before the batch is submitted, we can skip the gmem2mem
 *    (but still need to alloc gmem space as before).  If the batch
 *    depended on the previous contents of that surface, it would be
 *    holding a reference, so the surface would not have been destroyed.
 */
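/* Rough lifecycle, as an illustrative sketch (all names below are the
 * functions defined in this file):
 *
 *    batch = fd_batch_from_fb(cache, ctx, pfb);  // lookup-or-create by fb state
 *    ...                                         // rendering is recorded to batch
 *    fd_bc_flush(cache, ctx);                    // flush all of ctx's batches
 *    fd_bc_invalidate_resource(rsc, true);       // called when a rsc is destroyed
 */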
struct key {
	uint32_t width, height, layers;
	uint16_t samples, num_surfs;
	struct fd_context *ctx;
	struct {
		struct pipe_resource *texture;
		union pipe_surface_desc u;
		uint16_t pos, format;
	} surf[0];
};
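/* surf[] is a variable-length tail: a key for N surfaces is allocated as
 * sizeof(struct key) + N * sizeof(key->surf[0]), see key_alloc() below.
 */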
static struct key *
key_alloc(unsigned num_surfs)
{
	struct key *key =
		CALLOC_VARIANT_LENGTH_STRUCT(key, sizeof(key->surf[0]) * num_surfs);
	return key;
}
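/* The key is hashed as two blocks, the fixed-size head and the
 * variable-length surf[] tail, since num_surfs varies per key.
 */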
static uint32_t
key_hash(const void *_key)
{
	const struct key *key = _key;
	uint32_t hash = _mesa_fnv32_1a_offset_bias;
	hash = _mesa_fnv32_1a_accumulate_block(hash, key, offsetof(struct key, surf[0]));
	hash = _mesa_fnv32_1a_accumulate_block(hash, key->surf, sizeof(key->surf[0]) * key->num_surfs);
	return hash;
}
static bool
key_equals(const void *_a, const void *_b)
{
	const struct key *a = _a;
	const struct key *b = _b;
	return (memcmp(a, b, offsetof(struct key, surf[0])) == 0) &&
		(memcmp(a->surf, b->surf, sizeof(a->surf[0]) * a->num_surfs) == 0);
}
void
fd_bc_init(struct fd_batch_cache *cache)
{
	cache->ht = _mesa_hash_table_create(NULL, key_hash, key_equals);
}
void
fd_bc_fini(struct fd_batch_cache *cache)
{
	_mesa_hash_table_destroy(cache->ht, NULL);
}
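/* Flush all batches in the cache which belong to ctx.  In the deferred case
 * nothing is actually flushed; the batches are instead made dependencies of
 * the current batch (see fd_bc_flush_deferred() below).
 */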
static void
bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
{
	/* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
	 * can cause batches to be unref'd and freed under our feet, so grab
	 * a reference to all the batches we need up-front.
	 */
	struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
	struct fd_batch *batch;
	unsigned n = 0;

	fd_context_lock(ctx);

	foreach_batch(batch, cache, cache->batch_mask) {
		if (batch->ctx == ctx) {
			fd_batch_reference_locked(&batches[n++], batch);
		}
	}

	if (deferred) {
		struct fd_batch *current_batch = ctx->batch;

		for (unsigned i = 0; i < n; i++) {
			if (batches[i] != current_batch) {
				fd_batch_add_dep(current_batch, batches[i]);
			}
		}

		fd_context_unlock(ctx);
	} else {
		fd_context_unlock(ctx);

		for (unsigned i = 0; i < n; i++) {
			fd_batch_flush(batches[i], false, false);
		}
	}

	for (unsigned i = 0; i < n; i++) {
		fd_batch_reference(&batches[i], NULL);
	}
}
void
fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
{
	bc_flush(cache, ctx, false);
}
/* Deferred flush doesn't actually flush, but it marks every other
 * batch associated with the context as dependent on the current
 * batch.  So when the current batch gets flushed, all other batches
 * that came before also get flushed.
 */
void
fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
{
	bc_flush(cache, ctx, true);
}
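/* (Illustrative, assumed call-site: a deferred context flush, e.g.
 * PIPE_FLUSH_DEFERRED, would use this so that actual submission is
 * postponed until ctx->batch itself gets flushed.)
 */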
void
fd_bc_invalidate_context(struct fd_context *ctx)
{
	struct fd_batch_cache *cache = &ctx->screen->batch_cache;
	struct fd_batch *batch;

	mtx_lock(&ctx->screen->lock);

	foreach_batch(batch, cache, cache->batch_mask) {
		if (batch->ctx == ctx)
			fd_bc_invalidate_batch(batch, true);
	}

	mtx_unlock(&ctx->screen->lock);
}
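/* Remove a batch from the cache.  When `destroy` is set (e.g. context
 * teardown) the batches[] slot is released as well; otherwise only the
 * hashtable entry and the per-resource bc_batch_mask bits are cleaned up.
 * The caller must hold the screen lock (asserted below).
 */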
void
fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy)
{
	if (!batch)
		return;

	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct key *key = (struct key *)batch->key;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (destroy) {
		cache->batches[batch->idx] = NULL;
		cache->batch_mask &= ~(1 << batch->idx);
	}

	if (!key)
		return;

	DBG("%p: key=%p", batch, batch->key);
	for (unsigned idx = 0; idx < key->num_surfs; idx++) {
		struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
		rsc->bc_batch_mask &= ~(1 << batch->idx);
	}

	struct hash_entry *entry =
		_mesa_hash_table_search_pre_hashed(cache->ht, batch->hash, key);
	_mesa_hash_table_remove(cache->ht, entry);

	batch->key = NULL;
	free(key);
}
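/* Drop batch cache state referring to rsc: destroy=true when the resource
 * itself is being destroyed, destroy=false when only its current contents
 * are being invalidated while the resource object lives on.
 */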
void
fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy)
{
	struct fd_screen *screen = fd_screen(rsc->base.screen);
	struct fd_batch *batch;

	mtx_lock(&screen->lock);

	if (destroy) {
		foreach_batch(batch, &screen->batch_cache, rsc->batch_mask) {
			struct set_entry *entry = _mesa_set_search(batch->resources, rsc);
			_mesa_set_remove(batch->resources, entry);
		}
		rsc->batch_mask = 0;

		fd_batch_reference_locked(&rsc->write_batch, NULL);
	}

	foreach_batch(batch, &screen->batch_cache, rsc->bc_batch_mask)
		fd_bc_invalidate_batch(batch, false);

	rsc->bc_batch_mask = 0;

	mtx_unlock(&screen->lock);
}
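/* Allocate a slot in cache->batches[] for a new batch.  If every slot is in
 * use, force-flush one to free a slot; as a sketch, the eviction loop below
 * amounts to:
 *
 *    while (no zero bit in batch_mask):
 *       pick the flushable batch with the smallest seqno (oldest), skipping
 *       ctx->batch; flush it, then drop the references its dependents held
 */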
struct fd_batch *
fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx)
{
	struct fd_batch *batch;
	uint32_t idx;

	mtx_lock(&ctx->screen->lock);

	while ((idx = ffs(~cache->batch_mask)) == 0) {
#if 0
		for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
			batch = cache->batches[i];
			debug_printf("%d: needs_flush=%d, depends:", batch->idx, batch->needs_flush);
			struct set_entry *entry;
			set_foreach(batch->dependencies, entry) {
				struct fd_batch *dep = (struct fd_batch *)entry->key;
				debug_printf(" %d", dep->idx);
			}
			debug_printf("\n");
		}
#endif
		/* TODO: is LRU the better policy?  Or perhaps the batch that
		 * depends on the fewest other batches?
		 */
		struct fd_batch *flush_batch = NULL;
		for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
			if ((cache->batches[i] == ctx->batch) ||
					!cache->batches[i]->needs_flush)
				continue;
			if (!flush_batch || (cache->batches[i]->seqno < flush_batch->seqno))
				fd_batch_reference_locked(&flush_batch, cache->batches[i]);
		}

		/* We can drop the lock temporarily here: since we hold a ref,
		 * flush_batch won't disappear under us.
		 */
		mtx_unlock(&ctx->screen->lock);
		DBG("%p: too many batches!  flush forced!", flush_batch);
		fd_batch_flush(flush_batch, true, false);
		mtx_lock(&ctx->screen->lock);

		/* While the resources get cleaned up automatically, the flush_batch
		 * doesn't get removed from the dependencies of other batches, so
		 * it won't be unref'd and will remain in the table.
		 *
		 * TODO maybe keep a bitmask of batches that depend on me, to make
		 * this easier:
		 */
		for (unsigned i = 0; i < ARRAY_SIZE(cache->batches); i++) {
			struct fd_batch *other = cache->batches[i];
			if (!other)
				continue;
			if (other->dependents_mask & (1 << flush_batch->idx)) {
				other->dependents_mask &= ~(1 << flush_batch->idx);
				struct fd_batch *ref = flush_batch;
				fd_batch_reference_locked(&ref, NULL);
			}
		}

		fd_batch_reference_locked(&flush_batch, NULL);
	}

	idx--;              /* bit zero returns 1 for ffs() */

	batch = fd_batch_create(ctx, false);
	if (!batch)
		goto out;

	batch->seqno = cache->cnt++;
	batch->idx = idx;
	cache->batch_mask |= (1 << idx);

	debug_assert(cache->batches[idx] == NULL);
	cache->batches[idx] = batch;

out:
	mtx_unlock(&ctx->screen->lock);

	return batch;
}
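/* Look up (or create) the batch for a given key.  On a cache hit the key is
 * freed and the existing batch returned with an added reference; on a miss
 * the key is inserted and its ownership transfers to the new batch (freed
 * later by fd_bc_invalidate_batch()).
 */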
static struct fd_batch *
batch_from_key(struct fd_batch_cache *cache, struct key *key,
		struct fd_context *ctx)
{
	struct fd_batch *batch = NULL;
	uint32_t hash = key_hash(key);
	struct hash_entry *entry =
		_mesa_hash_table_search_pre_hashed(cache->ht, hash, key);

	if (entry) {
		free(key);
		fd_batch_reference(&batch, (struct fd_batch *)entry->data);
		return batch;
	}

	batch = fd_bc_alloc_batch(cache, ctx);
#ifdef DEBUG
	DBG("%p: hash=0x%08x, %ux%u, %u layers, %u samples", batch, hash,
			key->width, key->height, key->layers, key->samples);
	for (unsigned idx = 0; idx < key->num_surfs; idx++) {
		DBG("%p:  surf[%u]: %p (%s) (%u,%u / %u,%u,%u)", batch, key->surf[idx].pos,
				key->surf[idx].texture, util_format_name(key->surf[idx].format),
				key->surf[idx].u.buf.first_element, key->surf[idx].u.buf.last_element,
				key->surf[idx].u.tex.first_layer, key->surf[idx].u.tex.last_layer,
				key->surf[idx].u.tex.level);
	}
#endif
	if (!batch) {
		/* don't leak the key if batch allocation failed: */
		free(key);
		return NULL;
	}

	mtx_lock(&ctx->screen->lock);

	_mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
	batch->key = key;
	batch->hash = hash;

	for (unsigned idx = 0; idx < key->num_surfs; idx++) {
		struct fd_resource *rsc = fd_resource(key->surf[idx].texture);
		rsc->bc_batch_mask = (1 << batch->idx);
	}

	mtx_unlock(&ctx->screen->lock);

	return batch;
}
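/* Inline the pertinent fields of a pipe_surface into one key->surf[] slot.
 * `pos` records which attachment the surface was: 0 for zsbuf, n + 1 for
 * cbufs[n] (see fd_batch_from_fb() below).
 */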
static void
key_surf(struct key *key, unsigned idx, unsigned pos, struct pipe_surface *psurf)
{
	key->surf[idx].texture = psurf->texture;
	key->surf[idx].u = psurf->u;
	key->surf[idx].pos = pos;
	key->surf[idx].format = psurf->format;
}
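/* Build a key from a pipe_framebuffer_state and map it to a batch.  For
 * example (illustrative), a framebuffer with one color attachment plus a
 * zsbuf yields num_surfs == 2: surf[0] for the zsbuf (pos 0) and surf[1]
 * for cbufs[0] (pos 1).
 */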
struct fd_batch *
fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
		const struct pipe_framebuffer_state *pfb)
{
	unsigned idx = 0, n = pfb->nr_cbufs + (pfb->zsbuf ? 1 : 0);
	struct key *key = key_alloc(n);

	key->width = pfb->width;
	key->height = pfb->height;
	key->layers = pfb->layers;
	key->samples = pfb->samples;
	key->ctx = ctx;

	if (pfb->zsbuf)
		key_surf(key, idx++, 0, pfb->zsbuf);

	for (unsigned i = 0; i < pfb->nr_cbufs; i++)
		if (pfb->cbufs[i])
			key_surf(key, idx++, i + 1, pfb->cbufs[i]);

	key->num_surfs = idx;

	return batch_from_key(cache, key, ctx);
}