/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

27 #include "util/list.h"
29 #include "util/hash_table.h"
30 #include "util/u_string.h"
32 #include "freedreno_batch.h"
33 #include "freedreno_context.h"
34 #include "freedreno_fence.h"
35 #include "freedreno_resource.h"
36 #include "freedreno_query_hw.h"
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 *
	 * XXX I think we can just require new enough kernel for this?
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->submit = fd_submit_new(ctx->pipe);
	if (batch->nondraw) {
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
	} else {
		batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_GROWABLE);

		if (ctx->screen->gpu_id < 600) {
			batch->binning = fd_submit_new_ringbuffer(batch->submit,
					size, FD_RINGBUFFER_GROWABLE);
		}
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->fast_cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_vertices = 0;
	batch->stage = FD_STAGE_NULL;

	util_dynarray_init(&batch->draw_patches, NULL);

	if (is_a2xx(ctx->screen)) {
		util_dynarray_init(&batch->shader_patches, NULL);
		util_dynarray_init(&batch->gmem_patches, NULL);
	}

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

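/* Allocate a new batch along with the set used to track which
 * resources it references.
 */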
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

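/* Undo batch_init(): release the fence, rings and submit object, and
 * drop any outstanding hw-query sample references.
 */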
static void
batch_fini(struct fd_batch *batch)
{
	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(NULL, &batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		if (batch->binning)
			fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}

	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	if (batch->tile_setup) {
		fd_ringbuffer_del(batch->tile_setup);
		batch->tile_setup = NULL;
	}

	if (batch->tile_fini) {
		fd_ringbuffer_del(batch->tile_fini);
		batch->tile_fini = NULL;
	}

	fd_submit_del(batch->submit);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a2xx(batch->ctx->screen)) {
		util_dynarray_fini(&batch->shader_patches);
		util_dynarray_fini(&batch->gmem_patches);
	}

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

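/* Drop (and optionally flush) every batch that this batch depends on. */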
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

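/* Detach all resources tracked by this batch.  Caller must hold
 * screen->lock.
 */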
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

static void
batch_reset(struct fd_batch *batch)
{
	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

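/* Destroy callback for the batch reference counting; expects the
 * context lock to be held (see fd_context_assert_locked()).
 */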
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	fd_context_assert_locked(batch->ctx);

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

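/* Wait for a previously queued flush of this batch to complete; only
 * meaningful when reordering (and thus the flush_queue) is in use.
 */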
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

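/* The actual flush work: pause queries, flush dependencies, then
 * either queue the batch on flush_queue (reorder case) or render the
 * GMEM tiles directly.
 */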
static void
batch_flush(struct fd_batch *batch, bool force)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, false);
	mtx_unlock(&batch->ctx->screen->lock);
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
	struct fd_batch *tmp = NULL;
	bool newbatch = false;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	if (batch == batch->ctx->batch) {
		batch->ctx->batch = NULL;
		newbatch = true;
	}

	batch_flush(tmp, force);

	if (newbatch) {
		struct fd_context *ctx = batch->ctx;
		struct fd_batch *new_batch;

		if (ctx->screen->reorder) {
			/* defer allocating new batch until one is needed for rendering
			 * to avoid unused batches for apps that create many contexts
			 */
			new_batch = NULL;
		} else {
			new_batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, false);
			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
		}

		fd_batch_reference(&batch, NULL);
		ctx->batch = new_batch;
		fd_context_all_dirty(ctx);
	}

	if (sync)
		fd_batch_sync(tmp);

	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into direct dependencies to catch indirect ones: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

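/* Record that 'batch' depends on 'dep'.  Caller must hold screen->lock
 * (note the _locked reference helper below).
 */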
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!batch_depends_on(dep, batch));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

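/* Flush the batch that currently writes 'rsc', temporarily dropping
 * the screen lock around the flush.
 */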
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference_locked(&b, rsc->write_batch);

	mtx_unlock(&b->ctx->screen->lock);
	fd_batch_flush(b, true, false);
	mtx_lock(&b->ctx->screen->lock);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

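/* Track 'rsc' as read or written by this batch, adding inter-batch
 * dependencies and flushing other writers as needed.  Caller must hold
 * screen->lock.
 */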
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	if (write)
		rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask & ~(1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;

			if (rsc->write_batch && rsc->write_batch != batch)
				flush_write_batch(rsc);

			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				fd_batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		/* If reading a resource pending a write, go ahead and flush the
		 * writer.  This avoids situations where we end up having to
		 * flush the current batch in _resource_used()
		 */
		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);
	}

	if (rsc->batch_mask & (1 << batch->idx)) {
		debug_assert(_mesa_set_search(batch->resources, rsc));
		return;
	}

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

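/* Flush early if the (non-growable) draw ring is getting close to full,
 * or unconditionally when FD_DBG_FLUSH is set.
 */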
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch, true, false);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}