/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"
static void
batch_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   unsigned size = 0;

   if (ctx->screen->reorder)
      util_queue_fence_init(&batch->flush_fence);

   /* if kernel is too old to support unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    */
   if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) {
      size = 0x100000;   /* worst-case size */
   }

   batch->draw    = fd_ringbuffer_new(ctx->screen->pipe, size);
   batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
   batch->gmem    = fd_ringbuffer_new(ctx->screen->pipe, size);

   fd_ringbuffer_set_parent(batch->gmem, NULL);
   fd_ringbuffer_set_parent(batch->draw, batch->gmem);
   fd_ringbuffer_set_parent(batch->binning, batch->gmem);

   batch->cleared = batch->partial_cleared = 0;
   batch->restore = batch->resolve = 0;
   batch->needs_flush = false;
   batch->gmem_reason = 0;
   batch->stage = FD_STAGE_NULL;

   /* reset maximal bounds: */
   batch->max_scissor.minx = batch->max_scissor.miny = ~0;
   batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

   util_dynarray_init(&batch->draw_patches);

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches);

   assert(batch->resources->entries == 0);

   util_dynarray_init(&batch->samples);
}
struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;

   batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
         _mesa_key_pointer_equal);

   batch_init(batch);

   return batch;
}
static void
batch_fini(struct fd_batch *batch)
{
   pipe_resource_reference(&batch->query_buf, NULL);

   fd_ringbuffer_del(batch->draw);
   fd_ringbuffer_del(batch->binning);
   fd_ringbuffer_del(batch->gmem);

   util_dynarray_fini(&batch->draw_patches);

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   if (batch->ctx->screen->reorder)
      util_queue_fence_destroy(&batch->flush_fence);
}
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch(dep, cache, batch->dependents_mask) {
      if (flush)
         fd_batch_flush(dep, false);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}
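/* Detach all resources tracked by this batch: clear the batch's bit in
 * each resource's batch_mask, and drop the resource's write_batch pointer
 * if it points at this batch.
 */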
static void
batch_reset_resources(struct fd_batch *batch)
{
   struct set_entry *entry;

   set_foreach(batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      debug_assert(rsc->batch_mask & (1 << batch->idx));
      rsc->batch_mask &= ~(1 << batch->idx);
      if (rsc->write_batch == batch)
         fd_batch_reference(&rsc->write_batch, NULL);
   }
}
static void
batch_reset(struct fd_batch *batch)
{
   fd_batch_sync(batch);

   batch_flush_reset_dependencies(batch, false);
   batch_reset_resources(batch);

   batch_fini(batch);
   batch_init(batch);
}
void
fd_batch_reset(struct fd_batch *batch)
{
   if (batch->needs_flush)
      batch_reset(batch);
}
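/* Called when the last reference to the batch is dropped; removes the
 * batch from the batch cache and frees all of its state.
 */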
void
__fd_batch_destroy(struct fd_batch *batch)
{
   fd_bc_invalidate_batch(batch, true);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);

   batch_reset_resources(batch);
   debug_assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   batch_flush_reset_dependencies(batch, false);
   debug_assert(batch->dependents_mask == 0);

   batch_fini(batch);
   free(batch);
}
void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
   util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}
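/* Wait for an earlier asynchronous flush of this batch to complete.  This
 * is a no-op when reordering is disabled, since flushes are then performed
 * synchronously.
 */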
void
fd_batch_sync(struct fd_batch *batch)
{
   if (!batch->ctx->screen->reorder)
      return;

   util_queue_job_wait(&batch->flush_fence);
}
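/* Job and cleanup callbacks run on the flush queue when reordering is
 * enabled: batch_flush_func() performs the actual gmem rendering, and
 * batch_cleanup_func() drops the reference taken when the job was queued.
 */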
static void
batch_flush_func(void *job, int id)
{
   struct fd_batch *batch = job;

   fd_gmem_render_tiles(batch);
   batch_reset_resources(batch);
   batch->ctx->last_fence = fd_ringbuffer_timestamp(batch->gmem);
}
static void
batch_cleanup_func(void *job, int id)
{
   struct fd_batch *batch = job;
   fd_batch_reference(&batch, NULL);
}
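/* Flush the batch: pause any active hw queries, then either hand the
 * rendering off to the flush queue (reorder case) or render the tiles
 * directly.  Afterwards the batch is reset (if it is the context's
 * current batch) or removed from the batch cache.
 */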
static void
batch_flush(struct fd_batch *batch)
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (!batch->needs_flush)
      return;

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_NULL);

   batch->ctx->dirty = ~0;
   batch_flush_reset_dependencies(batch, true);

   if (batch->ctx->screen->reorder) {
      struct fd_batch *tmp = NULL;
      fd_batch_reference(&tmp, batch);
      util_queue_add_job(&batch->ctx->flush_queue,
            batch, &batch->flush_fence,
            batch_flush_func, batch_cleanup_func);
   } else {
      fd_gmem_render_tiles(batch);
      batch_reset_resources(batch);
      batch->ctx->last_fence = fd_ringbuffer_timestamp(batch->gmem);
   }

   debug_assert(batch->reference.count > 0);

   if (batch == batch->ctx->batch) {
      batch_reset(batch);
   } else {
      fd_bc_invalidate_batch(batch, false);
   }
}
/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up its tracked resources.
    */
   struct fd_batch *tmp = NULL;

   fd_batch_reference(&tmp, batch);
   batch_flush(tmp);
   if (sync)
      fd_batch_sync(tmp);
   fd_batch_reference(&tmp, NULL);
}
/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   if (batch->dependents_mask & (1 << other->idx))
      return true;

   foreach_batch(dep, cache, batch->dependents_mask)
      if (batch_depends_on(dep, other))
         return true;

   return false;
}
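/* Record that 'batch' depends on 'dep', so that 'dep' gets flushed before
 * 'batch'.  If the new edge would create a cycle in the dependency graph,
 * 'dep' is flushed first to break it.
 */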
static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   if (batch->dependents_mask & (1 << dep->idx))
      return;

   /* if the new dependency already depends on us, we need to flush
    * to avoid a loop in the dependency graph.
    */
   if (batch_depends_on(dep, batch)) {
      DBG("%p: flush forced on %p!", batch, dep);
      fd_batch_flush(dep, false);
   }

   struct fd_batch *other = NULL;
   fd_batch_reference(&other, dep);
   batch->dependents_mask |= (1 << dep->idx);
   DBG("%p: added dependency on %p", batch, dep);
}
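/* Track that 'rsc' is read or written by 'batch'.  Writes force any other
 * batches touching the resource to become dependencies of this batch, and
 * reads add a dependency on the resource's current write_batch, so that
 * hazards are resolved by flush ordering.
 */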
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
   if (rsc->stencil)
      fd_batch_resource_used(batch, rsc->stencil, write);

   DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

   /* note, invalidate write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */

   if (write) {
      /* if we are pending read or write by any other batch: */
      if (rsc->batch_mask != (1 << batch->idx)) {
         struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
         struct fd_batch *dep;

         foreach_batch(dep, cache, rsc->batch_mask) {
            struct fd_batch *b = NULL;
            /* note that batch_add_dep could flush and unref dep, so
             * we need to hold a reference to keep it live for the
             * fd_bc_invalidate_batch()
             */
            fd_batch_reference(&b, dep);
            batch_add_dep(batch, b);
            fd_bc_invalidate_batch(b, false);
            fd_batch_reference_locked(&b, NULL);
         }
      }
      fd_batch_reference(&rsc->write_batch, batch);
   } else {
      if (rsc->write_batch) {
         batch_add_dep(batch, rsc->write_batch);
         fd_bc_invalidate_batch(rsc->write_batch, false);
      }
   }

   if (rsc->batch_mask & (1 << batch->idx))
      return;

   debug_assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add(batch->resources, rsc);
   rsc->batch_mask |= (1 << batch->idx);
}
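/* On kernels without FD_VERSION_UNLIMITED_CMDS the ringbuffers cannot
 * grow, so flush early once the draw ring is getting close to full (or
 * always, when FD_DBG_FLUSH debugging is enabled).
 */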
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
      return;

   struct fd_ringbuffer *ring = batch->draw;
   if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
         (fd_mesa_debug & FD_DBG_FLUSH))
      fd_batch_flush(batch, true);
}