freedreno: add non-draw batches for compute/blit
src/gallium/drivers/freedreno/freedreno_batch.c (mesa.git)
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

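/* per-batch init of rings, fence and tracking state, shared between
 * fd_batch_create() and batch_reset():
 */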
static void
batch_init(struct fd_batch *batch)
{
   struct fd_context *ctx = batch->ctx;
   unsigned size = 0;

   if (ctx->screen->reorder)
      util_queue_fence_init(&batch->flush_fence);

   /* if kernel is too old to support unlimited # of cmd buffers, we
    * have no option but to allocate large worst-case sizes so that
    * we don't need to grow the ringbuffer.  Performance is likely to
    * suffer, but there is no good alternative.
    */
   if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
         (fd_mesa_debug & FD_DBG_NOGROW)) {
      size = 0x100000;
   }

   batch->draw = fd_ringbuffer_new(ctx->pipe, size);
   if (!batch->nondraw) {
      batch->binning = fd_ringbuffer_new(ctx->pipe, size);
      batch->gmem = fd_ringbuffer_new(ctx->pipe, size);

      fd_ringbuffer_set_parent(batch->gmem, NULL);
      fd_ringbuffer_set_parent(batch->draw, batch->gmem);
      fd_ringbuffer_set_parent(batch->binning, batch->gmem);
   } else {
      fd_ringbuffer_set_parent(batch->draw, NULL);
   }

   batch->in_fence_fd = -1;
   batch->fence = fd_fence_create(batch);

   batch->cleared = batch->partial_cleared = 0;
   batch->restore = batch->resolve = 0;
   batch->needs_flush = false;
   batch->gmem_reason = 0;
   batch->num_draws = 0;
   batch->stage = FD_STAGE_NULL;

   fd_reset_wfi(batch);

   /* reset maximal bounds: */
   batch->max_scissor.minx = batch->max_scissor.miny = ~0;
   batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

   util_dynarray_init(&batch->draw_patches, NULL);

   if (is_a3xx(ctx->screen))
      util_dynarray_init(&batch->rbrc_patches, NULL);

   assert(batch->resources->entries == 0);

   util_dynarray_init(&batch->samples, NULL);
}

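/* create a new batch; a "nondraw" batch is used for compute/blit work
 * that bypasses the binning/gmem pipeline and so only needs the single
 * 'draw' ring:
 */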
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
   struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

   if (!batch)
      return NULL;

   DBG("%p", batch);

   pipe_reference_init(&batch->reference, 1);
   batch->ctx = ctx;
   batch->nondraw = nondraw;

   batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
         _mesa_key_pointer_equal);

   batch_init(batch);

   return batch;
}

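/* tear down the state set up by batch_init(); does not free the batch
 * itself, so batch_reset() can recycle a batch in place:
 */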
static void
batch_fini(struct fd_batch *batch)
{
   pipe_resource_reference(&batch->query_buf, NULL);

   if (batch->in_fence_fd != -1)
      close(batch->in_fence_fd);

   /* in case batch wasn't flushed but fence was created: */
   fd_fence_populate(batch->fence, 0, -1);

   fd_fence_ref(NULL, &batch->fence, NULL);

   fd_ringbuffer_del(batch->draw);
   if (!batch->nondraw) {
      fd_ringbuffer_del(batch->binning);
      fd_ringbuffer_del(batch->gmem);
   } else {
      debug_assert(!batch->binning);
      debug_assert(!batch->gmem);
   }
   if (batch->lrz_clear) {
      fd_ringbuffer_del(batch->lrz_clear);
      batch->lrz_clear = NULL;
   }

   util_dynarray_fini(&batch->draw_patches);

   if (is_a3xx(batch->ctx->screen))
      util_dynarray_fini(&batch->rbrc_patches);

   while (batch->samples.size > 0) {
      struct fd_hw_sample *samp =
         util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
      fd_hw_sample_reference(batch->ctx, &samp, NULL);
   }
   util_dynarray_fini(&batch->samples);

   if (batch->ctx->screen->reorder)
      util_queue_fence_destroy(&batch->flush_fence);
}

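/* drop (and optionally flush) all of the batches that this batch
 * depends on:
 */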
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   foreach_batch(dep, cache, batch->dependents_mask) {
      if (flush)
         fd_batch_flush(dep, false, false);
      fd_batch_reference(&dep, NULL);
   }

   batch->dependents_mask = 0;
}

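/* unlink all resources tracked by the batch, clearing their batch_mask
 * bit and write_batch pointer; caller must hold the screen lock (the
 * unlocked wrapper follows below):
 */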
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
   struct set_entry *entry;

   pipe_mutex_assert_locked(batch->ctx->screen->lock);

   set_foreach(batch->resources, entry) {
      struct fd_resource *rsc = (struct fd_resource *)entry->key;
      _mesa_set_remove(batch->resources, entry);
      debug_assert(rsc->batch_mask & (1 << batch->idx));
      rsc->batch_mask &= ~(1 << batch->idx);
      if (rsc->write_batch == batch)
         fd_batch_reference_locked(&rsc->write_batch, NULL);
   }
}

static void
batch_reset_resources(struct fd_batch *batch)
{
   mtx_lock(&batch->ctx->screen->lock);
   batch_reset_resources_locked(batch);
   mtx_unlock(&batch->ctx->screen->lock);
}

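/* discard any unflushed state and re-initialize the batch for reuse: */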
static void
batch_reset(struct fd_batch *batch)
{
   DBG("%p", batch);

   fd_batch_sync(batch);

   batch_flush_reset_dependencies(batch, false);
   batch_reset_resources(batch);

   batch_fini(batch);
   batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
   if (batch->needs_flush)
      batch_reset(batch);
}

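/* called when the last reference to the batch is dropped: */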
void
__fd_batch_destroy(struct fd_batch *batch)
{
   DBG("%p", batch);

   util_copy_framebuffer_state(&batch->framebuffer, NULL);

   mtx_lock(&batch->ctx->screen->lock);
   fd_bc_invalidate_batch(batch, true);
   mtx_unlock(&batch->ctx->screen->lock);

   batch_fini(batch);

   batch_reset_resources(batch);
   debug_assert(batch->resources->entries == 0);
   _mesa_set_destroy(batch->resources, NULL);

   batch_flush_reset_dependencies(batch, false);
   debug_assert(batch->dependents_mask == 0);

   free(batch);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
   util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

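/* wait for a batch that was flushed asynchronously via the flush_queue
 * to actually make it to the kernel:
 */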
void
fd_batch_sync(struct fd_batch *batch)
{
   if (!batch->ctx->screen->reorder)
      return;
   util_queue_fence_wait(&batch->flush_fence);
}

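/* flush_queue job/cleanup callbacks; the cleanup drops the reference
 * taken when the job was queued:
 */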
static void
batch_flush_func(void *job, int id)
{
   struct fd_batch *batch = job;

   fd_gmem_render_tiles(batch);
   batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
   struct fd_batch *batch = job;
   fd_batch_reference(&batch, NULL);
}

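/* inner flush, handing the batch off to the gmem code either directly
 * or via the flush_queue when reordering is enabled:
 */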
static void
batch_flush(struct fd_batch *batch, bool force)
{
   DBG("%p: needs_flush=%d", batch, batch->needs_flush);

   if (!batch->needs_flush) {
      if (force) {
         fd_gmem_render_noop(batch);
         goto out;
      }
      return;
   }

   batch->needs_flush = false;

   /* close out the draw cmds by making sure any active queries are
    * paused:
    */
   fd_batch_set_stage(batch, FD_STAGE_NULL);

   fd_context_all_dirty(batch->ctx);
   batch_flush_reset_dependencies(batch, true);

   if (batch->ctx->screen->reorder) {
      struct fd_batch *tmp = NULL;
      fd_batch_reference(&tmp, batch);

      if (!util_queue_is_initialized(&batch->ctx->flush_queue))
         util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

      util_queue_add_job(&batch->ctx->flush_queue,
            batch, &batch->flush_fence,
            batch_flush_func, batch_cleanup_func);
   } else {
      fd_gmem_render_tiles(batch);
      batch_reset_resources(batch);
   }

   debug_assert(batch->reference.count > 0);

out:
   if (batch == batch->ctx->batch) {
      batch_reset(batch);
   } else {
      mtx_lock(&batch->ctx->screen->lock);
      fd_bc_invalidate_batch(batch, false);
      mtx_unlock(&batch->ctx->screen->lock);
   }
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
   /* NOTE: we need to hold an extra ref across the body of flush,
    * since the last ref to this batch could be dropped when cleaning
    * up used_resources
    */
   struct fd_batch *tmp = NULL;

   fd_batch_reference(&tmp, batch);
   batch_flush(tmp, force);
   if (sync)
      fd_batch_sync(tmp);
   fd_batch_reference(&tmp, NULL);
}

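/* A minimal usage sketch (illustrative, not part of this file): a caller
 * that needs a fence to wait on, even if nothing was rendered, grabs a
 * reference to the fence *before* flushing (the flush can reset the
 * batch) and then forces a synchronous flush:
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    fd_fence_ref(pscreen, &fence, ctx->batch->fence);
 *    fd_batch_flush(ctx->batch, true, true);
 */
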
/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
   struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
   struct fd_batch *dep;

   if (batch->dependents_mask & (1 << other->idx))
      return true;

   /* the direct case was handled above, so recurse into each direct
    * dependency looking for 'other' further down the chain:
    */
   foreach_batch(dep, cache, batch->dependents_mask)
      if (batch_depends_on(dep, other))
         return true;

   return false;
}

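/* make 'batch' depend on 'dep'; if that would create a cycle in the
 * dependency graph, flush 'dep' instead; called with screen lock held:
 */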
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
   if (batch->dependents_mask & (1 << dep->idx))
      return;

   /* if the new dependency already depends on us, we need to flush
    * it to avoid a loop in the dependency graph.
    */
   if (batch_depends_on(dep, batch)) {
      DBG("%p: flush forced on %p!", batch, dep);
      mtx_unlock(&batch->ctx->screen->lock);
      fd_batch_flush(dep, false, false);
      mtx_lock(&batch->ctx->screen->lock);
   } else {
      struct fd_batch *other = NULL;
      fd_batch_reference_locked(&other, dep);
      batch->dependents_mask |= (1 << dep->idx);
      DBG("%p: added dependency on %p", batch, dep);
   }
}

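/* track that 'rsc' is read or written by this batch, adding the
 * inter-batch dependencies needed to keep access ordered:
 */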
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
   pipe_mutex_assert_locked(batch->ctx->screen->lock);

   if (rsc->stencil)
      fd_batch_resource_used(batch, rsc->stencil, write);

   DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

   if (write)
      rsc->valid = true;

   /* note, invalidate write batch, to avoid further writes to rsc
    * resulting in a write-after-read hazard.
    */

   if (write) {
      /* if we are pending read or write by any other batch: */
      if (rsc->batch_mask != (1 << batch->idx)) {
         struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
         struct fd_batch *dep;
         foreach_batch(dep, cache, rsc->batch_mask) {
            struct fd_batch *b = NULL;
            if (dep == batch)
               continue;
            /* note that batch_add_dep could flush and unref dep, so
             * we need to hold a reference to keep it live for the
             * fd_bc_invalidate_batch()
             */
            fd_batch_reference(&b, dep);
            fd_batch_add_dep(batch, b);
            fd_bc_invalidate_batch(b, false);
            fd_batch_reference_locked(&b, NULL);
         }
      }
      fd_batch_reference_locked(&rsc->write_batch, batch);
   } else {
      if (rsc->write_batch) {
         fd_batch_add_dep(batch, rsc->write_batch);
         fd_bc_invalidate_batch(rsc->write_batch, false);
      }
   }

   if (rsc->batch_mask & (1 << batch->idx))
      return;

   debug_assert(!_mesa_set_search(batch->resources, rsc));

   _mesa_set_add(batch->resources, rsc);
   rsc->batch_mask |= (1 << batch->idx);
}

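/* on kernels without support for unlimited cmd buffers, flush before
 * the fixed-size ring fills up:
 */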
void
fd_batch_check_size(struct fd_batch *batch)
{
   if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
      return;

   struct fd_ringbuffer *ring = batch->draw;
   if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
         (fd_mesa_debug & FD_DBG_FLUSH))
      fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   if (batch->needs_wfi) {
      if (batch->ctx->screen->gpu_id >= 500)
         OUT_WFI5(ring);
      else
         OUT_WFI(ring);
      batch->needs_wfi = false;
   }
}