/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

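/* (Re)initialize per-batch state: the submit object and its ringbuffers,
 * the batch fence, and the various counters, patch lists and containers.
 * Shared between fd_batch_create() and batch_reset().
 */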
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	enum fd_ringbuffer_flags flags = 0;
	unsigned size = 0;

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 *
	 * XXX I think we can just require new enough kernel for this?
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	} else {
		flags = FD_RINGBUFFER_GROWABLE;
	}

	batch->submit = fd_submit_new(ctx->pipe);
	if (batch->nondraw) {
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | flags);
	} else {
		batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
				FD_RINGBUFFER_PRIMARY | flags);
		batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
				flags);

		if (ctx->screen->gpu_id < 600) {
			batch->binning = fd_submit_new_ringbuffer(batch->submit,
					size, flags);
		}
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->fast_cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->num_vertices = 0;
	batch->num_bins_per_pipe = 0;
	batch->prim_strm_bits = 0;
	batch->draw_strm_bits = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);
	util_dynarray_init(&batch->fb_read_patches, NULL);

	if (is_a2xx(ctx->screen)) {
		util_dynarray_init(&batch->shader_patches, NULL);
		util_dynarray_init(&batch->gmem_patches, NULL);
	}

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);

	list_inithead(&batch->log_chunks);
}

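/* Create a new batch.  The caller must hold the screen lock (see the
 * fd_screen_assert_locked() below).  A rough usage sketch; in practice
 * batches are normally obtained via the batch cache rather than created
 * directly:
 *
 *    fd_screen_lock(ctx->screen);
 *    struct fd_batch *batch = fd_batch_create(ctx, false);
 *    fd_screen_unlock(ctx->screen);
 *    ...
 *    fd_batch_reference(&batch, NULL);   // drop the initial reference
 */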
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	fd_screen_assert_locked(ctx->screen);
	if (BATCH_DEBUG) {
		_mesa_set_add(ctx->screen->live_batches, batch);
	}

	return batch;
}

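/* Tear down everything batch_init() set up.  Also used from batch_reset(),
 * so it must leave the batch in a state where batch_init() can run again.
 */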
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(&batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		if (batch->binning)
			fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}

	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	if (batch->epilogue) {
		fd_ringbuffer_del(batch->epilogue);
		batch->epilogue = NULL;
	}

	if (batch->tile_setup) {
		fd_ringbuffer_del(batch->tile_setup);
		batch->tile_setup = NULL;
	}

	if (batch->tile_fini) {
		fd_ringbuffer_del(batch->tile_fini);
		batch->tile_fini = NULL;
	}

	if (batch->tessellation) {
		fd_bo_del(batch->tessfactor_bo);
		fd_bo_del(batch->tessparam_bo);
		fd_ringbuffer_del(batch->tess_addrs_constobj);
	}

	fd_submit_del(batch->submit);

	util_dynarray_fini(&batch->draw_patches);
	util_dynarray_fini(&batch->fb_read_patches);

	if (is_a2xx(batch->ctx->screen)) {
		util_dynarray_fini(&batch->shader_patches);
		util_dynarray_fini(&batch->gmem_patches);
	}

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	assert(list_is_empty(&batch->log_chunks));
}

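/* Drop this batch's references to its dependent batches, optionally
 * flushing them first.
 */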
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

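/* Detach the batch from all resources it references: clear the batch's
 * bit in each resource's batch_mask, and drop the resource's write_batch
 * reference if this batch was the writer.
 */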
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	fd_screen_assert_locked(batch->ctx->screen);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	fd_screen_lock(batch->ctx->screen);
	batch_reset_resources_locked(batch);
	fd_screen_unlock(batch->ctx->screen);
}

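/* Return the batch to a freshly-initialized state without destroying it:
 * drop dependencies and tracked resources, then re-run the fini/init pair.
 */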
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

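/* Called by the reference-counting machinery (fd_batch_reference() and
 * friends) when the last reference is dropped; not meant to be called
 * directly.  Expects the context lock to be held, and temporarily drops
 * it around the teardown work.
 */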
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	if (BATCH_DEBUG) {
		_mesa_set_remove_key(ctx->screen->live_batches, batch);
	}

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

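/* Debug-describe callback used by the batch reference-counting debug
 * machinery; buf is assumed to be large enough for the formatted name.
 */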
void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	sprintf(buf, "fd_batch<%u>", batch->seqno);
}

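/* The internal flush path: pause queries, flush dependent batches, record
 * the fence, kick off GMEM rendering, and finally detach the batch from
 * its resources and the batch cache.
 */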
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	fd_fence_ref(&batch->ctx->last_fence, batch->fence);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);

	debug_assert(batch->reference.count > 0);

	fd_screen_lock(batch->ctx->screen);
	fd_bc_invalidate_batch(batch, false);
	fd_screen_unlock(batch->ctx->screen);
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch)
{
	struct fd_batch *tmp = NULL;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	batch_flush(tmp);

	if (batch == batch->ctx->batch) {
		fd_batch_reference(&batch->ctx->batch, NULL);
	}

	fd_batch_reference(&tmp, NULL);
}

/* find a batch's dependents mask, including recursive dependencies: */
static uint32_t
recursive_dependents_mask(struct fd_batch *batch)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;
	uint32_t dependents_mask = batch->dependents_mask;

	foreach_batch(dep, cache, batch->dependents_mask)
		dependents_mask |= recursive_dependents_mask(dep);

	return dependents_mask;
}

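/* Record that batch depends on dep, i.e. that dep must be flushed before
 * batch.  Takes a reference on dep; dependency cycles are not allowed
 * (see the assert below).
 */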
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	fd_screen_assert_locked(batch->ctx->screen);

	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!((1 << batch->idx) & recursive_dependents_mask(dep)));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

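/* Flush the batch that is currently writing rsc.  The caller holds the
 * screen lock; it is dropped around fd_batch_flush() since flushing may
 * itself need to take the lock.  A reference is held across the flush
 * because flushing could drop the last ref to the write batch.
 */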
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference_locked(&b, rsc->write_batch);

	fd_screen_unlock(b->ctx->screen);
	fd_batch_flush(b);
	fd_screen_lock(b->ctx->screen);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

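/* Add rsc to the set of resources the batch references, and mark the
 * batch in the resource's batch_mask.
 */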
static void
fd_batch_add_resource(struct fd_batch *batch, struct fd_resource *rsc)
{
	if (likely(fd_batch_references_resource(batch, rsc))) {
		debug_assert(_mesa_set_search(batch->resources, rsc));
		return;
	}

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

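/* Mark rsc as written by batch.  Any other batch that reads or writes rsc
 * is either flushed (the previous writer) or recorded as a dependency of
 * this batch, so that it gets flushed first.
 */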
void
fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc)
{
	fd_screen_assert_locked(batch->ctx->screen);

	if (rsc->stencil)
		fd_batch_resource_write(batch, rsc->stencil);

	DBG("%p: write %p", batch, rsc);

	rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */
	/* if we are pending read or write by any other batch: */
	if (unlikely(rsc->batch_mask & ~(1 << batch->idx))) {
		struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
		struct fd_batch *dep;

		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);

		foreach_batch(dep, cache, rsc->batch_mask) {
			struct fd_batch *b = NULL;
			if (dep == batch)
				continue;
			/* note that batch_add_dep could flush and unref dep, so
			 * we need to hold a reference to keep it live for the
			 * fd_bc_invalidate_batch()
			 */
			fd_batch_reference(&b, dep);
			fd_batch_add_dep(batch, b);
			fd_bc_invalidate_batch(b, false);
			fd_batch_reference_locked(&b, NULL);
		}
	}
	fd_batch_reference_locked(&rsc->write_batch, batch);

	fd_batch_add_resource(batch, rsc);
}

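/* Slow path of resource-read tracking (presumably paired with an inline
 * fast path in the header that skips the call when the batch already
 * references rsc).
 */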
void
fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc)
{
	fd_screen_assert_locked(batch->ctx->screen);

	if (rsc->stencil)
		fd_batch_resource_read(batch, rsc->stencil);

	DBG("%p: read %p", batch, rsc);

	/* If reading a resource pending a write, go ahead and flush the
	 * writer.  This avoids situations where we end up having to
	 * flush the current batch in _resource_used()
	 */
	if (unlikely(rsc->write_batch && rsc->write_batch != batch))
		flush_write_batch(rsc);

	fd_batch_add_resource(batch, rsc);
}

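/* Flush the batch if the draw ring is in danger of filling up.  Only
 * relevant on kernels without FD_VERSION_UNLIMITED_CMDS, where the ring
 * is a fixed (non-growable) size: ring->size is in bytes while cur and
 * start are dword pointers, hence the size/4, with 0x1000 dwords of
 * headroom kept in reserve.
 */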
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch);
}

/* emit a WAIT_FOR_IDLE only if needed, i.e. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}