util: use C99 declaration in the for-loop set_foreach() macro
mesa.git: src/gallium/drivers/freedreno/freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_fence.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

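/* Per-batch initialization: allocate the command ringbuffers and fence,
 * and reset all the bookkeeping state.  Also called when a batch is
 * recycled via batch_reset().
 */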
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->draw = fd_ringbuffer_new(ctx->pipe, size);
	if (!batch->nondraw) {
		batch->gmem = fd_ringbuffer_new(ctx->pipe, size);

		fd_ringbuffer_set_parent(batch->gmem, NULL);
		fd_ringbuffer_set_parent(batch->draw, batch->gmem);

		if (ctx->screen->gpu_id < 600) {
			batch->binning = fd_ringbuffer_new(ctx->pipe, size);
			fd_ringbuffer_set_parent(batch->binning, batch->gmem);
		}
	} else {
		fd_ringbuffer_set_parent(batch->draw, NULL);
	}

	batch->in_fence_fd = -1;
	batch->fence = fd_fence_create(batch);

	batch->cleared = 0;
	batch->invalidated = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->flushed = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	util_dynarray_init(&batch->draw_patches, NULL);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	util_dynarray_init(&batch->gmem_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

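/* Create a new batch.  A "nondraw" batch skips the gmem and binning
 * rings, since it will not go through the tiling/gmem path.
 */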
struct fd_batch *
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;
	batch->nondraw = nondraw;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

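/* Tear down the per-batch state set up by batch_init(): release the
 * rings, fence, and dynarrays.  Used both when destroying a batch and
 * when resetting one for reuse.
 */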
static void
batch_fini(struct fd_batch *batch)
{
	DBG("%p", batch);

	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	/* in case batch wasn't flushed but fence was created: */
	fd_fence_populate(batch->fence, 0, -1);

	fd_fence_ref(NULL, &batch->fence, NULL);

	fd_ringbuffer_del(batch->draw);
	if (!batch->nondraw) {
		if (batch->binning)
			fd_ringbuffer_del(batch->binning);
		fd_ringbuffer_del(batch->gmem);
	} else {
		debug_assert(!batch->binning);
		debug_assert(!batch->gmem);
	}
	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	util_dynarray_fini(&batch->gmem_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

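/* Drop this batch's references to the batches it depends on, optionally
 * flushing them first, and clear the dependents mask.
 */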
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

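/* Detach all resources tracked by this batch (caller must hold the
 * screen lock): remove each one from the resource set, clear this
 * batch's bit in its batch_mask, and drop any write_batch pointer back
 * to us.
 */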
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

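/* Discard any unflushed rendering and return the batch to a freshly
 * initialized state, dropping its dependencies and tracked resources.
 */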
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

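/* Tear down the batch once the last reference has been dropped.  The
 * caller holds the context lock; it is temporarily released while
 * flushing dependencies.
 */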
void
__fd_batch_destroy(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;

	DBG("%p", batch);

	fd_context_assert_locked(batch->ctx);

	fd_bc_invalidate_batch(batch, true);

	batch_reset_resources_locked(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	fd_context_unlock(ctx);
	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);
	batch_fini(batch);
	free(batch);
	fd_context_lock(ctx);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

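/* Wait until any asynchronous flush of this batch (on the context's
 * flush_queue) has completed.  No-op when reordering is disabled, since
 * flushes are then synchronous.
 */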
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	DBG("%p", batch);

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

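/* Internal flush: pauses queries, flushes dependencies, and then either
 * queues the gmem rendering on the flush_queue (reorder case) or renders
 * the tiles directly.
 */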
static void
batch_flush(struct fd_batch *batch, bool force)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (batch->flushed)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	batch_flush_reset_dependencies(batch, true);

	batch->flushed = true;

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, false);
	mtx_unlock(&batch->ctx->screen->lock);
}

/* NOTE: could drop the last ref to batch
 *
 * @sync: synchronize with flush_queue, ensures batch is *actually* flushed
 *   to kernel before this returns, as opposed to just being queued to be
 *   flushed
 * @force: force a flush even if no rendering, mostly useful if you need
 *   a fence to sync on
 */
void
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
	struct fd_batch *tmp = NULL;
	bool newbatch = false;

	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	fd_batch_reference(&tmp, batch);

	if (batch == batch->ctx->batch) {
		batch->ctx->batch = NULL;
		newbatch = true;
	}

	batch_flush(tmp, force);

	if (newbatch) {
		struct fd_context *ctx = batch->ctx;
		struct fd_batch *new_batch;

		if (ctx->screen->reorder) {
			/* defer allocating new batch until one is needed for rendering
			 * to avoid unused batches for apps that create many contexts
			 */
			new_batch = NULL;
		} else {
			new_batch = fd_batch_create(ctx, false);
			util_copy_framebuffer_state(&new_batch->framebuffer, &batch->framebuffer);
		}

		fd_batch_reference(&batch, NULL);
		ctx->batch = new_batch;
	}

	if (sync)
		fd_batch_sync(tmp);

	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into each direct dependency to catch indirect deps: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

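/* Record that 'batch' depends on 'dep' (i.e. dep must be flushed before
 * batch), taking a reference on dep and setting its bit in the
 * dependents mask.  Caller must hold the screen lock.
 */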
void
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* a loop should not be possible */
	debug_assert(!batch_depends_on(dep, batch));

	struct fd_batch *other = NULL;
	fd_batch_reference_locked(&other, dep);
	batch->dependents_mask |= (1 << dep->idx);
	DBG("%p: added dependency on %p", batch, dep);
}

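/* Flush the batch that is currently writing 'rsc'.  The screen lock is
 * dropped around the flush (which may itself need the lock) and
 * re-acquired afterwards.
 */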
static void
flush_write_batch(struct fd_resource *rsc)
{
	struct fd_batch *b = NULL;
	fd_batch_reference(&b, rsc->write_batch);

	mtx_unlock(&b->ctx->screen->lock);
	fd_batch_flush(b, true, false);
	mtx_lock(&b->ctx->screen->lock);

	fd_bc_invalidate_batch(b, false);
	fd_batch_reference_locked(&b, NULL);
}

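/* Track that 'rsc' is read or written by this batch, adding batch
 * dependencies and flushing other writers as needed to keep access to
 * the resource ordered.  Caller must hold the screen lock.
 */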
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	if (write)
		rsc->valid = true;

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask & ~(1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;

			if (rsc->write_batch && rsc->write_batch != batch)
				flush_write_batch(rsc);

			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				if (dep == batch)
					continue;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				fd_batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		/* If reading a resource pending a write, go ahead and flush the
		 * writer.  This avoids situations where we end up having to
		 * flush the current batch in _resource_used()
		 */
		if (rsc->write_batch && rsc->write_batch != batch)
			flush_write_batch(rsc);
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

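/* Flush early if the FD_DBG_FLUSH debug option is set, or if the draw
 * ring is close to full (only a concern on older kernels without
 * FD_VERSION_UNLIMITED_CMDS, where the rings cannot grow).
 */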
void
fd_batch_check_size(struct fd_batch *batch)
{
	debug_assert(!batch->flushed);

	if (unlikely(fd_mesa_debug & FD_DBG_FLUSH)) {
		fd_batch_flush(batch, true, false);
		return;
	}

	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if ((ring->cur - ring->start) > (ring->size/4 - 0x1000))
		fd_batch_flush(batch, true, false);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}