src/gallium/drivers/freedreno/freedreno_batch.c
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

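/* (Re)initialize per-batch state: allocate the draw/binning/gmem
 * ringbuffers, reset clear/restore/resolve tracking and the maximal
 * scissor bounds, and set up the dynarrays used for cmdstream patching
 * and HW query samples.
 */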
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
			(fd_mesa_debug & FD_DBG_NOGROW)) {
		size = 0x100000;
	}

	batch->draw    = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->gmem    = fd_ringbuffer_new(ctx->screen->pipe, size);

	fd_ringbuffer_set_parent(batch->gmem, NULL);
	fd_ringbuffer_set_parent(batch->draw, batch->gmem);
	fd_ringbuffer_set_parent(batch->binning, batch->gmem);

	batch->in_fence_fd = -1;

	batch->cleared = batch->partial_cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	/* reset maximal bounds: */
	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

	util_dynarray_init(&batch->draw_patches, NULL);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches, NULL);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples, NULL);
}

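/* Allocate and initialize a new batch, starting with a single reference
 * and an empty set of tracked resources.
 */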
struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

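/* Tear down per-batch state: drop the query buffer and in-fence fd,
 * free the ringbuffers and patch dynarrays, and release any remaining
 * HW query samples.
 */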
static void
batch_fini(struct fd_batch *batch)
{
	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->binning);
	fd_ringbuffer_del(batch->gmem);
	if (batch->lrz_clear) {
		fd_ringbuffer_del(batch->lrz_clear);
		batch->lrz_clear = NULL;
	}

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

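/* Drop references to all batches that this batch depends on, optionally
 * flushing them first.
 */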
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

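/* Detach all resources tracked by this batch; caller must hold the
 * screen lock.
 */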
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	struct set_entry *entry;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	mtx_lock(&batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	mtx_unlock(&batch->ctx->screen->lock);
}

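/* Return a batch to its freshly-initialized state, dropping its
 * dependencies and tracked resources first.
 */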
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

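/* Called once the last reference to the batch is dropped: invalidate it
 * in the batch-cache and free all associated state.
 */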
void
__fd_batch_destroy(struct fd_batch *batch)
{
	DBG("%p", batch);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);

	mtx_lock(&batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, true);
	mtx_unlock(&batch->ctx->screen->lock);

	batch_fini(batch);

	batch_reset_resources(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	free(batch);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

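/* Wait for a previously queued asynchronous flush of this batch to
 * complete (no-op unless reordering is enabled).
 */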
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_fence_wait(&batch->flush_fence);
}

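/* Job and cleanup callbacks, run on the context's flush_queue when the
 * batch is flushed asynchronously.
 */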
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

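/* Flush the batch: pause any active queries, kick off gmem rendering
 * (asynchronously if reordering is enabled), and then either reset the
 * batch (if it is the context's current batch) or invalidate it in the
 * batch-cache.
 */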
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (!batch->needs_flush)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_batch_set_stage(batch, FD_STAGE_NULL);

	fd_context_all_dirty(batch->ctx);
	batch_flush_reset_dependencies(batch, true);

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	if (batch == batch->ctx->batch) {
		batch_reset(batch);
	} else {
		mtx_lock(&batch->ctx->screen->lock);
		fd_bc_invalidate_batch(batch, false);
		mtx_unlock(&batch->ctx->screen->lock);
	}
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	struct fd_batch *tmp = NULL;
	fd_batch_reference(&tmp, batch);
	batch_flush(tmp);
	if (sync)
		fd_batch_sync(tmp);
	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse into our direct dependencies to catch indirect deps: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

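/* Record that 'batch' depends on 'dep'.  If the new dependency would
 * create a cycle in the dependency graph, flush 'dep' instead.  Caller
 * must hold the screen lock (dropped temporarily around the flush).
 */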
static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* if the new dependency already depends on us, we need to flush
	 * to avoid a loop in the dependency graph.
	 */
	if (batch_depends_on(dep, batch)) {
		DBG("%p: flush forced on %p!", batch, dep);
		mtx_unlock(&batch->ctx->screen->lock);
		fd_batch_flush(dep, false);
		mtx_lock(&batch->ctx->screen->lock);
	} else {
		struct fd_batch *other = NULL;
		fd_batch_reference_locked(&other, dep);
		batch->dependents_mask |= (1 << dep->idx);
		DBG("%p: added dependency on %p", batch, dep);
	}
}

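/* Track a read or write of 'rsc' by this batch, adding dependencies on
 * (and invalidating) other batches that have pending access to the
 * resource.  Caller must hold the screen lock.
 */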
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask != (1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;
			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		if (rsc->write_batch) {
			batch_add_dep(batch, rsc->write_batch);
			fd_bc_invalidate_batch(rsc->write_batch, false);
		}
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

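/* On kernels that cannot grow the cmdstream, flush before the fixed,
 * worst-case sized draw ring runs out of space.
 */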
void
fd_batch_check_size(struct fd_batch *batch)
{
	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_batch_flush(batch, true);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}