freedreno: native fence fd support
src/gallium/drivers/freedreno/freedreno_batch.c (mesa.git)
/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/list.h"
#include "util/set.h"
#include "util/hash_table.h"
#include "util/u_string.h"

#include "freedreno_batch.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_query_hw.h"

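/* per-batch init, also used by batch_reset() to recycle a batch after
 * flush: allocates the draw/binning/gmem ringbuffers and resets all
 * per-batch state back to defaults
 */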
static void
batch_init(struct fd_batch *batch)
{
	struct fd_context *ctx = batch->ctx;
	unsigned size = 0;

	if (ctx->screen->reorder)
		util_queue_fence_init(&batch->flush_fence);

	/* if kernel is too old to support unlimited # of cmd buffers, we
	 * have no option but to allocate large worst-case sizes so that
	 * we don't need to grow the ringbuffer.  Performance is likely to
	 * suffer, but there is no good alternative.
	 */
	if (fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) {
		size = 0x100000;
	}

	batch->draw    = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->binning = fd_ringbuffer_new(ctx->screen->pipe, size);
	batch->gmem    = fd_ringbuffer_new(ctx->screen->pipe, size);

	fd_ringbuffer_set_parent(batch->gmem, NULL);
	fd_ringbuffer_set_parent(batch->draw, batch->gmem);
	fd_ringbuffer_set_parent(batch->binning, batch->gmem);

	batch->in_fence_fd = -1;

	batch->cleared = batch->partial_cleared = 0;
	batch->restore = batch->resolve = 0;
	batch->needs_flush = false;
	batch->gmem_reason = 0;
	batch->num_draws = 0;
	batch->stage = FD_STAGE_NULL;

	fd_reset_wfi(batch);

	/* reset maximal bounds: */
	batch->max_scissor.minx = batch->max_scissor.miny = ~0;
	batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

	util_dynarray_init(&batch->draw_patches);

	if (is_a3xx(ctx->screen))
		util_dynarray_init(&batch->rbrc_patches);

	assert(batch->resources->entries == 0);

	util_dynarray_init(&batch->samples);
}

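/* allocate and initialize a new batch, with a single reference held
 * by the caller:
 */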
struct fd_batch *
fd_batch_create(struct fd_context *ctx)
{
	struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

	if (!batch)
		return NULL;

	DBG("%p", batch);

	pipe_reference_init(&batch->reference, 1);
	batch->ctx = ctx;

	batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
			_mesa_key_pointer_equal);

	batch_init(batch);

	return batch;
}

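/* per-batch cleanup, the inverse of batch_init(): releases the
 * ringbuffers, the in-fence fd, and any pending hw-query samples
 */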
static void
batch_fini(struct fd_batch *batch)
{
	pipe_resource_reference(&batch->query_buf, NULL);

	if (batch->in_fence_fd != -1)
		close(batch->in_fence_fd);

	fd_ringbuffer_del(batch->draw);
	fd_ringbuffer_del(batch->binning);
	fd_ringbuffer_del(batch->gmem);

	util_dynarray_fini(&batch->draw_patches);

	if (is_a3xx(batch->ctx->screen))
		util_dynarray_fini(&batch->rbrc_patches);

	while (batch->samples.size > 0) {
		struct fd_hw_sample *samp =
			util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
		fd_hw_sample_reference(batch->ctx, &samp, NULL);
	}
	util_dynarray_fini(&batch->samples);

	if (batch->ctx->screen->reorder)
		util_queue_fence_destroy(&batch->flush_fence);
}

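/* drop our references to the batches we depend on, optionally flushing
 * them first, and clear the dependents mask:
 */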
static void
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	foreach_batch(dep, cache, batch->dependents_mask) {
		if (flush)
			fd_batch_flush(dep, false);
		fd_batch_reference(&dep, NULL);
	}

	batch->dependents_mask = 0;
}

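/* detach all tracked resources from this batch; caller must hold the
 * screen lock:
 */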
static void
batch_reset_resources_locked(struct fd_batch *batch)
{
	struct set_entry *entry;

	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	set_foreach(batch->resources, entry) {
		struct fd_resource *rsc = (struct fd_resource *)entry->key;
		_mesa_set_remove(batch->resources, entry);
		debug_assert(rsc->batch_mask & (1 << batch->idx));
		rsc->batch_mask &= ~(1 << batch->idx);
		if (rsc->write_batch == batch)
			fd_batch_reference_locked(&rsc->write_batch, NULL);
	}
}

static void
batch_reset_resources(struct fd_batch *batch)
{
	pipe_mutex_lock(batch->ctx->screen->lock);
	batch_reset_resources_locked(batch);
	pipe_mutex_unlock(batch->ctx->screen->lock);
}

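/* wait for any in-flight flush of this batch to complete, then return
 * the batch to a freshly initialized state:
 */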
static void
batch_reset(struct fd_batch *batch)
{
	DBG("%p", batch);

	fd_batch_sync(batch);

	batch_flush_reset_dependencies(batch, false);
	batch_reset_resources(batch);

	batch_fini(batch);
	batch_init(batch);
}

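/* discard any accumulated (but not yet flushed) rendering in this
 * batch:
 */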
void
fd_batch_reset(struct fd_batch *batch)
{
	if (batch->needs_flush)
		batch_reset(batch);
}

void
__fd_batch_destroy(struct fd_batch *batch)
{
	DBG("%p", batch);

	util_copy_framebuffer_state(&batch->framebuffer, NULL);

	pipe_mutex_lock(batch->ctx->screen->lock);
	fd_bc_invalidate_batch(batch, true);
	pipe_mutex_unlock(batch->ctx->screen->lock);

	batch_fini(batch);

	batch_reset_resources(batch);
	debug_assert(batch->resources->entries == 0);
	_mesa_set_destroy(batch->resources, NULL);

	batch_flush_reset_dependencies(batch, false);
	debug_assert(batch->dependents_mask == 0);

	free(batch);
}

void
__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
	util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

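/* wait for the batch's flush job to complete; a no-op when reordering
 * is disabled, since then flushes happen synchronously:
 */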
void
fd_batch_sync(struct fd_batch *batch)
{
	if (!batch->ctx->screen->reorder)
		return;
	util_queue_job_wait(&batch->flush_fence);
}

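/* flush-queue worker: performs the actual gmem rendering on the flush
 * thread; batch_cleanup_func then drops the reference taken in
 * batch_flush():
 */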
static void
batch_flush_func(void *job, int id)
{
	struct fd_batch *batch = job;

	fd_gmem_render_tiles(batch);
	batch_reset_resources(batch);
}

static void
batch_cleanup_func(void *job, int id)
{
	struct fd_batch *batch = job;
	fd_batch_reference(&batch, NULL);
}

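/* close out and submit the batch's rendering: with reordering enabled
 * the work is handed off to the flush_queue, otherwise it is rendered
 * directly:
 */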
static void
batch_flush(struct fd_batch *batch)
{
	DBG("%p: needs_flush=%d", batch, batch->needs_flush);

	if (!batch->needs_flush)
		return;

	batch->needs_flush = false;

	/* close out the draw cmds by making sure any active queries are
	 * paused:
	 */
	fd_hw_query_set_stage(batch, batch->draw, FD_STAGE_NULL);

	batch->ctx->dirty = ~0;
	batch_flush_reset_dependencies(batch, true);

	if (batch->ctx->screen->reorder) {
		struct fd_batch *tmp = NULL;
		fd_batch_reference(&tmp, batch);

		if (!util_queue_is_initialized(&batch->ctx->flush_queue))
			util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1);

		util_queue_add_job(&batch->ctx->flush_queue,
				batch, &batch->flush_fence,
				batch_flush_func, batch_cleanup_func);
	} else {
		fd_gmem_render_tiles(batch);
		batch_reset_resources(batch);
	}

	debug_assert(batch->reference.count > 0);

	if (batch == batch->ctx->batch) {
		batch_reset(batch);
	} else {
		pipe_mutex_lock(batch->ctx->screen->lock);
		fd_bc_invalidate_batch(batch, false);
		pipe_mutex_unlock(batch->ctx->screen->lock);
	}
}

/* NOTE: could drop the last ref to batch */
void
fd_batch_flush(struct fd_batch *batch, bool sync)
{
	/* NOTE: we need to hold an extra ref across the body of flush,
	 * since the last ref to this batch could be dropped when cleaning
	 * up used_resources
	 */
	struct fd_batch *tmp = NULL;
	fd_batch_reference(&tmp, batch);
	batch_flush(tmp);
	if (sync)
		fd_batch_sync(tmp);
	fd_batch_reference(&tmp, NULL);
}

/* does 'batch' depend directly or indirectly on 'other' ? */
static bool
batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
	struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
	struct fd_batch *dep;

	if (batch->dependents_mask & (1 << other->idx))
		return true;

	/* recurse to catch indirect dependencies: */
	foreach_batch(dep, cache, batch->dependents_mask)
		if (batch_depends_on(dep, other))
			return true;

	return false;
}

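/* record that 'batch' must be flushed after 'dep'; if that would
 * create a cycle in the dependency graph, flush 'dep' immediately
 * instead:
 */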
static void
batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
	if (batch->dependents_mask & (1 << dep->idx))
		return;

	/* if the new dependency already depends on us, we need to flush
	 * to avoid a loop in the dependency graph.
	 */
	if (batch_depends_on(dep, batch)) {
		DBG("%p: flush forced on %p!", batch, dep);
		pipe_mutex_unlock(batch->ctx->screen->lock);
		fd_batch_flush(dep, false);
		pipe_mutex_lock(batch->ctx->screen->lock);
	} else {
		struct fd_batch *other = NULL;
		fd_batch_reference_locked(&other, dep);
		batch->dependents_mask |= (1 << dep->idx);
		DBG("%p: added dependency on %p", batch, dep);
	}
}

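/* mark a resource as read or written by this batch, adding
 * dependencies on (or flushing) any other batches that access the
 * same resource, to preserve ordering:
 */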
void
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
	pipe_mutex_assert_locked(batch->ctx->screen->lock);

	if (rsc->stencil)
		fd_batch_resource_used(batch, rsc->stencil, write);

	DBG("%p: %s %p", batch, write ? "write" : "read", rsc);

	/* note, invalidate write batch, to avoid further writes to rsc
	 * resulting in a write-after-read hazard.
	 */

	if (write) {
		/* if we are pending read or write by any other batch: */
		if (rsc->batch_mask != (1 << batch->idx)) {
			struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
			struct fd_batch *dep;
			foreach_batch(dep, cache, rsc->batch_mask) {
				struct fd_batch *b = NULL;
				/* note that batch_add_dep could flush and unref dep, so
				 * we need to hold a reference to keep it live for the
				 * fd_bc_invalidate_batch()
				 */
				fd_batch_reference(&b, dep);
				batch_add_dep(batch, b);
				fd_bc_invalidate_batch(b, false);
				fd_batch_reference_locked(&b, NULL);
			}
		}
		fd_batch_reference_locked(&rsc->write_batch, batch);
	} else {
		if (rsc->write_batch) {
			batch_add_dep(batch, rsc->write_batch);
			fd_bc_invalidate_batch(rsc->write_batch, false);
		}
	}

	if (rsc->batch_mask & (1 << batch->idx))
		return;

	debug_assert(!_mesa_set_search(batch->resources, rsc));

	_mesa_set_add(batch->resources, rsc);
	rsc->batch_mask |= (1 << batch->idx);
}

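/* on kernels without unlimited cmd buffer support the ringbuffer
 * cannot grow, so flush once the fixed-size ring is nearly full (or
 * unconditionally with FD_DBG_FLUSH):
 */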
void
fd_batch_check_size(struct fd_batch *batch)
{
	if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
		return;

	struct fd_ringbuffer *ring = batch->draw;
	if (((ring->cur - ring->start) > (ring->size/4 - 0x1000)) ||
			(fd_mesa_debug & FD_DBG_FLUSH))
		fd_batch_flush(batch, true);
}

/* emit a WAIT_FOR_IDLE only if needed, ie. if there has not already
 * been one since last draw:
 */
void
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
	if (batch->needs_wfi) {
		if (batch->ctx->screen->gpu_id >= 500)
			OUT_WFI5(ring);
		else
			OUT_WFI(ring);
		batch->needs_wfi = false;
	}
}