radeonsi: add support for importing PIPE_FD_TYPE_SYNCOBJ semaphores
src/gallium/drivers/radeonsi/si_fence.c
/*
 * Copyright 2013-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <libsync.h>

#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "si_pipe.h"
#include "radeon/r600_cs.h"

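/* A fine-grained fence: a dword in a GTT buffer that the GPU writes at a
 * caller-chosen point in the pipeline and that the CPU can poll directly. */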
struct si_fine_fence {
	struct r600_resource *buf;
	unsigned offset;
};

struct si_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;
	struct tc_unflushed_batch_token *tc_token;
	struct util_queue_fence ready;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct r600_common_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;

	struct si_fine_fence fine;
};

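/* Make all GPU work submitted after this call wait for "fence". The
 * dependency is added to both the SDMA and gfx IBs when SDMA is in use. */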
static void si_add_fence_dependency(struct r600_common_context *rctx,
                                    struct pipe_fence_handle *fence)
{
	struct radeon_winsys *ws = rctx->ws;

	if (rctx->dma.cs)
		ws->cs_add_fence_dependency(rctx->dma.cs, fence);
	ws->cs_add_fence_dependency(rctx->gfx.cs, fence);
}

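/* pipe_screen::fence_reference: release the reference held in *dst,
 * destroying the fence if the refcount drops to zero, and store src. */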
static void si_fence_reference(struct pipe_screen *screen,
                               struct pipe_fence_handle **dst,
                               struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct si_screen*)screen)->ws;
	struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
	struct si_multi_fence *rsrc = (struct si_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
		r600_resource_reference(&(*rdst)->fine.buf, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}

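/* Allocate a new multi_fence with a single reference and a signalled
 * "ready" fence; all other fields start out zeroed. */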
static struct si_multi_fence *si_create_multi_fence(void)
{
	struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
	if (!fence)
		return NULL;

	pipe_reference_init(&fence->reference, 1);
	util_queue_fence_init(&fence->ready);

	return fence;
}

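/* Create a fence for the threaded context. The fence starts out not ready;
 * si_flush_from_st signals it once the flush has been processed. */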
struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
                                          struct tc_unflushed_batch_token *tc_token)
{
	struct si_multi_fence *fence = si_create_multi_fence();
	if (!fence)
		return NULL;

	util_queue_fence_reset(&fence->ready);
	tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);

	return (struct pipe_fence_handle *)fence;
}

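/* pipe_context::fence_server_sync: make all subsequently submitted GPU
 * commands wait for "fence". */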
static void si_fence_server_sync(struct pipe_context *ctx,
                                 struct pipe_fence_handle *fence)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;

	util_queue_fence_wait(&rfence->ready);

	/* Unflushed fences from the same context are no-ops. */
	if (rfence->gfx_unflushed.ctx &&
	    rfence->gfx_unflushed.ctx == rctx)
		return;

	/* None of the unflushed commands will start execution before this
	 * fence dependency is signalled.
	 *
	 * Should we flush the context to allow more GPU parallelism?
	 */
	if (rfence->sdma)
		si_add_fence_dependency(rctx, rfence->sdma);
	if (rfence->gfx)
		si_add_fence_dependency(rctx, rfence->gfx);
}

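/* Check whether a fine-grained fence has been signalled by reading the
 * fence dword through an unsynchronized CPU mapping. */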
static bool si_fine_fence_signaled(struct radeon_winsys *rws,
                                   const struct si_fine_fence *fine)
{
	char *map = rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ |
						           PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!map)
		return false;

	uint32_t *fence = (uint32_t*)(map + fine->offset);
	return *fence != 0;
}

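/* Allocate a fine-grained fence dword and emit the packet that writes it:
 * a WRITE_DATA from the PFP for top-of-pipe, or an end-of-pipe event for
 * bottom-of-pipe. */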
static void si_fine_fence_set(struct si_context *ctx,
                              struct si_fine_fence *fine,
                              unsigned flags)
{
	uint32_t *fence_ptr;

	assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);

	/* Use uncached system memory for the fence. */
	u_upload_alloc(ctx->b.cached_gtt_allocator, 0, 4, 4,
		       &fine->offset, (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
	if (!fine->buf)
		return;

	*fence_ptr = 0;

	uint64_t fence_va = fine->buf->gpu_address + fine->offset;

	radeon_add_to_buffer_list(&ctx->b, &ctx->b.gfx, fine->buf,
				  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
	if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
		struct radeon_winsys_cs *cs = ctx->b.gfx.cs;
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_PFP));
		radeon_emit(cs, fence_va);
		radeon_emit(cs, fence_va >> 32);
		radeon_emit(cs, 0x80000000);
	} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
		si_gfx_write_event_eop(&ctx->b, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				       EOP_DATA_SEL_VALUE_32BIT,
				       NULL, fence_va, 0x80000000,
				       PIPE_QUERY_GPU_FINISHED);
	} else {
		assert(false);
	}
}

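/* pipe_screen::fence_finish: wait for a fence with the given timeout in ns
 * (0 polls, PIPE_TIMEOUT_INFINITE blocks); returns whether the fence
 * signalled in time. */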
static boolean si_fence_finish(struct pipe_screen *screen,
                               struct pipe_context *ctx,
                               struct pipe_fence_handle *fence,
                               uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct si_screen*)screen)->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	if (!util_queue_fence_is_signalled(&rfence->ready)) {
		if (rfence->tc_token) {
			/* Ensure that si_flush_from_st will be called for
			 * this fence, but only if we're in the API thread
			 * where the context is current.
			 *
			 * Note that the batch containing the flush may already
			 * be in flight in the driver thread, so the fence
			 * may not be ready yet when this call returns.
			 */
			threaded_context_flush(ctx, rfence->tc_token,
					       timeout == 0);
		}

		if (!timeout)
			return false;

		if (timeout == PIPE_TIMEOUT_INFINITE) {
			util_queue_fence_wait(&rfence->ready);
		} else {
			if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
				return false;
		}

		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine)) {
		rws->fence_reference(&rfence->gfx, NULL);
		r600_resource_reference(&rfence->fine.buf, NULL);
		return true;
	}

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (ctx && rfence->gfx_unflushed.ctx) {
		struct si_context *sctx;

		sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
		if (rfence->gfx_unflushed.ctx == &sctx->b &&
		    rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
			/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
			 * spec says:
			 *
			 *    "If the sync object being blocked upon will not be
			 *     signaled in finite time (for example, by an associated
			 *     fence command issued previously, but not yet flushed to
			 *     the graphics pipeline), then ClientWaitSync may hang
			 *     forever. To help prevent this behavior, if
			 *     ClientWaitSync is called and all of the following are
			 *     true:
			 *
			 *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
			 *     * sync is unsignaled when ClientWaitSync is called,
			 *     * and the calls to ClientWaitSync and FenceSync were
			 *       issued from the same context,
			 *
			 *     then the GL will behave as if the equivalent of Flush
			 *     were inserted immediately after the creation of sync."
			 *
			 * This means we need to flush for such fences even when we're
			 * not going to wait.
			 */
			threaded_context_unwrap_sync(ctx);
			sctx->b.gfx.flush(&sctx->b, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
			rfence->gfx_unflushed.ctx = NULL;

			if (!timeout)
				return false;

			/* Recompute the timeout after all that. */
			if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
				int64_t time = os_time_get_nano();
				timeout = abs_timeout > time ? abs_timeout - time : 0;
			}
		}
	}

	if (rws->fence_wait(rws, rfence->gfx, timeout))
		return true;

	/* Re-check in case the GPU is slow or hangs, but the commands before
	 * the fine-grained fence have completed. */
	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine))
		return true;

	return false;
}

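/* pipe_context::create_fence_fd: import a fence from a file descriptor.
 * PIPE_FD_TYPE_NATIVE_SYNC imports a sync_file, PIPE_FD_TYPE_SYNCOBJ imports
 * a DRM syncobj handle; on failure, *pfence is left NULL. A caller would
 * typically do something like (sketch, assuming "fd" holds a syncobj fd):
 *
 *    struct pipe_fence_handle *f = NULL;
 *    ctx->create_fence_fd(ctx, &f, fd, PIPE_FD_TYPE_SYNCOBJ);
 *    if (f)
 *       ctx->fence_server_sync(ctx, f);
 */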
static void si_create_fence_fd(struct pipe_context *ctx,
                               struct pipe_fence_handle **pfence, int fd,
                               enum pipe_fd_type type)
{
	struct si_screen *sscreen = (struct si_screen*)ctx->screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence;

	*pfence = NULL;

	rfence = si_create_multi_fence();
	if (!rfence)
		return;

	switch (type) {
	case PIPE_FD_TYPE_NATIVE_SYNC:
		if (!sscreen->info.has_fence_to_handle)
			goto finish;

		rfence->gfx = ws->fence_import_sync_file(ws, fd);
		break;

	case PIPE_FD_TYPE_SYNCOBJ:
		if (!sscreen->info.has_syncobj)
			goto finish;

		rfence->gfx = ws->fence_import_syncobj(ws, fd);
		break;

	default:
		unreachable("bad fence fd type when importing");
	}

finish:
	if (!rfence->gfx) {
		FREE(rfence);
		return;
	}

	*pfence = (struct pipe_fence_handle*)rfence;
}

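/* pipe_screen::fence_get_fd: export a fence as a sync_file fd. If both an
 * SDMA and a gfx fence exist, their sync_files are merged into a single fd. */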
static int si_fence_get_fd(struct pipe_screen *screen,
                           struct pipe_fence_handle *fence)
{
	struct si_screen *sscreen = (struct si_screen*)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int gfx_fd = -1, sdma_fd = -1;

	if (!sscreen->info.has_fence_to_handle)
		return -1;

	util_queue_fence_wait(&rfence->ready);

	/* Deferred fences aren't supported. */
	assert(!rfence->gfx_unflushed.ctx);
	if (rfence->gfx_unflushed.ctx)
		return -1;

	if (rfence->sdma) {
		sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
		if (sdma_fd == -1)
			return -1;
	}
	if (rfence->gfx) {
		gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
		if (gfx_fd == -1) {
			if (sdma_fd != -1)
				close(sdma_fd);
			return -1;
		}
	}

	/* If we don't have FDs at this point, it means we don't have fences
	 * either. */
	if (sdma_fd == -1 && gfx_fd == -1)
		return ws->export_signalled_sync_file(ws);
	if (sdma_fd == -1)
		return gfx_fd;
	if (gfx_fd == -1)
		return sdma_fd;

	/* Get a fence that will be a combination of both fences. */
	sync_accumulate("radeonsi", &gfx_fd, sdma_fd);
	close(sdma_fd);
	return gfx_fd;
}

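/* pipe_context::flush: flush the SDMA and gfx IBs and optionally return a
 * fence. With PIPE_FLUSH_DEFERRED, the gfx flush itself may be postponed
 * until the fence is waited on (see si_fence_finish). */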
static void si_flush_from_st(struct pipe_context *ctx,
                             struct pipe_fence_handle **fence,
                             unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct radeon_winsys *ws = rctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	struct si_fine_fence fine = {};
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
		assert(flags & PIPE_FLUSH_DEFERRED);
		assert(fence);

		si_fine_fence_set((struct si_context *)rctx, &fine, flags);
	}

	/* DMA IBs are preambles to gfx IBs, therefore they must be flushed first. */
	if (rctx->dma.cs)
		rctx->dma.flush(rctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(rctx->gfx.cs, rctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, rctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(rctx->gfx.cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - The state tracker must allow a deferred flush.
		 * - The state tracker must request a fence.
		 * - fence_get_fd is not allowed.
		 * Thread safety in fence_finish must be ensured by the state tracker.
		 */
		if (flags & PIPE_FLUSH_DEFERRED &&
		    !(flags & PIPE_FLUSH_FENCE_FD) &&
		    fence) {
			gfx_fence = rctx->ws->cs_get_next_fence(rctx->gfx.cs);
			deferred_fence = true;
		} else {
			rctx->gfx.flush(rctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct si_multi_fence *multi_fence;

		if (flags & TC_FLUSH_ASYNC) {
			multi_fence = (struct si_multi_fence *)*fence;
			assert(multi_fence);
		} else {
			multi_fence = si_create_multi_fence();
			if (!multi_fence) {
				ws->fence_reference(&sdma_fence, NULL);
				ws->fence_reference(&gfx_fence, NULL);
				goto finish;
			}

			screen->fence_reference(screen, fence, NULL);
			*fence = (struct pipe_fence_handle*)multi_fence;
		}

		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = rctx;
			multi_fence->gfx_unflushed.ib_index = rctx->num_gfx_cs_flushes;
		}

		multi_fence->fine = fine;
		fine.buf = NULL;

		if (flags & TC_FLUSH_ASYNC) {
			util_queue_fence_signal(&multi_fence->ready);
			tc_unflushed_batch_token_reference(&multi_fence->tc_token, NULL);
		}
	}
	assert(!fine.buf);
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (rctx->dma.cs)
			ws->cs_sync_flush(rctx->dma.cs);
		ws->cs_sync_flush(rctx->gfx.cs);
	}
}

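/* Install the per-context fence entry points. */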
void si_init_fence_functions(struct si_context *ctx)
{
	ctx->b.b.flush = si_flush_from_st;
	ctx->b.b.create_fence_fd = si_create_fence_fd;
	ctx->b.b.fence_server_sync = si_fence_server_sync;
}

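/* Install the per-screen fence entry points. */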
void si_init_screen_fence_functions(struct si_screen *screen)
{
	screen->b.fence_finish = si_fence_finish;
	screen->b.fence_reference = si_fence_reference;
	screen->b.fence_get_fd = si_fence_get_fd;
}