/*
 * Copyright 2013-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <libsync.h> /* sync_accumulate(), close() */

#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "si_build_pm4.h"
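/* A fine-grained fence: a single dword of GTT memory that the GPU writes
 * (either at the top or at the bottom of the pipe, see si_fine_fence_set),
 * so that the CPU can cheaply poll for completion without waiting on the
 * winsys fence of the whole IB. */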
struct si_fine_fence {
	struct r600_resource *buf;
	unsigned offset;
};
struct si_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;
	struct tc_unflushed_batch_token *tc_token;
	struct util_queue_fence ready;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct si_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;

	struct si_fine_fence fine;
};
/**
 * Write an EOP event.
 *
 * \param event        EVENT_TYPE_*
 * \param event_flags  Optional cache flush flags (TC)
 * \param data_sel     1 = fence, 3 = timestamp
 * \param buf          Buffer
 * \param va           GPU address
 * \param new_fence    Fence value to write for this event
 * \param query_type   PIPE_QUERY_* (used only by the GFX9 bug workaround)
 */
void si_gfx_write_event_eop(struct si_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type)
{
	struct radeon_cmdbuf *cs = ctx->gfx_cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);
	if (ctx->chip_class >= GFX9) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 *
		 * Occlusion queries don't need to do it here, because they
		 * always do ZPASS_DONE before the timestamp.
		 */
		if (ctx->chip_class == GFX9 &&
		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;

			assert(16 * ctx->screen->info.num_render_backends <=
			       scratch->b.b.width0);
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, scratch->gpu_address);
			radeon_emit(cs, scratch->gpu_address >> 32);

			radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0);		/* immediate data hi */
		radeon_emit(cs, 0);		/* unused */
	} else {
		if (ctx->chip_class == CIK ||
		    ctx->chip_class == VI) {
			struct r600_resource *scratch = ctx->eop_bug_scratch;
			uint64_t va = scratch->gpu_address;

			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */

			radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}
	if (buf) {
		radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_QUERY);
	}
}
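/* Return the number of dwords that si_gfx_write_event_eop emits for one
 * fence. On CIK and VI the count is doubled, because two EOP events are
 * emitted there (see the bug workaround above). */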
unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
{
	unsigned dwords = 6;

	if (screen->info.chip_class == CIK ||
	    screen->info.chip_class == VI)
		dwords *= 2;

	return dwords;
}
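/* Emit a WAIT_REG_MEM packet that makes the CP poll the 32-bit value at
 * "va" until (value & mask) == ref. */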
void si_gfx_wait_fence(struct si_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_cmdbuf *cs = ctx->gfx_cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}
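/* Make both command streams wait for "fence" before executing any work
 * submitted after this call. */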
static void si_add_fence_dependency(struct si_context *sctx,
				    struct pipe_fence_handle *fence)
{
	struct radeon_winsys *ws = sctx->ws;

	if (sctx->dma_cs)
		ws->cs_add_fence_dependency(sctx->dma_cs, fence);
	ws->cs_add_fence_dependency(sctx->gfx_cs, fence);
}
static void si_add_syncobj_signal(struct si_context *sctx,
				  struct pipe_fence_handle *fence)
{
	sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
}
static void si_fence_reference(struct pipe_screen *screen,
			       struct pipe_fence_handle **dst,
			       struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct si_screen *)screen)->ws;
	struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
	struct si_multi_fence *rsrc = (struct si_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
		r600_resource_reference(&(*rdst)->fine.buf, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}
static struct si_multi_fence *si_create_multi_fence()
{
	struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
	if (!fence)
		return NULL;

	pipe_reference_init(&fence->reference, 1);
	util_queue_fence_init(&fence->ready);

	return fence;
}
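/* Create a fence for an unflushed batch. The "ready" fence stays unsignaled
 * (and the winsys fences stay NULL) until si_flush_from_st processes the
 * TC token and fills them in. */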
struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
					  struct tc_unflushed_batch_token *tc_token)
{
	struct si_multi_fence *fence = si_create_multi_fence();
	if (!fence)
		return NULL;

	util_queue_fence_reset(&fence->ready);
	tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);

	return (struct pipe_fence_handle *)fence;
}
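/* Check whether a fine-grained fence has been written. The mapping is
 * unsynchronized, so this never stalls; a non-zero value means the GPU
 * has already written the fence. */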
static bool si_fine_fence_signaled(struct radeon_winsys *rws,
				   const struct si_fine_fence *fine)
{
	char *map = rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ |
							  PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!map)
		return false;

	uint32_t *fence = (uint32_t*)(map + fine->offset);
	return *fence != 0;
}
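/* Allocate a fine-fence dword and emit the packet that writes it: a PFP
 * WRITE_DATA for top-of-pipe fences, or a bottom-of-pipe EOP event. */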
static void si_fine_fence_set(struct si_context *ctx,
			      struct si_fine_fence *fine,
			      unsigned flags)
{
	uint32_t *fence_ptr;

	assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);

	/* Use uncached system memory for the fence. */
	u_upload_alloc(ctx->cached_gtt_allocator, 0, 4, 4,
		       &fine->offset, (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
	if (!fine->buf)
		return;

	*fence_ptr = 0;

	uint64_t fence_va = fine->buf->gpu_address + fine->offset;

	radeon_add_to_buffer_list(ctx, ctx->gfx_cs, fine->buf,
				  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
	if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
		struct radeon_cmdbuf *cs = ctx->gfx_cs;
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_PFP));
		radeon_emit(cs, fence_va);
		radeon_emit(cs, fence_va >> 32);
		radeon_emit(cs, 0x80000000);
	} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				       EOP_DATA_SEL_VALUE_32BIT,
				       NULL, fence_va, 0x80000000,
				       PIPE_QUERY_GPU_FINISHED);
	} else {
		assert(false);
	}
}
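/* pipe_screen::fence_finish. The timeout is converted to an absolute
 * deadline up front and recomputed after each wait, so that waiting on the
 * ready fence, the SDMA fence and the gfx fence in sequence never exceeds
 * the caller's timeout. */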
static boolean si_fence_finish(struct pipe_screen *screen,
			       struct pipe_context *ctx,
			       struct pipe_fence_handle *fence,
			       uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct si_screen *)screen)->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	if (!util_queue_fence_is_signalled(&rfence->ready)) {
		if (rfence->tc_token) {
			/* Ensure that si_flush_from_st will be called for
			 * this fence, but only if we're in the API thread
			 * where the context is current.
			 *
			 * Note that the batch containing the flush may already
			 * be in flight in the driver thread, so the fence
			 * may not be ready yet when this call returns.
			 */
			threaded_context_flush(ctx, rfence->tc_token,
					       timeout == 0);
		}

		if (!timeout)
			return false;

		if (timeout == PIPE_TIMEOUT_INFINITE) {
			util_queue_fence_wait(&rfence->ready);
		} else {
			if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
				return false;
		}

		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine)) {
		rws->fence_reference(&rfence->gfx, NULL);
		r600_resource_reference(&rfence->fine.buf, NULL);
		return true;
	}

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (ctx && rfence->gfx_unflushed.ctx) {
		struct si_context *sctx;

		sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
		if (rfence->gfx_unflushed.ctx == sctx &&
		    rfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
			/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
			 * spec says:
			 *
			 *    "If the sync object being blocked upon will not be
			 *     signaled in finite time (for example, by an associated
			 *     fence command issued previously, but not yet flushed to
			 *     the graphics pipeline), then ClientWaitSync may hang
			 *     forever. To help prevent this behavior, if
			 *     ClientWaitSync is called and all of the following are
			 *     true:
			 *
			 *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
			 *     * sync is unsignaled when ClientWaitSync is called,
			 *     * and the calls to ClientWaitSync and FenceSync were
			 *       issued from the same context,
			 *
			 *     then the GL will behave as if the equivalent of Flush
			 *     were inserted immediately after the creation of sync."
			 *
			 * This means we need to flush for such fences even when we're
			 * not going to wait.
			 */
			threaded_context_unwrap_sync(ctx);
			si_flush_gfx_cs(sctx,
					(timeout ? 0 : PIPE_FLUSH_ASYNC) |
					 RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
					NULL);
			rfence->gfx_unflushed.ctx = NULL;

			if (!timeout)
				return false;

			/* Recompute the timeout after all that. */
			if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
				int64_t time = os_time_get_nano();
				timeout = abs_timeout > time ? abs_timeout - time : 0;
			}
		}
	}

	if (rws->fence_wait(rws, rfence->gfx, timeout))
		return true;

	/* Re-check in case the GPU is slow or hangs, but the commands before
	 * the fine-grained fence have completed. */
	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine))
		return true;

	return false;
}
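/* Import a fence FD, either a sync_file or a drm_syncobj, as the gfx fence
 * of a new multi fence. */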
static void si_create_fence_fd(struct pipe_context *ctx,
			       struct pipe_fence_handle **pfence, int fd,
			       enum pipe_fd_type type)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence;

	*pfence = NULL;

	rfence = si_create_multi_fence();
	if (!rfence)
		return;

	switch (type) {
	case PIPE_FD_TYPE_NATIVE_SYNC:
		if (!sscreen->info.has_fence_to_handle)
			goto finish;

		rfence->gfx = ws->fence_import_sync_file(ws, fd);
		break;

	case PIPE_FD_TYPE_SYNCOBJ:
		if (!sscreen->info.has_syncobj)
			goto finish;

		rfence->gfx = ws->fence_import_syncobj(ws, fd);
		break;

	default:
		unreachable("bad fence fd type when importing");
	}

finish:
	if (!rfence->gfx) {
		FREE(rfence);
		return;
	}

	*pfence = (struct pipe_fence_handle *)rfence;
}
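/* Export the fence as a sync_file FD. If both an SDMA and a gfx fence
 * exist, sync_accumulate() merges them into a single FD. */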
static int si_fence_get_fd(struct pipe_screen *screen,
			   struct pipe_fence_handle *fence)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int gfx_fd = -1, sdma_fd = -1;

	if (!sscreen->info.has_fence_to_handle)
		return -1;

	util_queue_fence_wait(&rfence->ready);

	/* Deferred fences aren't supported. */
	assert(!rfence->gfx_unflushed.ctx);
	if (rfence->gfx_unflushed.ctx)
		return -1;

	if (rfence->sdma) {
		sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
		if (sdma_fd == -1)
			return -1;
	}
	if (rfence->gfx) {
		gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
		if (gfx_fd == -1) {
			if (sdma_fd != -1)
				close(sdma_fd);
			return -1;
		}
	}

	/* If we don't have FDs at this point, it means we don't have fences
	 * either. */
	if (sdma_fd == -1 && gfx_fd == -1)
		return ws->export_signalled_sync_file(ws);
	if (sdma_fd == -1)
		return gfx_fd;
	if (gfx_fd == -1)
		return sdma_fd;

	/* Get a fence that will be a combination of both fences. */
	sync_accumulate("radeonsi", &gfx_fd, sdma_fd);
	close(sdma_fd);
	return gfx_fd;
}
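/* pipe_context::flush. The SDMA IB is flushed first because it may be a
 * preamble of the gfx IB. If the state tracker allows it, a deferred fence
 * is created instead of flushing the gfx IB; si_fence_finish flushes it
 * later on demand. */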
static void si_flush_from_st(struct pipe_context *ctx,
			     struct pipe_fence_handle **fence,
			     unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct si_context *sctx = (struct si_context *)ctx;
	struct radeon_winsys *ws = sctx->ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	struct si_fine_fence fine = {};
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
		assert(flags & PIPE_FLUSH_DEFERRED);
		assert(fence);

		si_fine_fence_set(sctx, &fine, flags);
	}

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (sctx->dma_cs)
		si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, sctx->last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(sctx->gfx_cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - The state tracker must allow a deferred flush.
		 * - The state tracker must request a fence.
		 * - fence_get_fd is not allowed.
		 * Thread safety in fence_finish must be ensured by the state tracker.
		 */
		if (flags & PIPE_FLUSH_DEFERRED &&
		    !(flags & PIPE_FLUSH_FENCE_FD) &&
		    fence) {
			gfx_fence = sctx->ws->cs_get_next_fence(sctx->gfx_cs);
			deferred_fence = true;
		} else {
			si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct si_multi_fence *multi_fence;

		if (flags & TC_FLUSH_ASYNC) {
			multi_fence = (struct si_multi_fence *)*fence;
			assert(multi_fence);
		} else {
			multi_fence = si_create_multi_fence();
			if (!multi_fence) {
				ws->fence_reference(&sdma_fence, NULL);
				ws->fence_reference(&gfx_fence, NULL);
				goto finish;
			}

			screen->fence_reference(screen, fence, NULL);
			*fence = (struct pipe_fence_handle *)multi_fence;
		}

		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = sctx;
			multi_fence->gfx_unflushed.ib_index = sctx->num_gfx_cs_flushes;
		}

		multi_fence->fine = fine;
		fine.buf = NULL;

		if (flags & TC_FLUSH_ASYNC) {
			util_queue_fence_signal(&multi_fence->ready);
			tc_unflushed_batch_token_reference(&multi_fence->tc_token, NULL);
		}
	}
	assert(!fine.buf);
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (sctx->dma_cs)
			ws->cs_sync_flush(sctx->dma_cs);
		ws->cs_sync_flush(sctx->gfx_cs);
	}
}
static void si_fence_server_signal(struct pipe_context *ctx,
				   struct pipe_fence_handle *fence)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;

	/* We should have at least one syncobj to signal. */
	assert(rfence->sdma || rfence->gfx);

	if (rfence->sdma)
		si_add_syncobj_signal(sctx, rfence->sdma);
	if (rfence->gfx)
		si_add_syncobj_signal(sctx, rfence->gfx);

	/* The spec does not require a flush here. We insert a flush
	 * because syncobj based signals are not directly placed into
	 * the command stream. Instead the signal happens when the
	 * submission associated with the syncobj finishes execution.
	 *
	 * Therefore, we must make sure that we flush the pipe to avoid
	 * new work being emitted and getting executed before the signal
	 * operation.
	 */
	si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
}
static void si_fence_server_sync(struct pipe_context *ctx,
				 struct pipe_fence_handle *fence)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;

	util_queue_fence_wait(&rfence->ready);

	/* Unflushed fences from the same context are no-ops. */
	if (rfence->gfx_unflushed.ctx &&
	    rfence->gfx_unflushed.ctx == sctx)
		return;

	/* All unflushed commands will not start execution before
	 * this fence dependency is signalled.
	 *
	 * Therefore we must flush before inserting the dependency.
	 */
	si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);

	if (rfence->sdma)
		si_add_fence_dependency(sctx, rfence->sdma);
	if (rfence->gfx)
		si_add_fence_dependency(sctx, rfence->gfx);
}
void si_init_fence_functions(struct si_context *ctx)
{
	ctx->b.flush = si_flush_from_st;
	ctx->b.create_fence_fd = si_create_fence_fd;
	ctx->b.fence_server_sync = si_fence_server_sync;
	ctx->b.fence_server_signal = si_fence_server_signal;
}
void si_init_screen_fence_functions(struct si_screen *screen)
{
	screen->b.fence_finish = si_fence_finish;
	screen->b.fence_reference = si_fence_reference;
	screen->b.fence_get_fd = si_fence_get_fd;
}