/*
 * Copyright 2013-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <libsync.h>

#include "si_build_pm4.h"
#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

struct si_fine_fence {
   struct si_resource *buf;
   unsigned offset;
};

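/* A multi-engine fence: it wraps the winsys fences of the gfx and sdma
 * engines, an optional fine-grained CPU-visible fence, and the
 * threaded-context token used to resolve deferred (unflushed) fences. */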
struct si_multi_fence {
   struct pipe_reference reference;
   struct pipe_fence_handle *gfx;
   struct pipe_fence_handle *sdma;
   struct tc_unflushed_batch_token *tc_token;
   struct util_queue_fence ready;

   /* If the context wasn't flushed at fence creation, this is non-NULL. */
   struct {
      struct si_context *ctx;
      unsigned ib_index;
   } gfx_unflushed;

   struct si_fine_fence fine;
};

/**
 * Write an EOP event.
 *
 * \param event        EVENT_TYPE_*
 * \param event_flags  Optional cache flush flags (TC)
 * \param dst_sel      MEM or TC_L2
 * \param int_sel      NONE or SEND_DATA_AFTER_WR_CONFIRM
 * \param data_sel     DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
 * \param buf          Buffer
 * \param va           GPU address
 * \param old_value    Previous fence value (for a bug workaround)
 * \param new_value    Fence value to write for this event.
 */
void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigned event,
                       unsigned event_flags, unsigned dst_sel, unsigned int_sel, unsigned data_sel,
                       struct si_resource *buf, uint64_t va, uint32_t new_fence,
                       unsigned query_type)
{
   unsigned op = EVENT_TYPE(event) |
                 EVENT_INDEX(event == V_028A90_CS_DONE || event == V_028A90_PS_DONE ? 6 : 5) |
                 event_flags;
   unsigned sel = EOP_DST_SEL(dst_sel) | EOP_INT_SEL(int_sel) | EOP_DATA_SEL(data_sel);
   bool compute_ib = !ctx->has_graphics || cs == ctx->prim_discard_compute_cs;

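   /* GFX9+ (and compute rings on GFX7+) use the RELEASE_MEM packet; older
    * graphics rings use EVENT_WRITE_EOP, which needs an extra workaround on
    * GFX7/GFX8 (see below). */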
   if (ctx->chip_class >= GFX9 || (compute_ib && ctx->chip_class >= GFX7)) {
      /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
       * counters) must immediately precede every timestamp event to
       * prevent a GPU hang on GFX9.
       *
       * Occlusion queries don't need to do it here, because they
       * always do ZPASS_DONE before the timestamp.
       */
      if (ctx->chip_class == GFX9 && !compute_ib && query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
          query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
          query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         struct si_resource *scratch = ctx->eop_bug_scratch;

         assert(16 * ctx->screen->info.num_render_backends <= scratch->b.b.width0);
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
         radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
         radeon_emit(cs, scratch->gpu_address);
         radeon_emit(cs, scratch->gpu_address >> 32);

         radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch, RADEON_USAGE_WRITE,
                                   RADEON_PRIO_QUERY);
      }

      radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, ctx->chip_class >= GFX9 ? 6 : 5, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, sel);
      radeon_emit(cs, va);        /* address lo */
      radeon_emit(cs, va >> 32);  /* address hi */
      radeon_emit(cs, new_fence); /* immediate data lo */
      radeon_emit(cs, 0);         /* immediate data hi */
      if (ctx->chip_class >= GFX9)
         radeon_emit(cs, 0); /* unused */
   } else {
      if (ctx->chip_class == GFX7 || ctx->chip_class == GFX8) {
         struct si_resource *scratch = ctx->eop_bug_scratch;
         uint64_t va = scratch->gpu_address;

         /* Two EOP events are required to make all engines go idle
          * (and optional cache flushes executed) before the timestamp
          * is written.
          */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
         radeon_emit(cs, op);
         radeon_emit(cs, va);
         radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
         radeon_emit(cs, 0); /* immediate data */
         radeon_emit(cs, 0); /* unused */

         radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch, RADEON_USAGE_WRITE,
                                   RADEON_PRIO_QUERY);
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
      radeon_emit(cs, new_fence); /* immediate data */
      radeon_emit(cs, 0);         /* unused */
   }

   if (buf) {
      radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
   }
}

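/* Worst-case number of dwords that si_cp_release_mem emits for a fence write;
 * GFX7/GFX8 need twice as many because of the double-EOP workaround above. */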
unsigned si_cp_write_fence_dwords(struct si_screen *screen)
{
   unsigned dwords = 6;

   if (screen->info.chip_class == GFX7 || screen->info.chip_class == GFX8)
      dwords *= 2;

   return dwords;
}

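/* Emit a WAIT_REG_MEM packet that makes the CP poll the memory at \p va until
 * (value & mask) compares to \p ref according to the function in \p flags.
 * For example, si_cp_wait_mem(ctx, cs, va, 0x80000000, 0x80000000,
 * WAIT_REG_MEM_EQUAL) would wait for a fence value written by
 * si_cp_release_mem (illustrative call). */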
void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, uint64_t va, uint32_t ref,
                    uint32_t mask, unsigned flags)
{
   radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
   radeon_emit(cs, WAIT_REG_MEM_MEM_SPACE(1) | flags);
   radeon_emit(cs, va);
   radeon_emit(cs, va >> 32);
   radeon_emit(cs, ref);  /* reference value */
   radeon_emit(cs, mask); /* mask */
   radeon_emit(cs, 4);    /* poll interval */
}

static void si_add_fence_dependency(struct si_context *sctx, struct pipe_fence_handle *fence)
{
   struct radeon_winsys *ws = sctx->ws;

   if (sctx->sdma_cs)
      ws->cs_add_fence_dependency(sctx->sdma_cs, fence, 0);
   ws->cs_add_fence_dependency(sctx->gfx_cs, fence, 0);
}

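/* Queue a syncobj signal on the gfx IB. The signal happens when the
 * submission that carries it finishes execution (see si_fence_server_signal). */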
static void si_add_syncobj_signal(struct si_context *sctx, struct pipe_fence_handle *fence)
{
   sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
}

static void si_fence_reference(struct pipe_screen *screen, struct pipe_fence_handle **dst,
                               struct pipe_fence_handle *src)
{
   struct radeon_winsys *ws = ((struct si_screen *)screen)->ws;
   struct si_multi_fence **sdst = (struct si_multi_fence **)dst;
   struct si_multi_fence *ssrc = (struct si_multi_fence *)src;

   if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
      ws->fence_reference(&(*sdst)->gfx, NULL);
      ws->fence_reference(&(*sdst)->sdma, NULL);
      tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
      si_resource_reference(&(*sdst)->fine.buf, NULL);
      FREE(*sdst);
   }
   *sdst = ssrc;
}

static struct si_multi_fence *si_create_multi_fence()
{
   struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);

   return fence;
}

struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
                                          struct tc_unflushed_batch_token *tc_token)
{
   struct si_multi_fence *fence = si_create_multi_fence();
   if (!fence)
      return NULL;

   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);

   return (struct pipe_fence_handle *)fence;
}

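/* Check whether the fine-grained fence value has already been written by the
 * GPU. The mapping is unsynchronized, so this never waits on the winsys. */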
static bool si_fine_fence_signaled(struct radeon_winsys *rws, const struct si_fine_fence *fine)
{
   char *map =
      rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ | PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!map)
      return false;

   uint32_t *fence = (uint32_t *)(map + fine->offset);
   return *fence != 0;
}

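/* Allocate a 4-byte fine-grained fence slot and make the CP write 0x80000000
 * into it, either from the PFP (top of pipe) or via an EOP event (bottom of
 * pipe). */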
static void si_fine_fence_set(struct si_context *ctx, struct si_fine_fence *fine, unsigned flags)
{
   uint32_t *fence_ptr;

   assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);

   /* Use cached system memory for the fence. */
   u_upload_alloc(ctx->cached_gtt_allocator, 0, 4, 4, &fine->offset,
                  (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
   if (!fine->buf)
      return;

   *fence_ptr = 0;

   if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
      uint32_t value = 0x80000000;

      si_cp_write_data(ctx, fine->buf, fine->offset, 4, V_370_MEM, V_370_PFP, &value);
   } else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
      uint64_t fence_va = fine->buf->gpu_address + fine->offset;

      radeon_add_to_buffer_list(ctx, ctx->gfx_cs, fine->buf, RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
      si_cp_release_mem(ctx, ctx->gfx_cs, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM,
                        EOP_INT_SEL_NONE, EOP_DATA_SEL_VALUE_32BIT, NULL, fence_va, 0x80000000,
                        PIPE_QUERY_GPU_FINISHED);
   } else {
      assert(false);
   }
}

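/* Wait on all parts of a multi fence: the util_queue fence (resolving a
 * deferred flush through the threaded context if needed), then the sdma
 * fence, the fine-grained fence shortcut, and finally the gfx fence,
 * recomputing the remaining timeout after each blocking step. */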
static bool si_fence_finish(struct pipe_screen *screen, struct pipe_context *ctx,
                            struct pipe_fence_handle *fence, uint64_t timeout)
{
   struct radeon_winsys *rws = ((struct si_screen *)screen)->ws;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
   struct si_context *sctx;
   int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

   ctx = threaded_context_unwrap_sync(ctx);
   sctx = (struct si_context *)(ctx ? ctx : NULL);

   if (!util_queue_fence_is_signalled(&sfence->ready)) {
      if (sfence->tc_token) {
         /* Ensure that si_flush_from_st will be called for
          * this fence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the fence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(ctx, sfence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&sfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
            return false;
      }

      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }

   if (sfence->sdma) {
      if (!rws->fence_wait(rws, sfence->sdma, timeout))
         return false;

      /* Recompute the timeout after waiting. */
      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }

   if (!sfence->gfx)
      return true;

   if (sfence->fine.buf && si_fine_fence_signaled(rws, &sfence->fine)) {
      rws->fence_reference(&sfence->gfx, NULL);
      si_resource_reference(&sfence->fine.buf, NULL);
      return true;
   }

   /* Flush the gfx IB if it hasn't been flushed yet. */
   if (sctx && sfence->gfx_unflushed.ctx == sctx &&
       sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
      /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
       * spec says:
       *
       *    "If the sync object being blocked upon will not be
       *     signaled in finite time (for example, by an associated
       *     fence command issued previously, but not yet flushed to
       *     the graphics pipeline), then ClientWaitSync may hang
       *     forever. To help prevent this behavior, if
       *     ClientWaitSync is called and all of the following are
       *     true:
       *
       *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
       *     * sync is unsignaled when ClientWaitSync is called,
       *     * and the calls to ClientWaitSync and FenceSync were
       *       issued from the same context,
       *
       *     then the GL will behave as if the equivalent of Flush
       *     were inserted immediately after the creation of sync."
       *
       * This means we need to flush for such fences even when we're
       * not going to wait.
       */
      si_flush_gfx_cs(sctx, (timeout ? 0 : PIPE_FLUSH_ASYNC) | RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
                      NULL);
      sfence->gfx_unflushed.ctx = NULL;

      if (!timeout)
         return false;

      /* Recompute the timeout after all that. */
      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }

   if (rws->fence_wait(rws, sfence->gfx, timeout))
      return true;

   /* Re-check in case the GPU is slow or hangs, but the commands before
    * the fine-grained fence have completed. */
   if (sfence->fine.buf && si_fine_fence_signaled(rws, &sfence->fine))
      return true;

   return false;
}

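/* Import a sync_file or drm-syncobj fd as the gfx fence of a new multi fence. */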
static void si_create_fence_fd(struct pipe_context *ctx, struct pipe_fence_handle **pfence, int fd,
                               enum pipe_fd_type type)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_multi_fence *sfence;

   *pfence = NULL;

   sfence = si_create_multi_fence();
   if (!sfence)
      return;

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      if (!sscreen->info.has_fence_to_handle)
         goto finish;

      sfence->gfx = ws->fence_import_sync_file(ws, fd);
      break;

   case PIPE_FD_TYPE_SYNCOBJ:
      if (!sscreen->info.has_syncobj)
         goto finish;

      sfence->gfx = ws->fence_import_syncobj(ws, fd);
      break;

   default:
      unreachable("bad fence fd type when importing");
   }

finish:
   if (!sfence->gfx) {
      FREE(sfence);
      return;
   }

   *pfence = (struct pipe_fence_handle *)sfence;
}

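/* Export a fence as a sync_file fd. If both gfx and sdma fences exist, they
 * are merged into a single fd with sync_accumulate(). */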
static int si_fence_get_fd(struct pipe_screen *screen, struct pipe_fence_handle *fence)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
   int gfx_fd = -1, sdma_fd = -1;

   if (!sscreen->info.has_fence_to_handle)
      return -1;

   util_queue_fence_wait(&sfence->ready);

   /* Deferred fences aren't supported. */
   assert(!sfence->gfx_unflushed.ctx);
   if (sfence->gfx_unflushed.ctx)
      return -1;

   if (sfence->sdma) {
      sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
      if (sdma_fd == -1)
         return -1;
   }
   if (sfence->gfx) {
      gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
      if (gfx_fd == -1) {
         if (sdma_fd != -1)
            close(sdma_fd);
         return -1;
      }
   }

   /* If we don't have FDs at this point, it means we don't have fences
    * either. */
   if (sdma_fd == -1 && gfx_fd == -1)
      return ws->export_signalled_sync_file(ws);
   if (sdma_fd == -1)
      return gfx_fd;
   if (gfx_fd == -1)
      return sdma_fd;

   /* Get a fence that will be a combination of both fences. */
   sync_accumulate("radeonsi", &gfx_fd, sdma_fd);
   close(sdma_fd);
   return gfx_fd;
}

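/* pipe_context::flush implementation. Flushes the sdma and gfx IBs (or, when
 * the constraints documented below allow it, creates a deferred gfx fence
 * instead of flushing) and wraps the resulting fences in a si_multi_fence. */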
static void si_flush_from_st(struct pipe_context *ctx, struct pipe_fence_handle **fence,
                             unsigned flags)
{
   struct pipe_screen *screen = ctx->screen;
   struct si_context *sctx = (struct si_context *)ctx;
   struct radeon_winsys *ws = sctx->ws;
   struct pipe_fence_handle *gfx_fence = NULL;
   struct pipe_fence_handle *sdma_fence = NULL;
   bool deferred_fence = false;
   struct si_fine_fence fine = {};
   unsigned rflags = PIPE_FLUSH_ASYNC;

   if (flags & PIPE_FLUSH_END_OF_FRAME)
      rflags |= PIPE_FLUSH_END_OF_FRAME;

   if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
      assert(flags & PIPE_FLUSH_DEFERRED);
      assert(fence);

      si_fine_fence_set(sctx, &fine, flags);
   }

   /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
   if (sctx->sdma_cs)
      si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);

   if (!radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size)) {
      if (fence)
         ws->fence_reference(&gfx_fence, sctx->last_gfx_fence);
      if (!(flags & PIPE_FLUSH_DEFERRED))
         ws->cs_sync_flush(sctx->gfx_cs);
   } else {
      /* Instead of flushing, create a deferred fence. Constraints:
       * - the gallium frontend must allow a deferred flush.
       * - the gallium frontend must request a fence.
       * - fence_get_fd is not allowed.
       * Thread safety in fence_finish must be ensured by the gallium frontend.
       */
      if (flags & PIPE_FLUSH_DEFERRED && !(flags & PIPE_FLUSH_FENCE_FD) && fence) {
         gfx_fence = sctx->ws->cs_get_next_fence(sctx->gfx_cs);
         deferred_fence = true;
      } else {
         si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
      }
   }

   /* Both engines can signal out of order, so we need to keep both fences. */
   if (fence) {
      struct si_multi_fence *multi_fence;

      if (flags & TC_FLUSH_ASYNC) {
         multi_fence = (struct si_multi_fence *)*fence;
         assert(multi_fence);
      } else {
         multi_fence = si_create_multi_fence();
         if (!multi_fence) {
            ws->fence_reference(&sdma_fence, NULL);
            ws->fence_reference(&gfx_fence, NULL);
            goto finish;
         }

         screen->fence_reference(screen, fence, NULL);
         *fence = (struct pipe_fence_handle *)multi_fence;
      }

      /* If both fences are NULL, fence_finish will always return true. */
      multi_fence->gfx = gfx_fence;
      multi_fence->sdma = sdma_fence;

      if (deferred_fence) {
         multi_fence->gfx_unflushed.ctx = sctx;
         multi_fence->gfx_unflushed.ib_index = sctx->num_gfx_cs_flushes;
      }

      multi_fence->fine = fine;
      fine.buf = NULL;

      if (flags & TC_FLUSH_ASYNC) {
         util_queue_fence_signal(&multi_fence->ready);
         tc_unflushed_batch_token_reference(&multi_fence->tc_token, NULL);
      }
   } else {
      assert(!fine.buf);
   }
finish:
   if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC))) {
      if (sctx->sdma_cs)
         ws->cs_sync_flush(sctx->sdma_cs);
      ws->cs_sync_flush(sctx->gfx_cs);
   }
}

static void si_fence_server_signal(struct pipe_context *ctx, struct pipe_fence_handle *fence)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;

   /* We should have at least one syncobj to signal */
   assert(sfence->sdma || sfence->gfx);

   if (sfence->sdma)
      si_add_syncobj_signal(sctx, sfence->sdma);
   if (sfence->gfx)
      si_add_syncobj_signal(sctx, sfence->gfx);

   /**
    * The spec does not require a flush here. We insert a flush
    * because syncobj based signals are not directly placed into
    * the command stream. Instead the signal happens when the
    * submission associated with the syncobj finishes execution.
    *
    * Therefore, we must make sure that we flush the pipe to avoid
    * new work being emitted and getting executed before the signal
    * operation.
    *
    * Set sctx->initial_gfx_cs_size to force IB submission even if
    * it is empty.
    */
   sctx->initial_gfx_cs_size = 0;
   si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
}

static void si_fence_server_sync(struct pipe_context *ctx, struct pipe_fence_handle *fence)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;

   util_queue_fence_wait(&sfence->ready);

   /* Unflushed fences from the same context are no-ops. */
   if (sfence->gfx_unflushed.ctx && sfence->gfx_unflushed.ctx == sctx)
      return;

   /* All unflushed commands will not start execution before this fence
    * dependency is signalled. That's fine. Flushing is very expensive
    * if we get fence_server_sync after every draw call. (which happens
    * with Android/SurfaceFlinger)
    *
    * In a nutshell, when CPU overhead is greater than GPU overhead,
    * or when the time it takes to execute an IB on the GPU is less than
    * the time it takes to create and submit that IB, flushing decreases
    * performance. Therefore, DO NOT FLUSH.
    */
   if (sfence->sdma)
      si_add_fence_dependency(sctx, sfence->sdma);
   if (sfence->gfx)
      si_add_fence_dependency(sctx, sfence->gfx);
}

void si_init_fence_functions(struct si_context *ctx)
{
   ctx->b.flush = si_flush_from_st;
   ctx->b.create_fence_fd = si_create_fence_fd;
   ctx->b.fence_server_sync = si_fence_server_sync;
   ctx->b.fence_server_signal = si_fence_server_signal;
}

void si_init_screen_fence_functions(struct si_screen *screen)
{
   screen->b.fence_finish = si_fence_finish;
   screen->b.fence_reference = si_fence_reference;
   screen->b.fence_get_fd = si_fence_get_fd;
}