/*
 * Copyright 2013-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <libsync.h>

#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "si_build_pm4.h"
struct si_fine_fence {
   struct si_resource *buf;
   unsigned offset;
};
struct si_multi_fence {
   struct pipe_reference reference;
   struct pipe_fence_handle *gfx;
   struct pipe_fence_handle *sdma;
   struct tc_unflushed_batch_token *tc_token;
   struct util_queue_fence ready;

   /* If the context wasn't flushed at fence creation, this is non-NULL. */
   struct {
      struct si_context *ctx;
      unsigned ib_index;
   } gfx_unflushed;

   struct si_fine_fence fine;
};
/**
 * Write an EOP (end-of-pipe) event with an optional fence value.
 *
 * \param event        EVENT_TYPE_*
 * \param event_flags  Optional cache flush flags (TC)
 * \param dst_sel      MEM or TC_L2
 * \param int_sel      NONE or SEND_DATA_AFTER_WR_CONFIRM
 * \param data_sel     DISCARD, VALUE_32BIT, TIMESTAMP, or GDS
 * \param buf          Buffer backing \p va (may be NULL)
 * \param va           GPU address
 * \param new_fence    Fence value to write for this event
 * \param query_type   Query type, used for the GFX9 timestamp bug workaround
 */
void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
                       unsigned event, unsigned event_flags,
                       unsigned dst_sel, unsigned int_sel, unsigned data_sel,
                       struct si_resource *buf, uint64_t va,
                       uint32_t new_fence, unsigned query_type)
{
   unsigned op = EVENT_TYPE(event) |
                 EVENT_INDEX(event == V_028A90_CS_DONE ||
                             event == V_028A90_PS_DONE ? 6 : 5) |
                 event_flags;
   unsigned sel = EOP_DST_SEL(dst_sel) |
                  EOP_INT_SEL(int_sel) |
                  EOP_DATA_SEL(data_sel);
   if (ctx->chip_class >= GFX9 || cs == ctx->prim_discard_compute_cs) {
      /* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
       * counters) must immediately precede every timestamp event to
       * prevent a GPU hang on GFX9.
       *
       * Occlusion queries don't need to do it here, because they
       * always do ZPASS_DONE before the timestamp.
       */
      if (ctx->chip_class == GFX9 &&
          cs != ctx->prim_discard_compute_cs &&
          query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
          query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
          query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
         struct si_resource *scratch = ctx->eop_bug_scratch;

         assert(16 * ctx->screen->info.num_render_backends <=
                scratch->b.b.width0);
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
         radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
         radeon_emit(cs, scratch->gpu_address);
         radeon_emit(cs, scratch->gpu_address >> 32);

         radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
                                   RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
      }

      radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, ctx->chip_class >= GFX9 ? 6 : 5, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, sel);
      radeon_emit(cs, va);        /* address lo */
      radeon_emit(cs, va >> 32);  /* address hi */
      radeon_emit(cs, new_fence); /* immediate data lo */
      radeon_emit(cs, 0);         /* immediate data hi */
      if (ctx->chip_class >= GFX9)
         radeon_emit(cs, 0);      /* unused */
   } else {
      if (ctx->chip_class == GFX7 ||
          ctx->chip_class == GFX8) {
         struct si_resource *scratch = ctx->eop_bug_scratch;
         uint64_t va = scratch->gpu_address;

         /* Two EOP events are required to make all engines go idle
          * (and optional cache flushes executed) before the timestamp
          * is written.
          */
         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
         radeon_emit(cs, op);
         radeon_emit(cs, va);
         radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
         radeon_emit(cs, 0); /* immediate data */
         radeon_emit(cs, 0); /* unused */

         radeon_add_to_buffer_list(ctx, ctx->gfx_cs, scratch,
                                   RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
      }

      radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
      radeon_emit(cs, op);
      radeon_emit(cs, va);
      radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
      radeon_emit(cs, new_fence); /* immediate data */
      radeon_emit(cs, 0);         /* unused */
   }
   if (buf) {
      radeon_add_to_buffer_list(ctx, ctx->gfx_cs, buf, RADEON_USAGE_WRITE,
                                RADEON_PRIO_QUERY);
   }
}
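
/* Dword budget reserved for a single CP fence write. GFX7/GFX8 double it
 * because the EOP-event bug workaround above emits two EOP packets. */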
unsigned si_cp_write_fence_dwords(struct si_screen *screen)
{
   unsigned dwords = 6;

   if (screen->info.chip_class == GFX7 ||
       screen->info.chip_class == GFX8)
      dwords *= 2;

   return dwords;
}
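
/* Emit a WAIT_REG_MEM packet that stalls the CP until the 32-bit value at
 * "va", masked with "mask", satisfies the comparison selected by "flags"
 * against "ref". */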
void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
                    uint64_t va, uint32_t ref, uint32_t mask, unsigned flags)
{
   radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
   radeon_emit(cs, WAIT_REG_MEM_MEM_SPACE(1) | flags);
   radeon_emit(cs, va);       /* address lo */
   radeon_emit(cs, va >> 32); /* address hi */
   radeon_emit(cs, ref);      /* reference value */
   radeon_emit(cs, mask);     /* mask */
   radeon_emit(cs, 4);        /* poll interval */
}
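
/* Make all work in the gfx (and, if present, SDMA) IB wait for the given
 * fence before it starts executing. */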
static void si_add_fence_dependency(struct si_context *sctx,
                                    struct pipe_fence_handle *fence)
{
   struct radeon_winsys *ws = sctx->ws;

   if (sctx->dma_cs)
      ws->cs_add_fence_dependency(sctx->dma_cs, fence, 0);
   ws->cs_add_fence_dependency(sctx->gfx_cs, fence, 0);
}
static void si_add_syncobj_signal(struct si_context *sctx,
                                  struct pipe_fence_handle *fence)
{
   sctx->ws->cs_add_syncobj_signal(sctx->gfx_cs, fence);
}
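
/* pipe_screen::fence_reference implementation: when the last reference to
 * *dst is dropped, release its gfx/sdma winsys fences, the TC token, and the
 * fine-fence buffer before pointing *dst at src. */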
static void si_fence_reference(struct pipe_screen *screen,
                               struct pipe_fence_handle **dst,
                               struct pipe_fence_handle *src)
{
   struct radeon_winsys *ws = ((struct si_screen *)screen)->ws;
   struct si_multi_fence **sdst = (struct si_multi_fence **)dst;
   struct si_multi_fence *ssrc = (struct si_multi_fence *)src;

   if (pipe_reference(&(*sdst)->reference, &ssrc->reference)) {
      ws->fence_reference(&(*sdst)->gfx, NULL);
      ws->fence_reference(&(*sdst)->sdma, NULL);
      tc_unflushed_batch_token_reference(&(*sdst)->tc_token, NULL);
      si_resource_reference(&(*sdst)->fine.buf, NULL);
      FREE(*sdst);
   }
   *sdst = ssrc;
}
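
/* Allocate a zeroed si_multi_fence with a reference count of 1 and an
 * initialized (signalled) "ready" queue fence. */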
static struct si_multi_fence *si_create_multi_fence()
{
   struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   util_queue_fence_init(&fence->ready);

   return fence;
}
struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
                                          struct tc_unflushed_batch_token *tc_token)
{
   struct si_multi_fence *fence = si_create_multi_fence();
   if (!fence)
      return NULL;

   util_queue_fence_reset(&fence->ready);
   tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);

   return (struct pipe_fence_handle *)fence;
}
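
/* Check whether a fine-grained fence has been written by the GPU by reading
 * its 32-bit slot without synchronizing against in-flight command buffers. */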
static bool si_fine_fence_signaled(struct radeon_winsys *rws,
                                   const struct si_fine_fence *fine)
{
   char *map = rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ |
                                                     PIPE_TRANSFER_UNSYNCHRONIZED);
   if (!map)
      return false;

   uint32_t *fence = (uint32_t *)(map + fine->offset);
   return *fence != 0;
}
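
/* Allocate a 4-byte fence slot in cached GTT memory and make the CP write a
 * nonzero value to it, either from the PFP (top of pipe) or via a
 * bottom-of-pipe EOP event, depending on the flush flags. */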
static void si_fine_fence_set(struct si_context *ctx,
                              struct si_fine_fence *fine,
                              unsigned flags)
{
   uint32_t *fence_ptr;

   assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);

   /* Use cached system memory for the fence. */
   u_upload_alloc(ctx->cached_gtt_allocator, 0, 4, 4,
                  &fine->offset, (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
   if (!fine->buf)
      return;

   *fence_ptr = 0;

   if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
      uint32_t value = 0x80000000;

      si_cp_write_data(ctx, fine->buf, fine->offset, 4,
                       V_370_MEM, V_370_PFP, &value);
   } else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
      uint64_t fence_va = fine->buf->gpu_address + fine->offset;

      radeon_add_to_buffer_list(ctx, ctx->gfx_cs, fine->buf,
                                RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
      si_cp_release_mem(ctx, ctx->gfx_cs,
                        V_028A90_BOTTOM_OF_PIPE_TS, 0,
                        EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                        EOP_DATA_SEL_VALUE_32BIT,
                        NULL, fence_va, 0x80000000,
                        PIPE_QUERY_GPU_FINISHED);
   } else {
      assert(false);
   }
}
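
/* pipe_screen::fence_finish implementation. Waits (against an absolute
 * timeout) for the threaded-context "ready" fence, the SDMA fence, and the
 * gfx fence in turn, taking the fine-grained fence shortcut when it has
 * already signaled and flushing a still-unflushed gfx IB when required by
 * the ClientWaitSync rules quoted below. */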
static boolean si_fence_finish(struct pipe_screen *screen,
                               struct pipe_context *ctx,
                               struct pipe_fence_handle *fence,
                               uint64_t timeout)
{
   struct radeon_winsys *rws = ((struct si_screen *)screen)->ws;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
   struct si_context *sctx;
   int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

   ctx = threaded_context_unwrap_sync(ctx);
   sctx = (struct si_context *)(ctx ? ctx : NULL);
   if (!util_queue_fence_is_signalled(&sfence->ready)) {
      if (sfence->tc_token) {
         /* Ensure that si_flush_from_st will be called for
          * this fence, but only if we're in the API thread
          * where the context is current.
          *
          * Note that the batch containing the flush may already
          * be in flight in the driver thread, so the fence
          * may not be ready yet when this call returns.
          */
         threaded_context_flush(ctx, sfence->tc_token, timeout == 0);
      }

      if (!timeout)
         return false;

      if (timeout == PIPE_TIMEOUT_INFINITE) {
         util_queue_fence_wait(&sfence->ready);
      } else {
         if (!util_queue_fence_wait_timeout(&sfence->ready, abs_timeout))
            return false;
      }

      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }
   if (sfence->sdma) {
      if (!rws->fence_wait(rws, sfence->sdma, timeout))
         return false;

      /* Recompute the timeout after waiting. */
      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }
   if (!sfence->gfx)
      return true;

   if (sfence->fine.buf &&
       si_fine_fence_signaled(rws, &sfence->fine)) {
      rws->fence_reference(&sfence->gfx, NULL);
      si_resource_reference(&sfence->fine.buf, NULL);
      return true;
   }
   /* Flush the gfx IB if it hasn't been flushed yet. */
   if (sctx && sfence->gfx_unflushed.ctx == sctx &&
       sfence->gfx_unflushed.ib_index == sctx->num_gfx_cs_flushes) {
      /* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
       * spec says:
       *
       *    "If the sync object being blocked upon will not be
       *     signaled in finite time (for example, by an associated
       *     fence command issued previously, but not yet flushed to
       *     the graphics pipeline), then ClientWaitSync may hang
       *     forever. To help prevent this behavior, if
       *     ClientWaitSync is called and all of the following are
       *     true:
       *
       *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
       *     * sync is unsignaled when ClientWaitSync is called,
       *     * and the calls to ClientWaitSync and FenceSync were
       *       issued from the same context,
       *
       *     then the GL will behave as if the equivalent of Flush
       *     were inserted immediately after the creation of sync."
       *
       * This means we need to flush for such fences even when we're
       * not going to wait.
       */
      si_flush_gfx_cs(sctx,
                      (timeout ? 0 : PIPE_FLUSH_ASYNC) |
                      RADEON_FLUSH_START_NEXT_GFX_IB_NOW,
                      NULL);
      sfence->gfx_unflushed.ctx = NULL;

      if (!timeout)
         return false;
      /* Recompute the timeout after all that. */
      if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
         int64_t time = os_time_get_nano();
         timeout = abs_timeout > time ? abs_timeout - time : 0;
      }
   }

   if (rws->fence_wait(rws, sfence->gfx, timeout))
      return true;

   /* Re-check in case the GPU is slow or hangs, but the commands before
    * the fine-grained fence have completed. */
   if (sfence->fine.buf &&
       si_fine_fence_signaled(rws, &sfence->fine))
      return true;

   return false;
}
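
/* Import a fence from a file descriptor (a sync_file or a drm syncobj) and
 * wrap it in a new si_multi_fence; *pfence is left NULL on failure. */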
static void si_create_fence_fd(struct pipe_context *ctx,
                               struct pipe_fence_handle **pfence, int fd,
                               enum pipe_fd_type type)
{
   struct si_screen *sscreen = (struct si_screen *)ctx->screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_multi_fence *sfence;

   *pfence = NULL;

   sfence = si_create_multi_fence();
   if (!sfence)
      return;

   switch (type) {
   case PIPE_FD_TYPE_NATIVE_SYNC:
      if (!sscreen->info.has_fence_to_handle)
         goto finish;

      sfence->gfx = ws->fence_import_sync_file(ws, fd);
      break;

   case PIPE_FD_TYPE_SYNCOBJ:
      if (!sscreen->info.has_syncobj)
         goto finish;

      sfence->gfx = ws->fence_import_syncobj(ws, fd);
      break;

   default:
      unreachable("bad fence fd type when importing");
   }

finish:
   if (!sfence->gfx) {
      FREE(sfence);
      return;
   }

   *pfence = (struct pipe_fence_handle *)sfence;
}
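
/* Export a fence as a sync_file fd. If both a gfx and an SDMA fence exist,
 * they are merged into a single fd with sync_accumulate(); returns -1 on
 * failure. */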
static int si_fence_get_fd(struct pipe_screen *screen,
                           struct pipe_fence_handle *fence)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct radeon_winsys *ws = sscreen->ws;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;
   int gfx_fd = -1, sdma_fd = -1;

   if (!sscreen->info.has_fence_to_handle)
      return -1;

   util_queue_fence_wait(&sfence->ready);

   /* Deferred fences aren't supported. */
   assert(!sfence->gfx_unflushed.ctx);
   if (sfence->gfx_unflushed.ctx)
      return -1;

   if (sfence->sdma) {
      sdma_fd = ws->fence_export_sync_file(ws, sfence->sdma);
      if (sdma_fd == -1)
         return -1;
   }
   if (sfence->gfx) {
      gfx_fd = ws->fence_export_sync_file(ws, sfence->gfx);
      if (gfx_fd == -1) {
         if (sdma_fd != -1)
            close(sdma_fd);
         return -1;
      }
   }

   /* If we don't have FDs at this point, it means we don't have fences
    * either. */
   if (sdma_fd == -1 && gfx_fd == -1)
      return ws->export_signalled_sync_file(ws);
   if (sdma_fd == -1)
      return gfx_fd;
   if (gfx_fd == -1)
      return sdma_fd;

   /* Get a fence that will be a combination of both fences. */
   sync_accumulate("radeonsi", &gfx_fd, sdma_fd);
   close(sdma_fd);
   return gfx_fd;
}
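
/* pipe_context::flush implementation. Flushes the SDMA and gfx IBs (or
 * creates a deferred gfx fence when the state tracker allows it) and, if
 * requested, returns a si_multi_fence combining the per-engine fences and
 * the optional fine-grained fence. */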
static void si_flush_from_st(struct pipe_context *ctx,
                             struct pipe_fence_handle **fence,
                             unsigned flags)
{
   struct pipe_screen *screen = ctx->screen;
   struct si_context *sctx = (struct si_context *)ctx;
   struct radeon_winsys *ws = sctx->ws;
   struct pipe_fence_handle *gfx_fence = NULL;
   struct pipe_fence_handle *sdma_fence = NULL;
   bool deferred_fence = false;
   struct si_fine_fence fine = {};
   unsigned rflags = PIPE_FLUSH_ASYNC;

   if (flags & PIPE_FLUSH_END_OF_FRAME)
      rflags |= PIPE_FLUSH_END_OF_FRAME;

   if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
      assert(flags & PIPE_FLUSH_DEFERRED);
      assert(fence);

      si_fine_fence_set(sctx, &fine, flags);
   }
   /* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
   if (sctx->dma_cs)
      si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);

   if (!radeon_emitted(sctx->gfx_cs, sctx->initial_gfx_cs_size)) {
      if (fence)
         ws->fence_reference(&gfx_fence, sctx->last_gfx_fence);
      if (!(flags & PIPE_FLUSH_DEFERRED))
         ws->cs_sync_flush(sctx->gfx_cs);
   } else {
      /* Instead of flushing, create a deferred fence. Constraints:
       * - The state tracker must allow a deferred flush.
       * - The state tracker must request a fence.
       * - fence_get_fd is not allowed.
       * Thread safety in fence_finish must be ensured by the state tracker.
       */
      if (flags & PIPE_FLUSH_DEFERRED &&
          !(flags & PIPE_FLUSH_FENCE_FD) &&
          fence) {
         gfx_fence = sctx->ws->cs_get_next_fence(sctx->gfx_cs);
         deferred_fence = true;
      } else {
         si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
      }
   }
   /* Both engines can signal out of order, so we need to keep both fences. */
   if (fence) {
      struct si_multi_fence *multi_fence;

      if (flags & TC_FLUSH_ASYNC) {
         multi_fence = (struct si_multi_fence *)*fence;
         assert(multi_fence);
      } else {
         multi_fence = si_create_multi_fence();
         if (!multi_fence) {
            ws->fence_reference(&sdma_fence, NULL);
            ws->fence_reference(&gfx_fence, NULL);
            goto finish;
         }

         screen->fence_reference(screen, fence, NULL);
         *fence = (struct pipe_fence_handle *)multi_fence;
      }

      /* If both fences are NULL, fence_finish will always return true. */
      multi_fence->gfx = gfx_fence;
      multi_fence->sdma = sdma_fence;

      if (deferred_fence) {
         multi_fence->gfx_unflushed.ctx = sctx;
         multi_fence->gfx_unflushed.ib_index = sctx->num_gfx_cs_flushes;
      }

      multi_fence->fine = fine;
      fine.buf = NULL;

      if (flags & TC_FLUSH_ASYNC) {
         util_queue_fence_signal(&multi_fence->ready);
         tc_unflushed_batch_token_reference(&multi_fence->tc_token, NULL);
      }
   }
   assert(!fine.buf);
finish:
   if (!(flags & (PIPE_FLUSH_DEFERRED | PIPE_FLUSH_ASYNC))) {
      if (sctx->dma_cs)
         ws->cs_sync_flush(sctx->dma_cs);
      ws->cs_sync_flush(sctx->gfx_cs);
   }
}
static void si_fence_server_signal(struct pipe_context *ctx,
                                   struct pipe_fence_handle *fence)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;

   /* We should have at least one syncobj to signal. */
   assert(sfence->sdma || sfence->gfx);

   if (sfence->sdma)
      si_add_syncobj_signal(sctx, sfence->sdma);
   if (sfence->gfx)
      si_add_syncobj_signal(sctx, sfence->gfx);

   /* The spec does not require a flush here. We insert a flush
    * because syncobj based signals are not directly placed into
    * the command stream. Instead the signal happens when the
    * submission associated with the syncobj finishes execution.
    *
    * Therefore, we must make sure that we flush the pipe to avoid
    * new work being emitted and getting executed before the signal
    * operation.
    */
   si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
}
static void si_fence_server_sync(struct pipe_context *ctx,
                                 struct pipe_fence_handle *fence)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_multi_fence *sfence = (struct si_multi_fence *)fence;

   util_queue_fence_wait(&sfence->ready);

   /* Unflushed fences from the same context are no-ops. */
   if (sfence->gfx_unflushed.ctx &&
       sfence->gfx_unflushed.ctx == sctx)
      return;

   /* All unflushed commands will not start execution before
    * this fence dependency is signalled.
    *
    * Therefore we must flush before inserting the dependency.
    */
   si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);

   if (sfence->sdma)
      si_add_fence_dependency(sctx, sfence->sdma);
   if (sfence->gfx)
      si_add_fence_dependency(sctx, sfence->gfx);
}
void si_init_fence_functions(struct si_context *ctx)
{
   ctx->b.flush = si_flush_from_st;
   ctx->b.create_fence_fd = si_create_fence_fd;
   ctx->b.fence_server_sync = si_fence_server_sync;
   ctx->b.fence_server_signal = si_fence_server_signal;
}
void si_init_screen_fence_functions(struct si_screen *screen)
{
   screen->b.fence_finish = si_fence_finish;
   screen->b.fence_reference = si_fence_reference;
   screen->b.fence_get_fd = si_fence_get_fd;
}