/*
 * Copyright 2013-2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <libsync.h>

#include "util/os_time.h"
#include "util/u_memory.h"
#include "util/u_queue.h"
#include "util/u_upload_mgr.h"

#include "si_pipe.h"
#include "radeon/r600_cs.h"

struct si_fine_fence {
	struct r600_resource *buf;
	unsigned offset;
};

struct si_multi_fence {
	struct pipe_reference reference;
	struct pipe_fence_handle *gfx;
	struct pipe_fence_handle *sdma;
	struct tc_unflushed_batch_token *tc_token;
	struct util_queue_fence ready;

	/* If the context wasn't flushed at fence creation, this is non-NULL. */
	struct {
		struct si_context *ctx;
		unsigned ib_index;
	} gfx_unflushed;

	struct si_fine_fence fine;
};

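/* Design note: one pipe_fence_handle can stand for several things at once,
 * because the gfx and sdma engines retire work independently and
 * fence_finish() must wait for both ring fences, plus an optional
 * fine-grained memory fence. "ready" is signaled once the flush backing
 * this fence has actually been recorded, which matters when the threaded
 * context defers the flush to the driver thread.
 */
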
/**
 * Write an EOP event.
 *
 * \param event		EVENT_TYPE_*
 * \param event_flags	Optional cache flush flags (TC)
 * \param data_sel	1 = fence, 3 = timestamp
 * \param buf		Buffer
 * \param va		GPU address
 * \param new_fence	Fence value to write for this event
 * \param query_type	PIPE_QUERY_*, used to decide whether the GFX9
 *			timestamp bug workaround is needed
 */
void si_gfx_write_event_eop(struct si_context *ctx,
			    unsigned event, unsigned event_flags,
			    unsigned data_sel,
			    struct r600_resource *buf, uint64_t va,
			    uint32_t new_fence, unsigned query_type)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
	unsigned op = EVENT_TYPE(event) |
		      EVENT_INDEX(5) |
		      event_flags;
	unsigned sel = EOP_DATA_SEL(data_sel);

	/* Wait for write confirmation before writing data, but don't send
	 * an interrupt. */
	if (data_sel != EOP_DATA_SEL_DISCARD)
		sel |= EOP_INT_SEL(EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM);

	if (ctx->b.chip_class >= GFX9) {
		/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
		 * counters) must immediately precede every timestamp event to
		 * prevent a GPU hang on GFX9.
		 *
		 * Occlusion queries don't need to do it here, because they
		 * always do ZPASS_DONE before the timestamp.
		 */
		if (ctx->b.chip_class == GFX9 &&
		    query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
		    query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
			struct r600_resource *scratch = ctx->b.eop_bug_scratch;

			assert(16 * ctx->b.screen->info.num_render_backends <=
			       scratch->b.b.width0);
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 2, 0));
			radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1));
			radeon_emit(cs, scratch->gpu_address);
			radeon_emit(cs, scratch->gpu_address >> 32);

			radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_RELEASE_MEM, 6, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, sel);
		radeon_emit(cs, va);		/* address lo */
		radeon_emit(cs, va >> 32);	/* address hi */
		radeon_emit(cs, new_fence);	/* immediate data lo */
		radeon_emit(cs, 0);		/* immediate data hi */
		radeon_emit(cs, 0);		/* unused */
	} else {
		if (ctx->b.chip_class == CIK ||
		    ctx->b.chip_class == VI) {
			struct r600_resource *scratch = ctx->b.eop_bug_scratch;
			uint64_t va = scratch->gpu_address;

			/* Two EOP events are required to make all engines go idle
			 * (and optional cache flushes executed) before the timestamp
			 * is written.
			 */
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
			radeon_emit(cs, op);
			radeon_emit(cs, va);
			radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
			radeon_emit(cs, 0); /* immediate data */
			radeon_emit(cs, 0); /* unused */

			radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, scratch,
						  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
		}

		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0));
		radeon_emit(cs, op);
		radeon_emit(cs, va);
		radeon_emit(cs, ((va >> 32) & 0xffff) | sel);
		radeon_emit(cs, new_fence); /* immediate data */
		radeon_emit(cs, 0); /* unused */
	}

	if (buf) {
		radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, buf, RADEON_USAGE_WRITE,
					  RADEON_PRIO_QUERY);
	}
}

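/* Example (a sketch; it mirrors the bottom-of-pipe call that
 * si_fine_fence_set() below makes): write the value 0x80000000 to fence_va
 * once all prior work has drained the pipeline:
 *
 *    si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
 *                           EOP_DATA_SEL_VALUE_32BIT,
 *                           NULL, fence_va, 0x80000000,
 *                           PIPE_QUERY_GPU_FINISHED);
 */
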
unsigned si_gfx_write_fence_dwords(struct si_screen *screen)
{
	unsigned dwords = 6;

	if (screen->info.chip_class == CIK ||
	    screen->info.chip_class == VI)
		dwords *= 2;

	return dwords;
}

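/* Budget rationale: one EVENT_WRITE_EOP packet is 6 dwords (the PKT3 header
 * plus 5 payload dwords), and on CIK/VI si_gfx_write_event_eop() emits the
 * packet twice as a bug workaround, so callers reserving CS space need 12.
 */
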
void si_gfx_wait_fence(struct si_context *ctx,
		       uint64_t va, uint32_t ref, uint32_t mask)
{
	struct radeon_winsys_cs *cs = ctx->b.gfx_cs;

	radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0));
	radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, ref); /* reference value */
	radeon_emit(cs, mask); /* mask */
	radeon_emit(cs, 4); /* poll interval */
}

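/* Example pairing (a sketch; "va" is assumed to point at a 4-byte fence
 * slot in "buf"): make the CP spin until an EOP fence written earlier
 * lands in memory:
 *
 *    si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
 *                           EOP_DATA_SEL_VALUE_32BIT, buf, va, 1,
 *                           PIPE_QUERY_GPU_FINISHED);
 *    si_gfx_wait_fence(ctx, va, 1, 0xffffffff); // blocks until *va == 1
 */
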
static void si_add_fence_dependency(struct si_context *sctx,
				    struct pipe_fence_handle *fence)
{
	struct radeon_winsys *ws = sctx->b.ws;

	if (sctx->b.dma_cs)
		ws->cs_add_fence_dependency(sctx->b.dma_cs, fence);
	ws->cs_add_fence_dependency(sctx->b.gfx_cs, fence);
}

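/* The dependency is added to both IBs: since DMA IBs are preambles to gfx
 * IBs (see si_flush_from_st below), the SDMA IB must also wait on the
 * foreign fence to keep the two engines ordered against it.
 */
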
static void si_add_syncobj_signal(struct si_context *sctx,
				  struct pipe_fence_handle *fence)
{
	sctx->b.ws->cs_add_syncobj_signal(sctx->b.gfx_cs, fence);
}

static void si_fence_reference(struct pipe_screen *screen,
			       struct pipe_fence_handle **dst,
			       struct pipe_fence_handle *src)
{
	struct radeon_winsys *ws = ((struct si_screen *)screen)->ws;
	struct si_multi_fence **rdst = (struct si_multi_fence **)dst;
	struct si_multi_fence *rsrc = (struct si_multi_fence *)src;

	if (pipe_reference(&(*rdst)->reference, &rsrc->reference)) {
		ws->fence_reference(&(*rdst)->gfx, NULL);
		ws->fence_reference(&(*rdst)->sdma, NULL);
		tc_unflushed_batch_token_reference(&(*rdst)->tc_token, NULL);
		r600_resource_reference(&(*rdst)->fine.buf, NULL);
		FREE(*rdst);
	}
	*rdst = rsrc;
}

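/* pipe_reference() returns true when the old fence in *rdst dropped to a
 * reference count of zero, in which case every sub-fence must be released
 * before the wrapper itself is freed.
 */
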
static struct si_multi_fence *si_create_multi_fence()
{
	struct si_multi_fence *fence = CALLOC_STRUCT(si_multi_fence);
	if (!fence)
		return NULL;

	pipe_reference_init(&fence->reference, 1);
	util_queue_fence_init(&fence->ready);

	return fence;
}

struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
					  struct tc_unflushed_batch_token *tc_token)
{
	struct si_multi_fence *fence = si_create_multi_fence();
	if (!fence)
		return NULL;

	util_queue_fence_reset(&fence->ready);
	tc_unflushed_batch_token_reference(&fence->tc_token, tc_token);

	return (struct pipe_fence_handle *)fence;
}

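/* Called by the threaded context to create a fence object before the batch
 * containing the corresponding flush has executed; "ready" stays unsignaled
 * until si_flush_from_st() processes that batch (see the TC_FLUSH_ASYNC
 * path below).
 */
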
static bool si_fine_fence_signaled(struct radeon_winsys *rws,
				   const struct si_fine_fence *fine)
{
	char *map = rws->buffer_map(fine->buf->buf, NULL, PIPE_TRANSFER_READ |
							  PIPE_TRANSFER_UNSYNCHRONIZED);
	if (!map)
		return false;

	uint32_t *fence = (uint32_t*)(map + fine->offset);
	return *fence != 0;
}

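/* The unsynchronized CPU map is intentional: this only peeks at the 4-byte
 * fence slot that the GPU sets to 0x80000000 (see si_fine_fence_set), so
 * stalling in the winsys would defeat the point of a fine-grained fence.
 */
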
static void si_fine_fence_set(struct si_context *ctx,
			      struct si_fine_fence *fine,
			      unsigned flags)
{
	uint32_t *fence_ptr;

	assert(util_bitcount(flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) == 1);

	/* Use uncached system memory for the fence. */
	u_upload_alloc(ctx->b.cached_gtt_allocator, 0, 4, 4,
		       &fine->offset, (struct pipe_resource **)&fine->buf, (void **)&fence_ptr);
	if (!fine->buf)
		return;

	*fence_ptr = 0;

	uint64_t fence_va = fine->buf->gpu_address + fine->offset;

	radeon_add_to_buffer_list(ctx, ctx->b.gfx_cs, fine->buf,
				  RADEON_USAGE_WRITE, RADEON_PRIO_QUERY);
	if (flags & PIPE_FLUSH_TOP_OF_PIPE) {
		struct radeon_winsys_cs *cs = ctx->b.gfx_cs;
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
			S_370_WR_CONFIRM(1) |
			S_370_ENGINE_SEL(V_370_PFP));
		radeon_emit(cs, fence_va);
		radeon_emit(cs, fence_va >> 32);
		radeon_emit(cs, 0x80000000);
	} else if (flags & PIPE_FLUSH_BOTTOM_OF_PIPE) {
		si_gfx_write_event_eop(ctx, V_028A90_BOTTOM_OF_PIPE_TS, 0,
				       EOP_DATA_SEL_VALUE_32BIT,
				       NULL, fence_va, 0x80000000,
				       PIPE_QUERY_GPU_FINISHED);
	}
}

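/* Usage sketch (hypothetical state-tracker call; fine fences are only valid
 * together with a deferred flush, as asserted in si_flush_from_st below):
 *
 *    struct pipe_fence_handle *f = NULL;
 *    pipe->flush(pipe, &f, PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
 *    ...
 *    screen->fence_finish(screen, NULL, f, timeout);
 */
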
static boolean si_fence_finish(struct pipe_screen *screen,
			       struct pipe_context *ctx,
			       struct pipe_fence_handle *fence,
			       uint64_t timeout)
{
	struct radeon_winsys *rws = ((struct si_screen *)screen)->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int64_t abs_timeout = os_time_get_absolute_timeout(timeout);

	if (!util_queue_fence_is_signalled(&rfence->ready)) {
		if (rfence->tc_token) {
			/* Ensure that si_flush_from_st will be called for
			 * this fence, but only if we're in the API thread
			 * where the context is current.
			 *
			 * Note that the batch containing the flush may already
			 * be in flight in the driver thread, so the fence
			 * may not be ready yet when this call returns.
			 */
			threaded_context_flush(ctx, rfence->tc_token,
					       timeout == 0);
		}

		if (!timeout)
			return false;

		if (timeout == PIPE_TIMEOUT_INFINITE) {
			util_queue_fence_wait(&rfence->ready);
		} else {
			if (!util_queue_fence_wait_timeout(&rfence->ready, abs_timeout))
				return false;
		}

		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (rfence->sdma) {
		if (!rws->fence_wait(rws, rfence->sdma, timeout))
			return false;

		/* Recompute the timeout after waiting. */
		if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
			int64_t time = os_time_get_nano();
			timeout = abs_timeout > time ? abs_timeout - time : 0;
		}
	}

	if (!rfence->gfx)
		return true;

	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine)) {
		rws->fence_reference(&rfence->gfx, NULL);
		r600_resource_reference(&rfence->fine.buf, NULL);
		return true;
	}

	/* Flush the gfx IB if it hasn't been flushed yet. */
	if (ctx && rfence->gfx_unflushed.ctx) {
		struct si_context *sctx;

		sctx = (struct si_context *)threaded_context_unwrap_unsync(ctx);
		if (rfence->gfx_unflushed.ctx == sctx &&
		    rfence->gfx_unflushed.ib_index == sctx->b.num_gfx_cs_flushes) {
			/* Section 4.1.2 (Signaling) of the OpenGL 4.6 (Core profile)
			 * spec says:
			 *
			 *    "If the sync object being blocked upon will not be
			 *     signaled in finite time (for example, by an associated
			 *     fence command issued previously, but not yet flushed to
			 *     the graphics pipeline), then ClientWaitSync may hang
			 *     forever. To help prevent this behavior, if
			 *     ClientWaitSync is called and all of the following are
			 *     true:
			 *
			 *     * the SYNC_FLUSH_COMMANDS_BIT bit is set in flags,
			 *     * sync is unsignaled when ClientWaitSync is called,
			 *     * and the calls to ClientWaitSync and FenceSync were
			 *       issued from the same context,
			 *
			 *     then the GL will behave as if the equivalent of Flush
			 *     were inserted immediately after the creation of sync."
			 *
			 * This means we need to flush for such fences even when we're
			 * not going to wait.
			 */
			threaded_context_unwrap_sync(ctx);
			si_flush_gfx_cs(sctx, timeout ? 0 : PIPE_FLUSH_ASYNC, NULL);
			rfence->gfx_unflushed.ctx = NULL;

			if (!timeout)
				return false;

			/* Recompute the timeout after all that. */
			if (timeout && timeout != PIPE_TIMEOUT_INFINITE) {
				int64_t time = os_time_get_nano();
				timeout = abs_timeout > time ? abs_timeout - time : 0;
			}
		}
	}

	if (rws->fence_wait(rws, rfence->gfx, timeout))
		return true;

	/* Re-check in case the GPU is slow or hangs, but the commands before
	 * the fine-grained fence have completed. */
	if (rfence->fine.buf &&
	    si_fine_fence_signaled(rws, &rfence->fine))
		return true;

	return false;
}

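/* Note on the repeated timeout arithmetic above: every blocking step (the
 * "ready" wait, the sdma wait, the deferred gfx flush) consumes part of the
 * caller's budget, so the remaining time is recomputed as
 * "abs_timeout - now" before each subsequent wait.
 */
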
static void si_create_fence_fd(struct pipe_context *ctx,
			       struct pipe_fence_handle **pfence, int fd,
			       enum pipe_fd_type type)
{
	struct si_screen *sscreen = (struct si_screen *)ctx->screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence;

	*pfence = NULL;

	rfence = si_create_multi_fence();
	if (!rfence)
		return;

	switch (type) {
	case PIPE_FD_TYPE_NATIVE_SYNC:
		if (!sscreen->info.has_fence_to_handle)
			goto finish;

		rfence->gfx = ws->fence_import_sync_file(ws, fd);
		break;

	case PIPE_FD_TYPE_SYNCOBJ:
		if (!sscreen->info.has_syncobj)
			goto finish;

		rfence->gfx = ws->fence_import_syncobj(ws, fd);
		break;

	default:
		unreachable("bad fence fd type when importing");
	}

finish:
	if (!rfence->gfx) {
		FREE(rfence);
		return;
	}

	*pfence = (struct pipe_fence_handle *)rfence;
}

static int si_fence_get_fd(struct pipe_screen *screen,
			   struct pipe_fence_handle *fence)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;
	int gfx_fd = -1, sdma_fd = -1;

	if (!sscreen->info.has_fence_to_handle)
		return -1;

	util_queue_fence_wait(&rfence->ready);

	/* Deferred fences aren't supported. */
	assert(!rfence->gfx_unflushed.ctx);
	if (rfence->gfx_unflushed.ctx)
		return -1;

	if (rfence->sdma) {
		sdma_fd = ws->fence_export_sync_file(ws, rfence->sdma);
		if (sdma_fd == -1)
			return -1;
	}
	if (rfence->gfx) {
		gfx_fd = ws->fence_export_sync_file(ws, rfence->gfx);
		if (gfx_fd == -1) {
			if (sdma_fd != -1)
				close(sdma_fd);
			return -1;
		}
	}

	/* If we don't have FDs at this point, it means we don't have fences
	 * either. */
	if (sdma_fd == -1 && gfx_fd == -1)
		return ws->export_signalled_sync_file(ws);
	if (sdma_fd == -1)
		return gfx_fd;
	if (gfx_fd == -1)
		return sdma_fd;

	/* Get a fence that will be a combination of both fences. */
	sync_accumulate("radeonsi", &gfx_fd, sdma_fd);
	close(sdma_fd);
	return gfx_fd;
}

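/* sync_accumulate() comes from libsync: it merges sdma_fd into gfx_fd so
 * that the single returned fd signals only once both engines are done.
 */
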
static void si_flush_from_st(struct pipe_context *ctx,
			     struct pipe_fence_handle **fence,
			     unsigned flags)
{
	struct pipe_screen *screen = ctx->screen;
	struct si_context *sctx = (struct si_context *)ctx;
	struct radeon_winsys *ws = sctx->b.ws;
	struct pipe_fence_handle *gfx_fence = NULL;
	struct pipe_fence_handle *sdma_fence = NULL;
	bool deferred_fence = false;
	struct si_fine_fence fine = {};
	unsigned rflags = PIPE_FLUSH_ASYNC;

	if (flags & PIPE_FLUSH_END_OF_FRAME)
		rflags |= PIPE_FLUSH_END_OF_FRAME;

	if (flags & (PIPE_FLUSH_TOP_OF_PIPE | PIPE_FLUSH_BOTTOM_OF_PIPE)) {
		assert(flags & PIPE_FLUSH_DEFERRED);
		assert(fence);

		si_fine_fence_set(sctx, &fine, flags);
	}

	/* DMA IBs are preambles to gfx IBs, therefore must be flushed first. */
	if (sctx->b.dma_cs)
		si_flush_dma_cs(sctx, rflags, fence ? &sdma_fence : NULL);

	if (!radeon_emitted(sctx->b.gfx_cs, sctx->b.initial_gfx_cs_size)) {
		if (fence)
			ws->fence_reference(&gfx_fence, sctx->b.last_gfx_fence);
		if (!(flags & PIPE_FLUSH_DEFERRED))
			ws->cs_sync_flush(sctx->b.gfx_cs);
	} else {
		/* Instead of flushing, create a deferred fence. Constraints:
		 * - The state tracker must allow a deferred flush.
		 * - The state tracker must request a fence.
		 * - fence_get_fd is not allowed.
		 * Thread safety in fence_finish must be ensured by the state tracker.
		 */
		if (flags & PIPE_FLUSH_DEFERRED &&
		    !(flags & PIPE_FLUSH_FENCE_FD) &&
		    fence) {
			gfx_fence = sctx->b.ws->cs_get_next_fence(sctx->b.gfx_cs);
			deferred_fence = true;
		} else {
			si_flush_gfx_cs(sctx, rflags, fence ? &gfx_fence : NULL);
		}
	}

	/* Both engines can signal out of order, so we need to keep both fences. */
	if (fence) {
		struct si_multi_fence *multi_fence;

		if (flags & TC_FLUSH_ASYNC) {
			multi_fence = (struct si_multi_fence *)*fence;
			assert(multi_fence);
		} else {
			multi_fence = si_create_multi_fence();
			if (!multi_fence) {
				ws->fence_reference(&sdma_fence, NULL);
				ws->fence_reference(&gfx_fence, NULL);
				goto finish;
			}

			screen->fence_reference(screen, fence, NULL);
			*fence = (struct pipe_fence_handle *)multi_fence;
		}

		/* If both fences are NULL, fence_finish will always return true. */
		multi_fence->gfx = gfx_fence;
		multi_fence->sdma = sdma_fence;

		if (deferred_fence) {
			multi_fence->gfx_unflushed.ctx = sctx;
			multi_fence->gfx_unflushed.ib_index = sctx->b.num_gfx_cs_flushes;
		}

		multi_fence->fine = fine;
		fine.buf = NULL;

		if (flags & TC_FLUSH_ASYNC) {
			util_queue_fence_signal(&multi_fence->ready);
			tc_unflushed_batch_token_reference(&multi_fence->tc_token, NULL);
		}
	}
	assert(!fine.buf);
finish:
	if (!(flags & PIPE_FLUSH_DEFERRED)) {
		if (sctx->b.dma_cs)
			ws->cs_sync_flush(sctx->b.dma_cs);
		ws->cs_sync_flush(sctx->b.gfx_cs);
	}
}

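/* Note: with PIPE_FLUSH_DEFERRED nothing is submitted here; the fence only
 * records the context and IB index (gfx_unflushed), and si_fence_finish()
 * performs the real flush later if the fence is actually waited on.
 */
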
static void si_fence_server_signal(struct pipe_context *ctx,
				   struct pipe_fence_handle *fence)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;

	/* We should have at least one syncobj to signal */
	assert(rfence->sdma || rfence->gfx);

	if (rfence->sdma)
		si_add_syncobj_signal(sctx, rfence->sdma);
	if (rfence->gfx)
		si_add_syncobj_signal(sctx, rfence->gfx);

	/**
	 * The spec does not require a flush here. We insert a flush
	 * because syncobj based signals are not directly placed into
	 * the command stream. Instead the signal happens when the
	 * submission associated with the syncobj finishes execution.
	 *
	 * Therefore, we must make sure that we flush the pipe to avoid
	 * new work being emitted and getting executed before the signal
	 * operation.
	 */
	si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);
}

static void si_fence_server_sync(struct pipe_context *ctx,
				 struct pipe_fence_handle *fence)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_multi_fence *rfence = (struct si_multi_fence *)fence;

	util_queue_fence_wait(&rfence->ready);

	/* Unflushed fences from the same context are no-ops. */
	if (rfence->gfx_unflushed.ctx &&
	    rfence->gfx_unflushed.ctx == sctx)
		return;

	/* All unflushed commands will not start execution before
	 * this fence dependency is signalled.
	 *
	 * Therefore we must flush before inserting the dependency.
	 */
	si_flush_from_st(ctx, NULL, PIPE_FLUSH_ASYNC);

	if (rfence->sdma)
		si_add_fence_dependency(sctx, rfence->sdma);
	if (rfence->gfx)
		si_add_fence_dependency(sctx, rfence->gfx);
}

void si_init_fence_functions(struct si_context *ctx)
{
	ctx->b.b.flush = si_flush_from_st;
	ctx->b.b.create_fence_fd = si_create_fence_fd;
	ctx->b.b.fence_server_sync = si_fence_server_sync;
	ctx->b.b.fence_server_signal = si_fence_server_signal;
}

void si_init_screen_fence_functions(struct si_screen *screen)
{
	screen->b.fence_finish = si_fence_finish;
	screen->b.fence_reference = si_fence_reference;
	screen->b.fence_get_fd = si_fence_get_fd;
}