/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "util/os_time.h"
#include <inttypes.h>
#include <stdio.h>

#include "amd/common/sid.h"

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)

#ifndef AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
#endif

#ifndef AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#endif
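
/* The two #ifndef fallbacks above let this file build against older kernel
 * headers; the values are assumed to match the upstream amdgpu UAPI. */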

static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ws = ctx->ws;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   util_queue_fence_init(&fence->submitted);
   util_queue_fence_reset(&fence->submitted);
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
amdgpu_fence_import_syncobj(struct radeon_winsys *rws, int fd)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
   int r;

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   fence->ws = ws;

   r = amdgpu_cs_import_syncobj(ws->dev, fd, &fence->syncobj);
   if (r) {
      FREE(fence);
      return NULL;
   }

   util_queue_fence_init(&fence->submitted);

   assert(amdgpu_fence_is_syncobj(fence));
   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   if (!fence)
      return NULL;

   pipe_reference_init(&fence->reference, 1);
   fence->ws = ws;
   /* fence->ctx == NULL means that the fence is syncobj-based. */

   /* Convert sync_file into syncobj. */
   int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
   if (r) {
      FREE(fence);
      return NULL;
   }

   r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
   if (r) {
      amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
      FREE(fence);
      return NULL;
   }

   util_queue_fence_init(&fence->submitted);

   return (struct pipe_fence_handle *)fence;
}

static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
                                         struct pipe_fence_handle *pfence)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = (struct amdgpu_fence *)pfence;

   if (amdgpu_fence_is_syncobj(fence)) {
      int fd, r;

      /* Convert syncobj into sync_file. */
      r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
      return r ? -1 : fd;
   }

   util_queue_fence_wait(&fence->submitted);

   /* Convert the amdgpu fence into a fence FD. */
   int fd;
   if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
                                 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
                                 (uint32_t*)&fd))
      return -1;

   return fd;
}

static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   uint32_t syncobj;
   int fd = -1;

   int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,
                                     &syncobj);
   if (r)
      return -1;

   r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);
   if (r)
      fd = -1;

   amdgpu_cs_destroy_syncobj(ws->dev, syncobj);
   return fd;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   uint64_t seq_no,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;

   afence->fence.fence = seq_no;
   afence->user_fence_cpu_address = user_fence_cpu_address;
   util_queue_fence_signal(&afence->submitted);
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;

   afence->signalled = true;
   util_queue_fence_signal(&afence->submitted);
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (afence->signalled)
      return true;

   /* Handle syncobjs. */
   if (amdgpu_fence_is_syncobj(afence)) {
      /* Absolute timeouts are only used by BO fences, which aren't
       * backed by syncobjs.
       */
      assert(!absolute);

      if (amdgpu_cs_syncobj_wait(afence->ws->dev, &afence->syncobj, 1,
                                 os_time_get_absolute_timeout(timeout),
                                 0, NULL))
         return false;

      afence->signalled = true;
      return true;
   }

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!util_queue_fence_wait_timeout(&afence->submitted, abs_timeout))
      return false;

   user_fence_cpu = afence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= afence->fence.fence) {
         afence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&afence->fence, abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      afence->signalled = true;
      return true;
   }
   return false;
}
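
/* Note that the winsys fence_wait entry point below always passes a relative
 * timeout; absolute timeouts are only used internally, e.g. for BO fences. */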

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (debug_get_option_noop())
      return NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->ib[IB_MAIN].ip_type,
                               cs->csc->ib[IB_MAIN].ip_instance,
                               cs->csc->ib[IB_MAIN].ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}

static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;
   ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   int r;

   /* Return a failure due to a GPU hang. */
   if (ctx->ws->info.drm_minor >= 24) {
      uint64_t flags;

      r = amdgpu_cs_query_reset_state2(ctx->ctx, &flags);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state2 failed. (%i)\n", r);
         return PIPE_NO_RESET;
      }

      if (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET) {
         if (flags & AMDGPU_CTX_QUERY2_FLAGS_GUILTY)
            return PIPE_GUILTY_CONTEXT_RESET;
         else
            return PIPE_INNOCENT_CONTEXT_RESET;
      }
   } else {
      uint32_t result, hangs;

      r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
      if (r) {
         fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
         return PIPE_NO_RESET;
      }

      switch (result) {
      case AMDGPU_CTX_GUILTY_RESET:
         return PIPE_GUILTY_CONTEXT_RESET;
      case AMDGPU_CTX_INNOCENT_RESET:
         return PIPE_INNOCENT_CONTEXT_RESET;
      case AMDGPU_CTX_UNKNOWN_RESET:
         return PIPE_UNKNOWN_CONTEXT_RESET;
      }
   }

   /* Return a failure due to a rejected command submission. */
   if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
      return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
                                    PIPE_INNOCENT_CONTEXT_RESET;
   }
   return PIPE_NO_RESET;
}

/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD_ENC &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_JPEG;
}
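
/* Chaining: instead of submitting every IB with a separate ioctl, the end of
 * an IB can jump to the next one with an INDIRECT_BUFFER packet. Only the
 * GFX and compute rings of GFX7+ command processors support this. */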
static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
   return cs->ctx->ws->info.chip_class >= GFX7 &&
          (cs->ring_type == RING_GFX || cs->ring_type == RING_COMPUTE);
}

static unsigned amdgpu_cs_epilog_dws(struct amdgpu_cs *cs)
{
   if (amdgpu_cs_has_chaining(cs))
      return 4; /* for chaining */
   else
      return 0;
}
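
/* The 4 epilog dwords correspond to the chaining sequence emitted in
 * amdgpu_cs_check_space: a PKT3_INDIRECT_BUFFER_CIK header, two dwords of IB
 * address, and the size dword that is patched in later. */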

int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];
   struct amdgpu_cs_buffer *buffers;
   int num_buffers;

   if (bo->bo) {
      buffers = cs->real_buffers;
      num_buffers = cs->num_real_buffers;
   } else if (!bo->sparse) {
      buffers = cs->slab_buffers;
      num_buffers = cs->num_slab_buffers;
   } else {
      buffers = cs->sparse_buffers;
      num_buffers = cs->num_sparse_buffers;
   }

   /* not found or found */
   if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = num_buffers - 1; i >= 0; i--) {
      if (buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here: ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }

   return -1;
}

static int
amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_buffer *buffer;
   int idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_real_buffers >= cs->max_real_buffers) {
      unsigned new_max =
         MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
         return -1;
      }

      memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));

      FREE(cs->real_buffers);

      cs->max_real_buffers = new_max;
      cs->real_buffers = new_buffers;
   }

   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_real_buffers++;

   return idx;
}
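
/* Note: amdgpu_do_add_real_buffer deliberately leaves the hash list to its
 * callers; it is also reused at submit time for the backing buffers of
 * sparse buffers (see amdgpu_add_sparse_backing_buffers). */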

static int
amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   idx = amdgpu_do_add_real_buffer(cs, bo);

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      acs->main.base.used_vram += bo->base.size;
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      acs->main.base.used_gart += bo->base.size;

   return idx;
}

static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
                                            struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);
   int real_idx;

   if (idx >= 0)
      return idx;

   real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
   if (real_idx < 0)
      return real_idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_slab_buffers >= cs->max_slab_buffers) {
      unsigned new_max =
         MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->slab_buffers,
                            cs->max_slab_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
         return -1;
      }

      cs->max_slab_buffers = new_max;
      cs->slab_buffers = new_buffers;
   }

   idx = cs->num_slab_buffers;
   buffer = &cs->slab_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   buffer->u.slab.real_idx = real_idx;
   p_atomic_inc(&bo->num_cs_references);
   cs->num_slab_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   return idx;
}

static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
                                              struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
      unsigned new_max =
         MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->sparse_buffers,
                            cs->max_sparse_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
         return -1;
      }

      cs->max_sparse_buffers = new_max;
      cs->sparse_buffers = new_buffers;
   }

   idx = cs->num_sparse_buffers;
   buffer = &cs->sparse_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_sparse_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   /* We delay adding the backing buffers until we really have to. However,
    * we cannot delay accounting for memory use.
    */
   simple_mtx_lock(&bo->lock);

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         acs->main.base.used_vram += backing->bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         acs->main.base.used_gart += backing->bo->base.size;
   }

   simple_mtx_unlock(&bo->lock);

   return idx;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs_buffer *buffer;
   int index;

   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (bo == cs->last_added_bo &&
       (usage & cs->last_added_bo_usage) == usage &&
       (1u << priority) & cs->last_added_bo_priority_usage)
      return cs->last_added_bo_index;

   if (!bo->sparse) {
      if (!bo->bo) {
         index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
         if (index < 0)
            return 0;

         buffer = &cs->slab_buffers[index];
         buffer->usage |= usage;

         usage &= ~RADEON_USAGE_SYNCHRONIZED;
         index = buffer->u.slab.real_idx;
      } else {
         index = amdgpu_lookup_or_add_real_buffer(acs, bo);
         if (index < 0)
            return 0;
      }

      buffer = &cs->real_buffers[index];
   } else {
      index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
      if (index < 0)
         return 0;

      buffer = &cs->sparse_buffers[index];
   }

   buffer->u.real.priority_usage |= 1u << priority;
   buffer->usage |= usage;

   cs->last_added_bo = bo;
   cs->last_added_bo_index = index;
   cs->last_added_bo_usage = buffer->usage;
   cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
   return index;
}

static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws,
                                 struct amdgpu_ib *ib,
                                 enum ring_type ring_type)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   const unsigned min_size = MAX2(ib->max_check_space_size, 8 * 1024 * 4);
   const unsigned max_size = 512 * 1024 * 4;

   buffer_size = MIN2(buffer_size, max_size);
   buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
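
   /* Worked example, assuming chaining is available: with max_ib_size =
    * 20000 dwords, util_next_power_of_two rounds up to 32768, giving
    * buffer_size = 4 * 32768 bytes = 128 KiB before the clamps above. */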

   pb = amdgpu_bo_create(ws, buffer_size,
                         ws->info.gart_page_size,
                         RADEON_DOMAIN_GTT,
                         RADEON_FLAG_NO_INTERPROCESS_SHARING |
                         (ring_type == RING_GFX ||
                          ring_type == RING_COMPUTE ||
                          ring_type == RING_DMA ?
                             RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
   if (!pb)
      return false;

   mapped = amdgpu_bo_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   /* The maximum IB size including all chained IBs. */
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_PARALLEL_COMPUTE:
      /* Always chain this IB. */
      return UINT_MAX;
   default:
      unreachable("bad ib_type");
   }
}

static bool amdgpu_get_new_ib(struct amdgpu_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
   /* This is the minimum size of a contiguous IB. */
   unsigned ib_size = 4 * 1024 * 4;

   switch (ib_type) {
   case IB_PARALLEL_COMPUTE:
      ib = &cs->compute_ib;
      break;
   case IB_MAIN:
      ib = &cs->main;
      break;
   default:
      unreachable("unhandled IB type");
   }

   /* Always allocate at least the size of the biggest cs_check_space call,
    * because precisely the last call might have requested this size.
    */
   ib_size = MAX2(ib_size, ib->max_check_space_size);

   if (!amdgpu_cs_has_chaining(cs)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(ws, ib, cs->ring_type))
         return false;
   }

   info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
   info->ib_bytes = 0;
   /* ib_bytes is in dwords and the conversion to bytes will be done before
    * the CS ioctl. */
   ib->ptr_ib_size = &info->ib_bytes;
   ib->ptr_ib_size_inside_ib = false;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs);
   assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
   ib->base.gpu_address = info->va_start;
   return true;
}

static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
{
   if (ib->ptr_ib_size_inside_ib) {
      *ib->ptr_ib_size = ib->base.current.cdw |
                         S_3F2_CHAIN(1) | S_3F2_VALID(1);
   } else {
      *ib->ptr_ib_size = ib->base.current.cdw;
   }
}
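
/* ptr_ib_size points either at drm_amdgpu_cs_chunk_ib::ib_bytes (start of a
 * new IB) or, when chaining, at the size operand of an INDIRECT_BUFFER
 * packet inside IB memory, which also needs its CHAIN and VALID bits set. */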

static void amdgpu_ib_finalize(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   amdgpu_set_ib_size(ib);
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->used_ib_space = align(ib->used_ib_space, ws->info.ib_start_alignment);
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}

static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
                                   struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   switch (ring_type) {
   case RING_DMA:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
      break;
   case RING_UVD:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
      break;
   case RING_UVD_ENC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
      break;
   case RING_VCE:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
      break;
   case RING_VCN_DEC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
      break;
   case RING_VCN_ENC:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
      break;
   case RING_VCN_JPEG:
      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_JPEG;
      break;
   default:
      cs->ib[IB_MAIN].ip_type = ring_type == RING_GFX ? AMDGPU_HW_IP_GFX :
                                                        AMDGPU_HW_IP_COMPUTE;
      break;
   }

   /* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
    * invalidation is the beginning of IBs (the previous commit does that),
    * because completion of an IB doesn't care about the state of GPU caches,
    * but the beginning of an IB does. Draw calls from multiple IBs can be
    * executed in parallel, so draw calls from the current IB can finish after
    * the next IB starts drawing, and so the cache flush at the end of IB
    * is always late.
    */
   if (ws->info.drm_minor >= 26)
      cs->ib[IB_MAIN].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;

   cs->ib[IB_PARALLEL_COMPUTE].ip_type = AMDGPU_HW_IP_COMPUTE;
   cs->ib[IB_PARALLEL_COMPUTE].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;
   return true;
}

static void cleanup_fence_list(struct amdgpu_fence_list *fences)
{
   for (unsigned i = 0; i < fences->num; i++)
      amdgpu_fence_reference(&fences->list[i], NULL);
   fences->num = 0;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_real_buffers; i++) {
      p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_slab_buffers; i++) {
      p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_sparse_buffers; i++) {
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
   }
   cleanup_fence_list(&cs->fence_dependencies);
   cleanup_fence_list(&cs->syncobj_dependencies);
   cleanup_fence_list(&cs->syncobj_to_signal);
   cleanup_fence_list(&cs->compute_fence_dependencies);
   cleanup_fence_list(&cs->compute_start_fence_dependencies);

   cs->num_real_buffers = 0;
   cs->num_slab_buffers = 0;
   cs->num_sparse_buffers = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->real_buffers);
   FREE(cs->slab_buffers);
   FREE(cs->sparse_buffers);
   FREE(cs->fence_dependencies.list);
   FREE(cs->syncobj_dependencies.list);
   FREE(cs->syncobj_to_signal.list);
   FREE(cs->compute_fence_dependencies.list);
   FREE(cs->compute_start_fence_dependencies.list);
}

static struct radeon_cmdbuf *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx,
                 bool stop_exec_on_failure)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs)
      return NULL;

   util_queue_fence_init(&cs->flush_completed);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;
   cs->stop_exec_on_failure = stop_exec_on_failure;

   struct amdgpu_cs_fence_info fence_info;
   fence_info.handle = cs->ctx->user_fence_bo;
   fence_info.offset = cs->ring_type;
   amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);

   cs->main.ib_type = IB_MAIN;
   cs->compute_ib.ib_type = IB_PARALLEL_COMPUTE;

   if (!amdgpu_init_cs_context(ctx->ws, &cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(ctx->ws, &cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(ctx->ws, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static struct radeon_cmdbuf *
amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *ib,
                                  bool uses_gds_ordered_append)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)ib;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   if (cs->ring_type != RING_GFX)
      return NULL;

   /* only one secondary IB can be added */
   if (cs->compute_ib.ib_mapped)
      return NULL;

   /* Allocate the compute IB. */
   if (!amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE))
      return NULL;

   if (uses_gds_ordered_append) {
      cs->csc1.ib[IB_PARALLEL_COMPUTE].flags |=
         AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
      cs->csc2.ib[IB_PARALLEL_COMPUTE].flags |=
         AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
   }
   return &cs->compute_ib.base;
}

static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
{
   return true;
}

static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
                                  bool force_chaining)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   unsigned cs_epilog_dw = amdgpu_cs_epilog_dws(cs);
   unsigned need_byte_size = (dw + cs_epilog_dw) * 4;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   /* 125% of the size for IB epilog. */
   unsigned safe_byte_size = need_byte_size + need_byte_size / 4;
   ib->max_check_space_size = MAX2(ib->max_check_space_size,
                                   safe_byte_size);

   /* If force_chaining is true, we can't return. We have to chain. */
   if (!force_chaining) {
      if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
         return false;

      ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

      if (rcs->current.max_dw - rcs->current.cdw >= dw)
         return true;
   }

   if (!amdgpu_cs_has_chaining(cs)) {
      assert(!force_chaining);
      return false;
   }

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_cmdbuf_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += cs_epilog_dw;

   /* Pad with NOPs and add INDIRECT_BUFFER packet */
   while ((rcs->current.cdw & 7) != 4)
      radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

   radeon_emit(rcs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   amdgpu_set_ib_size(ib);
   ib->ptr_ib_size = new_ptr_ib_size;
   ib->ptr_ib_size_inside_ib = true;
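
   /* The size operand of the INDIRECT_BUFFER packet emitted above is still
    * unwritten; amdgpu_set_ib_size patches it once the size of the next
    * chunk is known (at the next chain point or at flush). */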

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - cs_epilog_dw;
   assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
   ib->base.gpu_address = va;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_real_buffers; i++) {
         list[i].bo_size = cs->real_buffers[i].bo->base.size;
         list[i].vm_address = cs->real_buffers[i].bo->va;
         list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
      }
   }
   return cs->num_real_buffers;
}

static void add_fence_to_list(struct amdgpu_fence_list *fences,
                              struct amdgpu_fence *fence)
{
   unsigned idx = fences->num++;

   if (idx >= fences->max) {
      unsigned size;
      const unsigned increment = 8;

      fences->max = idx + increment;
      size = fences->max * sizeof(fences->list[0]);
      fences->list = realloc(fences->list, size);
      /* Clear the newly-allocated elements. */
      memset(fences->list + idx, 0,
             increment * sizeof(fences->list[0]));
   }

   amdgpu_fence_reference(&fences->list[idx], (struct pipe_fence_handle*)fence);
}

/* TODO: recognizing dependencies as no-ops doesn't take the parallel
 * compute IB into account. The compute IB won't wait for these.
 * Also, the scheduler can execute compute and SDMA IBs on any rings.
 * Should we always insert dependencies?
 */
static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
                                     struct amdgpu_fence *fence)
{
   struct amdgpu_cs_context *cs = acs->csc;

   if (!amdgpu_fence_is_syncobj(fence) &&
       fence->ctx == acs->ctx &&
       fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
       fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
       fence->fence.ring == cs->ib[IB_MAIN].ring)
      return true;
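
   /* For fences from other rings or contexts, the dependency is a no-op
    * only if the fence has already signalled, hence this zero-timeout
    * wait. */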
   return amdgpu_fence_wait((void *)fence, 0, false);
}

static void amdgpu_cs_add_fence_dependency(struct radeon_cmdbuf *rws,
                                           struct pipe_fence_handle *pfence,
                                           unsigned dependency_flags)
{
   struct amdgpu_cs *acs = amdgpu_cs(rws);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;

   util_queue_fence_wait(&fence->submitted);

   if (dependency_flags & RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY) {
      /* Syncobjs are not needed here. */
      assert(!amdgpu_fence_is_syncobj(fence));

      if (acs->ctx->ws->info.has_scheduled_fence_dependency &&
          dependency_flags & RADEON_DEPENDENCY_START_FENCE)
         add_fence_to_list(&cs->compute_start_fence_dependencies, fence);
      else
         add_fence_to_list(&cs->compute_fence_dependencies, fence);
      return;
   }

   /* Start fences are not needed here. */
   assert(!(dependency_flags & RADEON_DEPENDENCY_START_FENCE));

   if (is_noop_fence_dependency(acs, fence))
      return;

   if (amdgpu_fence_is_syncobj(fence))
      add_fence_to_list(&cs->syncobj_dependencies, fence);
   else
      add_fence_to_list(&cs->fence_dependencies, fence);
}

static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
                                             struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];

      if (is_noop_fence_dependency(acs, bo_fence))
         continue;

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
         continue;

      add_fence_to_list(&cs->fence_dependencies, bo_fence);
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}

/* Add the given list of fences to the buffer's fence list.
 *
 * Must be called with the winsys bo_fence_lock held.
 */
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences)
{
   if (bo->num_fences + num_fences > bo->max_fences) {
      unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (likely(new_fences)) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;
      } else {
         unsigned drop;

         fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
         if (!bo->num_fences)
            return;

         bo->num_fences--; /* prefer to keep the most recent fence if possible */
         amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);

         drop = bo->num_fences + num_fences - bo->max_fences;
         num_fences -= drop;
         fences += drop;
      }
   }

   for (unsigned i = 0; i < num_fences; ++i) {
      bo->fences[bo->num_fences] = NULL;
      amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
      bo->num_fences++;
   }
}

static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
                                                  struct pipe_fence_handle *fence,
                                                  unsigned num_buffers,
                                                  struct amdgpu_cs_buffer *buffers)
{
   for (unsigned i = 0; i < num_buffers; i++) {
      struct amdgpu_cs_buffer *buffer = &buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      amdgpu_add_bo_fence_dependencies(acs, buffer);
      p_atomic_inc(&bo->num_active_ioctls);
      amdgpu_add_fences(bo, 1, &fence);
   }
}

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;

   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
}

static void amdgpu_cs_add_syncobj_signal(struct radeon_cmdbuf *rws,
                                         struct pipe_fence_handle *fence)
{
   struct amdgpu_cs *acs = amdgpu_cs(rws);
   struct amdgpu_cs_context *cs = acs->csc;

   assert(amdgpu_fence_is_syncobj((struct amdgpu_fence *)fence));

   add_fence_to_list(&cs->syncobj_to_signal, (struct amdgpu_fence*)fence);
}

/* Add backing of sparse buffers to the buffer list.
 *
 * This is done late, during submission, to keep the buffer list short before
 * submit, and to avoid managing fences for the backing buffers.
 */
static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
{
   for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
      struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      simple_mtx_lock(&bo->lock);

      list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
         /* We can directly add the buffer here, because we know that each
          * backing buffer occurs only once.
          */
         int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
         if (idx < 0) {
            fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
            simple_mtx_unlock(&bo->lock);
            return false;
         }

         cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
         cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
         p_atomic_inc(&backing->bo->num_active_ioctls);
      }

      simple_mtx_unlock(&bo->lock);
   }

   return true;
}

void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;
   uint32_t bo_list = 0;
   uint64_t seq_no = 0;
   bool has_user_fence = amdgpu_cs_has_user_fence(cs);
   bool use_bo_list_create = ws->info.drm_minor < 27;
   struct drm_amdgpu_bo_list_in bo_list_in;

   /* Prepare the buffer list. */
   if (ws->debug_all_bos) {
      /* The buffer list contains all buffers. This is a slow path that
       * ensures that no buffer is missing in the BO list.
       */
      unsigned num_handles = 0;
      struct drm_amdgpu_bo_list_entry *list =
         alloca(ws->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
      struct amdgpu_winsys_bo *bo;

      simple_mtx_lock(&ws->global_bo_list_lock);
      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         list[num_handles].bo_handle = bo->u.real.kms_handle;
         list[num_handles].bo_priority = 0;
         ++num_handles;
      }

      r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers, list, &bo_list);
      simple_mtx_unlock(&ws->global_bo_list_lock);
      if (r) {
         fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
         goto cleanup;
      }
   } else {
      if (!amdgpu_add_sparse_backing_buffers(cs)) {
         fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
         r = -ENOMEM;
         goto cleanup;
      }

      struct drm_amdgpu_bo_list_entry *list =
         alloca((cs->num_real_buffers + 2) * sizeof(struct drm_amdgpu_bo_list_entry));

      unsigned num_handles = 0;
      for (i = 0; i < cs->num_real_buffers; ++i) {
         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];

         if (buffer->bo->is_local)
            continue;

         assert(buffer->u.real.priority_usage != 0);

         list[num_handles].bo_handle = buffer->bo->u.real.kms_handle;
         list[num_handles].bo_priority = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
         ++num_handles;
      }

      if (use_bo_list_create) {
         /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
         r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
         if (r) {
            fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
            goto cleanup;
         }
      } else {
         /* Standard path passing the buffer list via the CS ioctl. */
         bo_list_in.operation = ~0;
         bo_list_in.list_handle = ~0;
         bo_list_in.bo_number = num_handles;
         bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
         bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
      }
   }
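
   /* With the AMDGPU_CHUNK_ID_BO_HANDLES path above, the kernel consumes the
    * buffer list directly in the CS ioctl, avoiding the extra bo_list
    * create/destroy ioctls of the legacy path. */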

   if (acs->ring_type == RING_GFX)
      ws->gfx_bo_list_counter += cs->num_real_buffers;

   if (acs->stop_exec_on_failure && acs->ctx->num_rejected_cs) {
      r = -ECANCELED;
   } else {
      struct drm_amdgpu_cs_chunk chunks[6];
      unsigned num_chunks = 0;

      if (!use_bo_list_create) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
         num_chunks++;
      }

      /* Fence dependencies. */
      unsigned num_dependencies = cs->fence_dependencies.num;
      if (num_dependencies) {
         struct drm_amdgpu_cs_chunk_dep *dep_chunk =
            alloca(num_dependencies * sizeof(*dep_chunk));

         for (unsigned i = 0; i < num_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence*)cs->fence_dependencies.list[i];

            assert(util_queue_fence_is_signalled(&fence->submitted));
            amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
         chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
         chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
         num_chunks++;
      }

      /* Syncobj dependencies. */
      unsigned num_syncobj_dependencies = cs->syncobj_dependencies.num;
      if (num_syncobj_dependencies) {
         struct drm_amdgpu_cs_chunk_sem *sem_chunk =
            alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));

         for (unsigned i = 0; i < num_syncobj_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence *)cs->syncobj_dependencies.list[i];

            if (!amdgpu_fence_is_syncobj(fence))
               continue;

            assert(util_queue_fence_is_signalled(&fence->submitted));
            sem_chunk[i].handle = fence->syncobj;
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
         chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num_syncobj_dependencies;
         chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
         num_chunks++;
      }

      /* Submit the parallel compute IB first. */
      if (cs->ib[IB_PARALLEL_COMPUTE].ib_bytes > 0) {
         unsigned old_num_chunks = num_chunks;

         /* Add compute fence dependencies. */
         unsigned num_dependencies = cs->compute_fence_dependencies.num;
         if (num_dependencies) {
            struct drm_amdgpu_cs_chunk_dep *dep_chunk =
               alloca(num_dependencies * sizeof(*dep_chunk));

            for (unsigned i = 0; i < num_dependencies; i++) {
               struct amdgpu_fence *fence =
                  (struct amdgpu_fence*)cs->compute_fence_dependencies.list[i];

               assert(util_queue_fence_is_signalled(&fence->submitted));
               amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
            }

            chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
            chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
            chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
            num_chunks++;
         }

         /* Add compute start fence dependencies. */
         unsigned num_start_dependencies = cs->compute_start_fence_dependencies.num;
         if (num_start_dependencies) {
            struct drm_amdgpu_cs_chunk_dep *dep_chunk =
               alloca(num_start_dependencies * sizeof(*dep_chunk));

            for (unsigned i = 0; i < num_start_dependencies; i++) {
               struct amdgpu_fence *fence =
                  (struct amdgpu_fence*)cs->compute_start_fence_dependencies.list[i];

               assert(util_queue_fence_is_signalled(&fence->submitted));
               amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
            }

            chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
            chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_start_dependencies;
            chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
            num_chunks++;
         }

         /* Convert from dwords to bytes. */
         cs->ib[IB_PARALLEL_COMPUTE].ib_bytes *= 4;
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PARALLEL_COMPUTE];
         num_chunks++;

         r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
                                   num_chunks, chunks, NULL);
         if (r)
            goto finalize;

         /* Back off the compute chunks. */
         num_chunks = old_num_chunks;
      }
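
      /* After the compute-IB submission above, the chunk count is popped
       * back so the main IB reuses the shared BO-list/dependency chunks
       * without the compute-only ones. */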

      /* Syncobj signals. */
      unsigned num_syncobj_to_signal = cs->syncobj_to_signal.num;
      if (num_syncobj_to_signal) {
         struct drm_amdgpu_cs_chunk_sem *sem_chunk =
            alloca(num_syncobj_to_signal * sizeof(sem_chunk[0]));

         for (unsigned i = 0; i < num_syncobj_to_signal; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence*)cs->syncobj_to_signal.list[i];

            assert(amdgpu_fence_is_syncobj(fence));
            sem_chunk[i].handle = fence->syncobj;
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
         chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4
                                        * num_syncobj_to_signal;
         chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
         num_chunks++;
      }

      if (has_user_fence) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
         num_chunks++;
      }

      cs->ib[IB_MAIN].ib_bytes *= 4; /* Convert from dwords to bytes. */
      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
      num_chunks++;

      assert(num_chunks <= ARRAY_SIZE(chunks));

      r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
                                num_chunks, chunks, &seq_no);
   }

finalize:
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else if (r == -ECANCELED)
         fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      acs->ctx->num_rejected_cs++;
      ws->num_total_rejected_cs++;
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;

      if (has_user_fence)
         user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
      amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
   }

   /* Cleanup. */
   if (bo_list)
      amdgpu_bo_list_destroy_raw(ws->dev, bo_list);

cleanup:
   /* If there was an error, signal the fence, because it won't be signalled
    * by the hardware. */
   if (r)
      amdgpu_fence_signalled(cs->fence);

   cs->error_code = r;

   for (i = 0; i < cs->num_real_buffers; i++)
      p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_slab_buffers; i++)
      p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_sparse_buffers; i++)
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   util_queue_fence_wait(&cs->flush_completed);
}

static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;
   int error_code = 0;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs);

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= GFX6) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
   case RING_COMPUTE:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
      }
      if (cs->ring_type == RING_GFX)
         ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;

      /* Also pad secondary IBs. */
      if (cs->compute_ib.ib_mapped) {
         while (cs->compute_ib.base.current.cdw & 7)
            radeon_emit(&cs->compute_ib.base, 0xffff1000); /* type3 nop packet */
      }
      break;
   case RING_UVD:
   case RING_UVD_ENC:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   case RING_VCN_JPEG:
      if (rcs->current.cdw % 2)
         assert(0);
      while (rcs->current.cdw & 15) {
         radeon_emit(rcs, 0x60000000); /* nop packet */
         radeon_emit(rcs, 0x00000000);
      }
      break;
   case RING_VCN_DEC:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x81ff); /* nop packet */
      break;
   default:
      break;
   }
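
   /* All rings require the IB size to be a multiple of 8 or 16 dwords;
    * the nop encoding used for padding differs per ring type. */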

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty or overflowed.... */
   if (likely(radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop())) {
      struct amdgpu_cs_context *cur = cs->csc;

      /* Set IB sizes. */
      amdgpu_ib_finalize(ws, &cs->main);

      if (cs->compute_ib.ib_mapped)
         amdgpu_ib_finalize(ws, &cs->compute_ib);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->ib[IB_MAIN].ip_type,
                                          cur->ib[IB_MAIN].ip_instance,
                                          cur->ib[IB_MAIN].ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      amdgpu_cs_sync_flush(rcs);

      /* Prepare buffers.
       *
       * This fence must be held until the submission is queued to ensure
       * that the order of fence dependency updates matches the order of
       * submissions.
       */
      simple_mtx_lock(&ws->bo_fence_lock);
      amdgpu_add_fence_dependencies_bo_lists(cs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                         amdgpu_cs_submit_ib, NULL, 0);
      /* The submission has been queued, unlock the fence now. */
      simple_mtx_unlock(&ws->bo_fence_lock);

      if (!(flags & PIPE_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(ws, cs, IB_MAIN);
   if (cs->compute_ib.ib_mapped)
      amdgpu_get_new_ib(ws, cs, IB_PARALLEL_COMPUTE);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   if (cs->ring_type == RING_GFX)
      ws->num_gfx_IBs++;
   else if (cs->ring_type == RING_DMA)
      ws->num_sdma_IBs++;

   return error_code;
}

static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   pb_reference(&cs->compute_ib.big_ib_buffer, NULL);
   FREE(cs->compute_ib.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_screen_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_parallel_compute_ib = amdgpu_cs_add_parallel_compute_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
   ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
   ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
   ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
   ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
   ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
}