/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "util/os_time.h"
#include "amd/common/sid.h"

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
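/* Fences come in two flavours: BO fences created here, which are tied to a
 * submission context and checked through the user-fence page or the kernel
 * fence ioctl, and syncobj-based fences (fence->ctx == NULL) created when
 * importing a sync_file.
 */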
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;

   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   util_queue_fence_init(&fence->submitted);
   util_queue_fence_reset(&fence->submitted);
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static struct pipe_fence_handle *
amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   pipe_reference_init(&fence->reference, 1);

   /* fence->ctx == NULL means that the fence is syncobj-based. */

   /* Convert sync_file into syncobj. */
   int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);

   r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);

      amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);

   util_queue_fence_init(&fence->submitted);

   return (struct pipe_fence_handle *)fence;
}

static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
                                         struct pipe_fence_handle *pfence)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_fence *fence = (struct amdgpu_fence *)pfence;

   if (amdgpu_fence_is_syncobj(fence)) {

      /* Convert syncobj into sync_file. */
      r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);

   util_queue_fence_wait(&fence->submitted);

   /* Convert the amdgpu fence into a fence FD. */
   if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
                                 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,

}

static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,

   r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);

   amdgpu_cs_destroy_syncobj(ws->dev, syncobj);

}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   uint64_t seq_no,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;

   rfence->fence.fence = seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   util_queue_fence_signal(&rfence->submitted);
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;

   rfence->signalled = true;
   util_queue_fence_signal(&rfence->submitted);
}
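/* Wait for a fence to be signalled, or just query its status when timeout
 * is zero. Syncobj-backed fences are waited on with the syncobj ioctl;
 * BO fences are first checked through the user-fence value written by the
 * GPU and only fall back to the libdrm query ioctl when necessary.
 */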
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;

   uint64_t *user_fence_cpu;

   if (rfence->signalled)

   /* Handle syncobjs. */
   if (amdgpu_fence_is_syncobj(rfence)) {
      /* Absolute timeouts are only used by BO fences, which aren't
       * backed by syncobjs.
       */

      if (amdgpu_cs_syncobj_wait(rfence->ws->dev, &rfence->syncobj, 1,

      rfence->signalled = true;

      abs_timeout = timeout;

      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!util_queue_fence_wait_timeout(&rfence->submitted, abs_timeout))

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;

   /* No timeout, just query: no need for the ioctl. */
   if (!absolute && !timeout)

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,

      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");

   /* This variable can only transition from false to true, so it doesn't
    * matter if threads race for it. */
   rfence->signalled = true;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (debug_get_option_noop())

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->ib[IB_MAIN].ip_type,
                               cs->csc->ib[IB_MAIN].ip_instance,
                               cs->csc->ib[IB_MAIN].ring);

   amdgpu_fence_reference(&cs->next_fence, fence);
}
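/* A winsys context wraps an amdgpu kernel context plus one page of GTT
 * memory used as the "user fence" area: each ring gets a 64-bit slot that
 * the GPU writes with the sequence number of the last completed submission,
 * which lets amdgpu_fence_wait() check completion without an ioctl.
 */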
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);

   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   ctx->ws = amdgpu_winsys(ws);

   ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);

      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);

      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);

      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx *)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx *)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx *)rwctx;
   uint32_t result, hangs;

   /* Return a failure due to a rejected command submission. */
   if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
      return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
                                    PIPE_INNOCENT_CONTEXT_RESET;

   /* Return a failure due to a GPU hang. */
   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);

      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;

   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:

      return PIPE_NO_RESET;
}

/* COMMAND SUBMISSION */
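/* The user fence is only written for the GFX/compute/SDMA style rings;
 * the multimedia rings (UVD, VCE, VCN) don't use it, so submissions on
 * those IPs skip the fence chunk.
 */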
static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&
          cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC;
}

static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
   return cs->ctx->ws->info.chip_class >= CIK &&
          cs->ring_type == RING_GFX;
}

static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
{
   if (ring_type == RING_GFX)
      return 4; /* for chaining */
}
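/* Buffer lookup: buffer_indices_hashlist is a small direct-mapped cache of
 * "unique buffer id -> index into the real/slab/sparse buffer array". A hit
 * avoids a linear search; on a miss or collision the list is scanned and the
 * cache entry is refreshed.
 */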
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];
   struct amdgpu_cs_buffer *buffers;

      buffers = cs->real_buffers;
      num_buffers = cs->num_real_buffers;
   } else if (!bo->sparse) {
      buffers = cs->slab_buffers;
      num_buffers = cs->num_slab_buffers;

      buffers = cs->sparse_buffers;
      num_buffers = cs->num_sparse_buffers;

   /* not found or found */
   if (i < 0 || (i < num_buffers && buffers[i].bo == bo))

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = num_buffers - 1; i >= 0; i--) {
      if (buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *             AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
}

static int
amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_buffer *buffer;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_real_buffers >= cs->max_real_buffers) {
      unsigned new_max =
         MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));

         fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");

      memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));

      FREE(cs->real_buffers);

      cs->max_real_buffers = new_max;
      cs->real_buffers = new_buffers;
   }

   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_real_buffers++;
}

static int
amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;

   int idx = amdgpu_lookup_buffer(cs, bo);

   idx = amdgpu_do_add_real_buffer(cs, bo);

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      acs->main.base.used_vram += bo->base.size;
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      acs->main.base.used_gart += bo->base.size;
}

static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
                                            struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;

   int idx = amdgpu_lookup_buffer(cs, bo);

   real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_slab_buffers >= cs->max_slab_buffers) {
      unsigned new_max =
         MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->slab_buffers,
                            cs->max_slab_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));

         fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");

      cs->max_slab_buffers = new_max;
      cs->slab_buffers = new_buffers;
   }

   idx = cs->num_slab_buffers;
   buffer = &cs->slab_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   buffer->u.slab.real_idx = real_idx;
   p_atomic_inc(&bo->num_cs_references);
   cs->num_slab_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;
}

static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
                                              struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;

   int idx = amdgpu_lookup_buffer(cs, bo);

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
      unsigned new_max =
         MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->sparse_buffers,
                            cs->max_sparse_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));

         fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");

      cs->max_sparse_buffers = new_max;
      cs->sparse_buffers = new_buffers;
   }

   idx = cs->num_sparse_buffers;
   buffer = &cs->sparse_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_sparse_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   /* We delay adding the backing buffers until we really have to. However,
    * we cannot delay accounting for memory use.
    */
   simple_mtx_lock(&bo->u.sparse.commit_lock);

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         acs->main.base.used_vram += backing->bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         acs->main.base.used_gart += backing->bo->base.size;
   }

   simple_mtx_unlock(&bo->u.sparse.commit_lock);
}
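/* amdgpu_cs_add_buffer is the hot path called for every buffer referenced by
 * every draw/dispatch. Suballocated (slab) buffers are recorded both in the
 * slab list and, for the kernel, as their parent real buffer; sparse buffers
 * are recorded separately and their backing pages are only added at submit
 * time. The last_added_bo fields short-circuit repeated calls for the same
 * buffer with the same usage.
 */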
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs_buffer *buffer;

   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (bo == cs->last_added_bo &&
       (usage & cs->last_added_bo_usage) == usage &&
       (1ull << priority) & cs->last_added_bo_priority_usage)
      return cs->last_added_bo_index;

      index = amdgpu_lookup_or_add_slab_buffer(acs, bo);

      buffer = &cs->slab_buffers[index];
      buffer->usage |= usage;

      usage &= ~RADEON_USAGE_SYNCHRONIZED;
      index = buffer->u.slab.real_idx;

      index = amdgpu_lookup_or_add_real_buffer(acs, bo);

      buffer = &cs->real_buffers[index];

      index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);

      buffer = &cs->sparse_buffers[index];

   buffer->u.real.priority_usage |= 1ull << priority;
   buffer->usage |= usage;

   cs->last_added_bo = bo;
   cs->last_added_bo_index = index;
   cs->last_added_bo_usage = buffer->usage;
   cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
}
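/* IBs are sub-allocated from one large "big IB buffer": amdgpu_get_new_ib()
 * carves the next chunk out of it and amdgpu_ib_finalize() advances
 * used_ib_space, so a new buffer is allocated only when the current one is
 * exhausted.
 */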
static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
                                 enum ring_type ring_type)
{
   struct pb_buffer *pb;

   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   buffer_size = MIN2(buffer_size, 4 * 512 * 1024);

   switch (ib->ib_type) {

      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);

      unreachable("unhandled IB type");

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_FLAG_NO_INTERPROCESS_SHARING |
                               RADEON_FLAG_READ_ONLY |
                               (ring_type == RING_GFX ||
                                ring_type == RING_COMPUTE ||
                                ring_type == RING_DMA ?
                                   RADEON_FLAG_GTT_WC : 0));

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);

      pb_reference(&pb, NULL);

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{

      /* Smaller submits mean the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */

      unreachable("bad ib_type");
}

static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

      ib_size = 4 * 1024 * 4;

      unreachable("unhandled IB type");

   if (!amdgpu_cs_has_chaining(cs)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))

   info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;

   /* ib_bytes is in dwords and the conversion to bytes will be done before
    * the CS ioctl. */
   ib->ptr_ib_size = &info->ib_bytes;
   ib->ptr_ib_size_inside_ib = false;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
}
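/* When an IB is chained, its size dword lives inside the previous IB (as the
 * operand of the INDIRECT_BUFFER packet), so ptr_ib_size either points into
 * the chunk info (first IB) or into command stream memory (chained IBs); the
 * VALID/CHAIN bits are only set in the latter case.
 */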
static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
{
   if (ib->ptr_ib_size_inside_ib) {
      *ib->ptr_ib_size = ib->base.current.cdw |
                         S_3F2_CHAIN(1) | S_3F2_VALID(1);
   } else {
      *ib->ptr_ib_size = ib->base.current.cdw;
   }
}

static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
{
   amdgpu_set_ib_size(ib);
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}

static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_COMPUTE;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;

      cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_GFX;

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{

   for (i = 0; i < cs->num_real_buffers; i++) {
      p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_slab_buffers; i++) {
      p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_sparse_buffers; i++) {
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_fence_dependencies; i++)
      amdgpu_fence_reference(&cs->fence_dependencies[i], NULL);

   cs->num_real_buffers = 0;
   cs->num_slab_buffers = 0;
   cs->num_sparse_buffers = 0;
   cs->num_fence_dependencies = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);

   FREE(cs->real_buffers);

   FREE(cs->slab_buffers);
   FREE(cs->sparse_buffers);
   FREE(cs->fence_dependencies);
}
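/* A command stream keeps two submission contexts (csc1/csc2): one is being
 * filled by the driver thread while the other may still be in flight on the
 * submission queue, which is what allows amdgpu_cs_flush() to return before
 * the CS ioctl has completed.
 */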
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);

   util_queue_fence_init(&cs->flush_completed);

   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   struct amdgpu_cs_fence_info fence_info;
   fence_info.handle = cs->ctx->user_fence_bo;
   fence_info.offset = cs->ring_type;
   amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);

   cs->main.ib_type = IB_MAIN;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);

   /* Set the first submission context as current. */

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
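/* Make sure there is room for at least "dw" more dwords. On rings that
 * support chaining, the current chunk is closed with an INDIRECT_BUFFER
 * packet pointing at a newly allocated buffer and recording continues there;
 * the packet's size operand is patched later via ptr_ib_size. Without
 * chaining the function can only report whether the remaining space is
 * sufficient.
 */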
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;

   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   if (rcs->current.max_dw - rcs->current.cdw >= dw)

   if (!amdgpu_cs_has_chaining(cs))

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_winsys_cs_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += 4;
   assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);

   /* Pad with NOPs and add INDIRECT_BUFFER packet */
   while ((rcs->current.cdw & 7) != 4)
      radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

   radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
                                                : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   amdgpu_set_ib_size(ib);
   ib->ptr_ib_size = new_ptr_ib_size;
   ib->ptr_ib_size_inside_ib = true;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;

   for (i = 0; i < cs->num_real_buffers; i++) {
      list[i].bo_size = cs->real_buffers[i].bo->base.size;
      list[i].vm_address = cs->real_buffers[i].bo->va;
      list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
   }

   return cs->num_real_buffers;
}

static unsigned add_fence_dependency_entry(struct amdgpu_cs_context *cs)
{
   unsigned idx = cs->num_fence_dependencies++;

   if (idx >= cs->max_fence_dependencies) {

      const unsigned increment = 8;

      cs->max_fence_dependencies = idx + increment;
      size = cs->max_fence_dependencies * sizeof(cs->fence_dependencies[0]);
      cs->fence_dependencies = realloc(cs->fence_dependencies, size);
      /* Clear the newly-allocated elements. */
      memset(cs->fence_dependencies + idx, 0,
             increment * sizeof(cs->fence_dependencies[0]));
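/* A fence dependency is a no-op when it comes from the same context, IP and
 * ring as the current CS (the kernel already orders submissions on one ring)
 * or when the fence has already signalled.
 */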
static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
                                     struct amdgpu_fence *fence)
{
   struct amdgpu_cs_context *cs = acs->csc;

   if (!amdgpu_fence_is_syncobj(fence) &&
       fence->ctx == acs->ctx &&
       fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
       fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
       fence->fence.ring == cs->ib[IB_MAIN].ring)
      return true;

   return amdgpu_fence_wait((void *)fence, 0, false);
}

static void amdgpu_cs_add_fence_dependency(struct radeon_winsys_cs *rws,
                                           struct pipe_fence_handle *pfence)
{
   struct amdgpu_cs *acs = amdgpu_cs(rws);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_fence *fence = (struct amdgpu_fence *)pfence;

   util_queue_fence_wait(&fence->submitted);

   if (is_noop_fence_dependency(acs, fence))
      return;

   unsigned idx = add_fence_dependency_entry(cs);
   amdgpu_fence_reference(&cs->fence_dependencies[idx],
                          (struct pipe_fence_handle *)fence);
}

static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
                                             struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];

      if (is_noop_fence_dependency(acs, bo_fence))

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))

      unsigned idx = add_fence_dependency_entry(cs);
      amdgpu_fence_reference(&cs->fence_dependencies[idx],
                             (struct pipe_fence_handle *)bo_fence);
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}

/* Add the given list of fences to the buffer's fence list.
 *
 * Must be called with the winsys bo_fence_lock held.
 */
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences)
{
   if (bo->num_fences + num_fences > bo->max_fences) {
      unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (likely(new_fences)) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;

         fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
         if (!bo->num_fences)

         bo->num_fences--; /* prefer to keep the most recent fence if possible */
         amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);

         drop = bo->num_fences + num_fences - bo->max_fences;

   for (unsigned i = 0; i < num_fences; ++i) {
      bo->fences[bo->num_fences] = NULL;
      amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);

}

static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
                                                   struct pipe_fence_handle *fence,
                                                   unsigned num_buffers,
                                                   struct amdgpu_cs_buffer *buffers)
{
   for (unsigned i = 0; i < num_buffers; i++) {
      struct amdgpu_cs_buffer *buffer = &buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      amdgpu_add_bo_fence_dependencies(acs, buffer);
      p_atomic_inc(&bo->num_active_ioctls);
      amdgpu_add_fences(bo, 1, &fence);
   }
}

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;

   cs->num_fence_dependencies = 0;

   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
   amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
}

/* Add backing of sparse buffers to the buffer list.
 *
 * This is done late, during submission, to keep the buffer list short before
 * submit, and to avoid managing fences for the backing buffers.
 */
static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
{
   for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
      struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      simple_mtx_lock(&bo->u.sparse.commit_lock);

      list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
         /* We can directly add the buffer here, because we know that each
          * backing buffer occurs only once.
          */
         int idx = amdgpu_do_add_real_buffer(cs, backing->bo);

            fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
            simple_mtx_unlock(&bo->u.sparse.commit_lock);

         cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
         cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
         p_atomic_inc(&backing->bo->num_active_ioctls);
      }

      simple_mtx_unlock(&bo->u.sparse.commit_lock);
   }
}
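/* Executed asynchronously on the winsys cs_queue thread: builds the kernel
 * buffer list (optionally with every allocated buffer when debug_all_bos is
 * set), turns the collected fence dependencies into DEPENDENCIES and
 * SYNCOBJ_IN chunks, submits through amdgpu_cs_submit_raw() and signals the
 * CS fence with the returned sequence number.
 */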
void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;

   amdgpu_bo_list_handle bo_list = NULL;
   uint64_t seq_no = 0;
   bool has_user_fence = amdgpu_cs_has_user_fence(cs);

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (ws->debug_all_bos) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;

      simple_mtx_lock(&ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);

         simple_mtx_unlock(&ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         cs->error_code = -ENOMEM;

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL, &bo_list);

      simple_mtx_unlock(&ws->global_bo_list_lock);
   } else {
      unsigned num_handles;

      if (!amdgpu_add_sparse_backing_buffers(cs)) {

      if (cs->max_real_submit < cs->num_real_buffers) {

         cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
         cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);

         if (!cs->handles || !cs->flags) {
            cs->max_real_submit = 0;

      for (i = 0; i < cs->num_real_buffers; ++i) {
         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];

         if (buffer->bo->is_local)

         assert(buffer->u.real.priority_usage != 0);

         cs->handles[num_handles] = buffer->bo->bo;
         cs->flags[num_handles] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
      }

      if (acs->ring_type == RING_GFX)
         ws->gfx_bo_list_counter += cs->num_real_buffers;

      r = amdgpu_bo_list_create(ws->dev, num_handles,
                                cs->handles, cs->flags, &bo_list);
   }

      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      amdgpu_fence_signalled(cs->fence);

   if (acs->ctx->num_rejected_cs) {

      struct drm_amdgpu_cs_chunk chunks[4];
      unsigned num_chunks = 0;

      /* Convert from dwords to bytes. */
      cs->ib[IB_MAIN].ib_bytes *= 4;

      chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
      chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];

      if (has_user_fence) {
         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
         chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
         chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
      }

      unsigned num_dependencies = cs->num_fence_dependencies;
      unsigned num_syncobj_dependencies = 0;

      if (num_dependencies) {
         struct drm_amdgpu_cs_chunk_dep *dep_chunk =
            alloca(num_dependencies * sizeof(*dep_chunk));

         for (unsigned i = 0; i < num_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence *)cs->fence_dependencies[i];

            if (amdgpu_fence_is_syncobj(fence)) {
               num_syncobj_dependencies++;

            assert(util_queue_fence_is_signalled(&fence->submitted));
            amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[num++]);
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
         chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num;
         chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
      }

      /* Syncobj dependencies. */
      if (num_syncobj_dependencies) {
         struct drm_amdgpu_cs_chunk_sem *sem_chunk =
            alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));

         for (unsigned i = 0; i < num_dependencies; i++) {
            struct amdgpu_fence *fence =
               (struct amdgpu_fence *)cs->fence_dependencies[i];

            if (!amdgpu_fence_is_syncobj(fence))

            assert(util_queue_fence_is_signalled(&fence->submitted));
            sem_chunk[num++].handle = fence->syncobj;
         }

         chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
         chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num;
         chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
      }

      assert(num_chunks <= ARRAY_SIZE(chunks));

      r = amdgpu_cs_submit_raw(ws->dev, acs->ctx->ctx, bo_list,
                               num_chunks, chunks, &seq_no);
   }

      fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
   else if (r == -ECANCELED)
      fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");

      fprintf(stderr, "amdgpu: The CS has been rejected, "
              "see dmesg for more information (%i).\n", r);

      amdgpu_fence_signalled(cs->fence);

      acs->ctx->num_rejected_cs++;
      ws->num_total_rejected_cs++;

      uint64_t *user_fence = NULL;

         user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
      amdgpu_fence_submitted(cs->fence, seq_no, user_fence);

      amdgpu_bo_list_destroy(bo_list);

   for (i = 0; i < cs->num_real_buffers; i++)
      p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_slab_buffers; i++)
      p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_sparse_buffers; i++)
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   util_queue_fence_wait(&cs->flush_completed);
}
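/* Flush: pad the IB to the alignment required by the ring, pick up (or
 * create) the fence for this submission, take the BO fence lock so that
 * per-buffer fence updates and the submission order stay consistent, swap
 * csc/cst and hand the filled context to the submission queue. Without
 * PIPE_FLUSH_ASYNC the call then waits and returns the submission's error
 * code.
 */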
static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);

   switch (cs->ring_type) {

      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= SI) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */

         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */

      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */

         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

      ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;

      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */

      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x81ff); /* nop packet */
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty and has not overflowed... */
   if (likely(radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop())) {
      struct amdgpu_cs_context *cur = cs->csc;

      amdgpu_ib_finalize(&cs->main);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->ib[IB_MAIN].ip_type,
                                          cur->ib[IB_MAIN].ip_instance,
                                          cur->ib[IB_MAIN].ring);
      }

         amdgpu_fence_reference(fence, cur->fence);

      amdgpu_cs_sync_flush(rcs);

      /*
       * This fence must be held until the submission is queued to ensure
       * that the order of fence dependency updates matches the order of
       * submissions.
       */
      simple_mtx_lock(&ws->bo_fence_lock);
      amdgpu_add_fence_dependencies_bo_lists(cs);

      /* Swap command streams. "cst" is going to be submitted. */

      util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                         amdgpu_cs_submit_ib, NULL);
      /* The submission has been queued, unlock the fence now. */
      simple_mtx_unlock(&ws->bo_fence_lock);

      if (!(flags & PIPE_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
      }

      amdgpu_cs_context_cleanup(cs->csc);

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   if (cs->ring_type == RING_GFX)

   else if (cs->ring_type == RING_DMA)

}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
}

static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
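/* Hook the command-submission and fence functions above into the
 * radeon_winsys vtable.
 */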
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
   ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
   ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
   ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
}