/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>

#include "amd/common/sid.h"

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request *request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence *)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}
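
/* Note on amdgpu_fence_wait: the user fence is a GPU-written counter in a
 * CPU-mapped GTT buffer, so a signalled fence can usually be detected by a
 * plain memory read (the *user_fence_cpu >= fence check above); the
 * amdgpu_cs_query_fence_status ioctl is only needed when that check is
 * inconclusive or when an actual timed wait in the kernel is required. */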
static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}
static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (debug_get_option_noop())
      return NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->request.ip_type,
                               cs->csc->request.ip_instance,
                               cs->csc->request.ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;
   ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx *)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx *)rwctx);
}
static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx *)rwctx;
   uint32_t result, hangs;
   int r;

   /* Return a failure due to a rejected command submission. */
   if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
      return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET
                                  : PIPE_INNOCENT_CONTEXT_RESET;
   }

   /* Return a failure due to a GPU hang. */
   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}
/* COMMAND SUBMISSION */
static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE &&
          cs->request.ip_type != AMDGPU_HW_IP_VCN_DEC;
}
static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
   return cs->ctx->ws->info.chip_class >= CIK &&
          cs->ring_type == RING_GFX;
}
static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
{
   if (ring_type == RING_GFX)
      return 4; /* for chaining */

   return 0;
}
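
/* The 4 epilog dwords reserved on gfx rings hold the INDIRECT_BUFFER chaining
 * packet (opcode dword + 2 address dwords + size/flags dword) that
 * amdgpu_cs_check_space emits when it links the current chunk to a new IB
 * buffer. */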
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];
   struct amdgpu_cs_buffer *buffers;
   int num_buffers;

   if (bo->bo) {
      buffers = cs->real_buffers;
      num_buffers = cs->num_real_buffers;
   } else if (!bo->sparse) {
      buffers = cs->slab_buffers;
      num_buffers = cs->num_slab_buffers;
   } else {
      buffers = cs->sparse_buffers;
      num_buffers = cs->num_sparse_buffers;
   }

   /* not found or found */
   if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = num_buffers - 1; i >= 0; i--) {
      if (buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }

   return -1;
}
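
/* buffer_indices_hashlist is a single-entry-per-bucket cache: each bucket
 * remembers only the most recently looked-up buffer that hashed there, so a
 * hit avoids the linear scan entirely, and a miss falls back to scanning the
 * whole buffer list and then refreshes the bucket as described above. */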
static int
amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_buffer *buffer;
   int idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_real_buffers >= cs->max_real_buffers) {
      unsigned new_max =
         MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));

      if (!new_buffers) {
         fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
         return -1;
      }

      memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));

      FREE(cs->real_buffers);

      cs->max_real_buffers = new_max;
      cs->real_buffers = new_buffers;
   }

   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_real_buffers++;

   return idx;
}
static int
amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   idx = amdgpu_do_add_real_buffer(cs, bo);

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      acs->main.base.used_vram += bo->base.size;
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      acs->main.base.used_gart += bo->base.size;

   return idx;
}
static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
                                            struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);
   int real_idx;

   if (idx >= 0)
      return idx;

   real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
   if (real_idx < 0)
      return -1;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_slab_buffers >= cs->max_slab_buffers) {
      unsigned new_max =
         MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->slab_buffers,
                            cs->max_slab_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
         return -1;
      }

      cs->max_slab_buffers = new_max;
      cs->slab_buffers = new_buffers;
   }

   idx = cs->num_slab_buffers;
   buffer = &cs->slab_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   buffer->u.slab.real_idx = real_idx;
   p_atomic_inc(&bo->num_cs_references);
   cs->num_slab_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   return idx;
}
static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
                                              struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
      unsigned new_max =
         MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->sparse_buffers,
                            cs->max_sparse_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
         return -1;
      }

      cs->max_sparse_buffers = new_max;
      cs->sparse_buffers = new_buffers;
   }

   idx = cs->num_sparse_buffers;
   buffer = &cs->sparse_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_sparse_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   /* We delay adding the backing buffers until we really have to. However,
    * we cannot delay accounting for memory use.
    */
   mtx_lock(&bo->u.sparse.commit_lock);

   list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
      if (bo->initial_domain & RADEON_DOMAIN_VRAM)
         acs->main.base.used_vram += backing->bo->base.size;
      else if (bo->initial_domain & RADEON_DOMAIN_GTT)
         acs->main.base.used_gart += backing->bo->base.size;
   }

   mtx_unlock(&bo->u.sparse.commit_lock);

   return idx;
}
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)buf;
   struct amdgpu_cs_buffer *buffer;
   int index;

   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (bo == cs->last_added_bo &&
       (usage & cs->last_added_bo_usage) == usage &&
       (1ull << priority) & cs->last_added_bo_priority_usage)
      return cs->last_added_bo_index;

   if (!bo->sparse) {
      if (!bo->bo) {
         index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
         if (index < 0)
            return 0;

         buffer = &cs->slab_buffers[index];
         buffer->usage |= usage;

         usage &= ~RADEON_USAGE_SYNCHRONIZED;
         index = buffer->u.slab.real_idx;
      } else {
         index = amdgpu_lookup_or_add_real_buffer(acs, bo);
         if (index < 0)
            return 0;
      }

      buffer = &cs->real_buffers[index];
   } else {
      index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
      if (index < 0)
         return 0;

      buffer = &cs->sparse_buffers[index];
   }

   buffer->u.real.priority_usage |= 1llu << priority;
   buffer->usage |= usage;

   cs->last_added_bo = bo;
   cs->last_added_bo_index = index;
   cs->last_added_bo_usage = buffer->usage;
   cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
   return index;
}
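
/* The last_added_bo cache above makes the common pattern of adding the same
 * buffer many times in a row (e.g. a suballocated upload buffer referenced by
 * many consecutive draws) a constant-time no-op: as long as the requested
 * usage and priority bits are already a subset of what was recorded, the
 * cached index is returned without touching the hash list or the buffer
 * arrays. */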
static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   buffer_size = MIN2(buffer_size, 4 * 512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT, 0);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}
static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys *)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   if (!amdgpu_cs_has_chaining(cs)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   info->size = 0;
   ib->ptr_ib_size = &info->size;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
   return true;
}
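
/* ptr_ib_size points at the dword that holds the size of the chunk currently
 * being recorded: initially the size field of the kernel IB info, and, once
 * chaining kicks in, the size/flags dword of the INDIRECT_BUFFER packet that
 * points to the chunk. amdgpu_ib_finalize and amdgpu_cs_check_space patch it
 * in place, which is why it is stored as a pointer rather than a value. */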
static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
{
   *ib->ptr_ib_size |= ib->base.current.cdw;
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}
static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;
   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;
   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;
   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;
   case RING_VCN_DEC:
      cs->request.ip_type = AMDGPU_HW_IP_VCN_DEC;
      break;
   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return true;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_real_buffers; i++) {
      p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_slab_buffers; i++) {
      p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_sparse_buffers; i++) {
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_fence_dependencies; i++)
      amdgpu_fence_reference(&cs->fence_dependencies[i], NULL);

   cs->num_real_buffers = 0;
   cs->num_slab_buffers = 0;
   cs->num_sparse_buffers = 0;
   cs->num_fence_dependencies = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
   cs->last_added_bo = NULL;
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->real_buffers);
   FREE(cs->handles);
   FREE(cs->slab_buffers);
   FREE(cs->sparse_buffers);
   FREE(cs->fence_dependencies);
}
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx *)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs)
      return NULL;

   util_queue_fence_init(&cs->flush_completed);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}
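
/* csc1/csc2 are double-buffered submission contexts: "csc" accumulates the IB
 * that is currently being built while "cst" describes the one that was just
 * handed to the CS thread; amdgpu_cs_flush swaps the two. */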
static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs *)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}
static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs *)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}
static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return true;
}
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   if (rcs->current.max_dw - rcs->current.cdw >= dw)
      return true;

   if (!amdgpu_cs_has_chaining(cs))
      return false;

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_winsys_cs_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += 4;
   assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);

   /* Pad with NOPs and add INDIRECT_BUFFER packet */
   while ((rcs->current.cdw & 7) != 4)
      radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

   radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
                                                : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
   radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   *ib->ptr_ib_size |= rcs->current.cdw;
   ib->ptr_ib_size = new_ptr_ib_size;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}
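
/* Chaining layout produced above (CIK+ gfx ring): the old chunk is padded
 * with type-3 NOPs until its size is 8*n+4 dwords, then a 4-dword packet is
 * appended:
 *
 *    [0] PKT3(INDIRECT_BUFFER_*, 2, 0)
 *    [1] va of the new chunk (low 32 bits)
 *    [2] va >> 32
 *    [3] S_3F2_CHAIN(1) | S_3F2_VALID(1) | size   (size patched in later)
 *
 * so the finished chunk is a multiple of 8 dwords and the CP jumps straight
 * into the next buffer. */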
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_real_buffers; i++) {
         list[i].bo_size = cs->real_buffers[i].bo->base.size;
         list[i].vm_address = cs->real_buffers[i].bo->va;
         list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
      }
   }
   return cs->num_real_buffers;
}
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
                                        struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
      unsigned idx;

      if (bo_fence->ctx == acs->ctx &&
          bo_fence->fence.ip_type == cs->request.ip_type &&
          bo_fence->fence.ip_instance == cs->request.ip_instance &&
          bo_fence->fence.ring == cs->request.ring)
         continue;

      if (amdgpu_fence_wait((void *)bo_fence, 0, false))
         continue;

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
         continue;

      idx = cs->num_fence_dependencies++;
      if (idx >= cs->max_fence_dependencies) {
         unsigned size;
         const unsigned increment = 8;

         cs->max_fence_dependencies = idx + increment;
         size = cs->max_fence_dependencies * sizeof(cs->fence_dependencies[0]);
         cs->fence_dependencies = realloc(cs->fence_dependencies, size);
         /* Clear the newly-allocated elements. */
         memset(cs->fence_dependencies + idx, 0,
                increment * sizeof(cs->fence_dependencies[0]));
      }

      amdgpu_fence_reference(&cs->fence_dependencies[idx],
                             (struct pipe_fence_handle *)bo_fence);
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}
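
/* While scanning the buffer's fence list above, fences that have already
 * signalled (checked with a zero-timeout wait) and fences from this context
 * on the same ring are dropped, so the per-buffer list stays compact; only
 * buffers added with RADEON_USAGE_SYNCHRONIZED turn the remaining fences into
 * submission dependencies. */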
/* Add the given list of fences to the buffer's fence list.
 *
 * Must be called with the winsys bo_fence_lock held.
 */
void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
                       unsigned num_fences,
                       struct pipe_fence_handle **fences)
{
   if (bo->num_fences + num_fences > bo->max_fences) {
      unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (likely(new_fences)) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;
      } else {
         unsigned drop;

         fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
         if (!bo->num_fences)
            return;

         bo->num_fences--; /* prefer to keep the most recent fence if possible */
         amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);

         drop = bo->num_fences + num_fences - bo->max_fences;
         num_fences -= drop;
         fences += drop;
      }
   }

   for (unsigned i = 0; i < num_fences; ++i) {
      bo->fences[bo->num_fences] = NULL;
      amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
      bo->num_fences++;
   }
}
static void amdgpu_add_fence_dependencies_list(struct amdgpu_cs *acs,
                                               struct pipe_fence_handle *fence,
                                               unsigned num_buffers,
                                               struct amdgpu_cs_buffer *buffers)
{
   for (unsigned i = 0; i < num_buffers; i++) {
      struct amdgpu_cs_buffer *buffer = &buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      amdgpu_add_fence_dependency(acs, buffer);
      p_atomic_inc(&bo->num_active_ioctls);
      amdgpu_add_fences(bo, 1, &fence);
   }
}
/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;

   cs->num_fence_dependencies = 0;

   amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
   amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
   amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
}
/* Add backing of sparse buffers to the buffer list.
 *
 * This is done late, during submission, to keep the buffer list short before
 * submit, and to avoid managing fences for the backing buffers.
 */
static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
{
   for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
      struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      mtx_lock(&bo->u.sparse.commit_lock);

      list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
         /* We can directly add the buffer here, because we know that each
          * backing buffer occurs only once.
          */
         int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
         if (idx < 0) {
            fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
            mtx_unlock(&bo->u.sparse.commit_lock);
            return false;
         }

         cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
         cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
         p_atomic_inc(&backing->bo->num_active_ioctls);
      }

      mtx_unlock(&bo->u.sparse.commit_lock);
   }

   return true;
}
void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs *)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;
   struct amdgpu_cs_fence *dependencies = NULL;

   /* Set dependencies (input fences). */
   if (cs->num_fence_dependencies) {
      dependencies = alloca(sizeof(dependencies[0]) *
                            cs->num_fence_dependencies);
      unsigned num = 0;

      for (i = 0; i < cs->num_fence_dependencies; i++) {
         struct amdgpu_fence *fence =
            (struct amdgpu_fence *)cs->fence_dependencies[i];

         /* Past fences can't be unsubmitted because we have only 1 CS thread. */
         assert(!fence->submission_in_progress);
         memcpy(&dependencies[num++], &fence->fence, sizeof(dependencies[0]));
      }
      cs->request.dependencies = dependencies;
      cs->request.number_of_dependencies = num;
   } else {
      cs->request.dependencies = NULL;
      cs->request.number_of_dependencies = 0;
   }

   /* Set the output fence. */
   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      mtx_lock(&ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         mtx_unlock(&ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         cs->error_code = -ENOMEM;
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      mtx_unlock(&ws->global_bo_list_lock);
   } else {
      if (!amdgpu_add_sparse_backing_buffers(cs)) {
         r = -ENOMEM;
         goto bo_list_error;
      }

      if (cs->max_real_submit < cs->num_real_buffers) {
         FREE(cs->handles);
         FREE(cs->flags);

         cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
         cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);

         if (!cs->handles || !cs->flags) {
            cs->max_real_submit = 0;
            r = -ENOMEM;
            goto bo_list_error;
         }
      }

      for (i = 0; i < cs->num_real_buffers; ++i) {
         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];

         assert(buffer->u.real.priority_usage != 0);

         cs->handles[i] = buffer->bo->bo;
         cs->flags[i] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
      }

      if (acs->ring_type == RING_GFX)
         ws->gfx_bo_list_counter += cs->num_real_buffers;

      r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }
bo_list_error:

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      cs->error_code = r;
      goto cleanup;
   }

   if (acs->ctx->num_rejected_cs)
      r = -ECANCELED;
   else
      r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);

   cs->error_code = r;
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else if (r == -ECANCELED)
         fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      amdgpu_fence_signalled(cs->fence);

      acs->ctx->num_rejected_cs++;
      ws->num_total_rejected_cs++;
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_real_buffers; i++)
      p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_slab_buffers; i++)
      p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_sparse_buffers; i++)
      p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}
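
/* amdgpu_cs_submit_ib runs on the winsys CS thread (it is queued with
 * util_queue_add_job from amdgpu_cs_flush), so it only touches "cst", never
 * "csc". Errors are reported back through cs->error_code, which the next
 * synchronous flush returns to the caller. */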
/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   util_queue_fence_wait(&cs->flush_completed);
}
static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;
   int error_code = 0;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= SI) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
      }

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   case RING_VCN_DEC:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x81ff); /* nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty or overflowed.... */
   if (likely(radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop())) {
      struct amdgpu_cs_context *cur = cs->csc;

      /* Set IB sizes. */
      amdgpu_ib_finalize(&cs->main);

      if (cs->const_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_ib);

      if (cs->const_preamble_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_preamble_ib);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->request.ip_type,
                                          cur->request.ip_instance,
                                          cur->request.ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      amdgpu_cs_sync_flush(rcs);

      /* Prepare buffers.
       *
       * This lock must be held until the submission is queued to ensure
       * that the order of fence dependency updates matches the order of
       * submissions.
       */
      mtx_lock(&ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                         amdgpu_cs_submit_ib, NULL);
      /* The submission has been queued, unlock the fence now. */
      mtx_unlock(&ws->bo_fence_lock);

      if (!(flags & RADEON_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   if (cs->ring_type == RING_GFX)
      ws->num_gfx_IBs++;
   else if (cs->ring_type == RING_DMA)
      ws->num_sdma_IBs++;

   return error_code;
}
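
/* The flush path keeps submissions ordered: amdgpu_cs_sync_flush waits for
 * the previous job before the contexts are swapped, and bo_fence_lock is held
 * from amdgpu_add_fence_dependencies until the new job is queued, so fence
 * bookkeeping and submission order can never diverge. */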
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   FREE(cs->const_ib.base.prev);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs->const_preamble_ib.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}
static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo *)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}