/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>
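
/* Fence handling.
 *
 * A fence is created before its IB is actually submitted, so its sequence
 * number is not known yet; submission_in_progress stays true until
 * amdgpu_fence_submitted() (or amdgpu_fence_signalled() on error) fills it
 * in. Waiters must check that flag before looking at the fence value.
 */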
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}
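
/* Wait for the fence to signal: first via the CPU-visible user fence value
 * when one is available, and only then through the libdrm
 * amdgpu_cs_query_fence_status() ioctl. "timeout" is relative or absolute
 * depending on "absolute"; timeout == 0 with absolute == false is a pure
 * query that never blocks on the kernel.
 */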
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }

   return false;
}
static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}
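
/* Context creation also allocates one GTT page that is kept CPU-mapped and
 * used as the "user fence" memory: the GPU writes the completed fence value
 * there (at an offset chosen per ring type at submit time), which lets
 * amdgpu_fence_wait() detect completion without an ioctl.
 */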
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}
static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}
/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE;
}
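
/* buffer_indices_hashlist maps a hash of the BO's unique_id to the index of
 * the most recently looked-up buffer with that hash; -1 marks an empty slot.
 * It is only a cache: a miss falls back to a linear search of cs->buffers.
 */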
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}
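
/* Add a buffer to the per-CS buffer list, or update its usage if it is
 * already there, and report which heap domains are newly requested via
 * *added_domains so the caller can update the VRAM/GTT usage counters.
 * The per-buffer "flags" byte passed to the kernel stores priority / 4.
 */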
static unsigned amdgpu_add_buffer(struct amdgpu_cs *acs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i;

   assert(priority < 64);

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      unsigned size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->csc->used_vram += bo->base.size;
   else if (added_domains & RADEON_DOMAIN_GTT)
      cs->csc->used_gart += bo->base.size;

   return index;
}
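
/* IBs are sub-allocated from a large, CPU-mapped GTT buffer
 * ("big_ib_buffer"); used_ib_space tracks how much of it has been handed
 * out. This helper replaces the backing buffer when the current one is
 * exhausted, sizing it from the largest IB seen so far.
 */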
static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is 4 times larger than the maximum seen IB
    * size, aligned to a power of two. Limit to 512k dwords, which is the
    * largest power of two that fits into the size field of the INDIRECT_BUFFER
    * packet.
    */
   buffer_size = 4 * MIN2(util_next_power_of_two(4 * ib->max_ib_size),
                          512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}
static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}
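
/* Point cs->csc->ib[ib_type] and the corresponding radeon_winsys_cs at a
 * fresh range of the big IB buffer, allocating a new backing buffer first
 * if the remaining space is too small. The big IB buffer is also added to
 * the buffer list so the kernel keeps it resident during execution.
 */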
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   ib_size = MAX2(ib_size,
                  4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                           amdgpu_ib_max_submit_dwords(ib_type)));

   ib->base.cdw = 0;
   ib->base.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.max_dw = ib_size / 4;
   return true;
}
static boolean amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                      enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers) {
      return FALSE;
   }

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return FALSE;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return FALSE;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return TRUE;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}
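
/* Each CS owns two submission contexts: "csc" accumulates the commands
 * being recorded, while "cst" holds the previous submission, which may
 * still be in flight on the winsys thread. amdgpu_cs_flush() swaps them,
 * and the flush_completed semaphore serializes the two.
 */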
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs)
      return NULL;

   pipe_semaphore_init(&cs->flush_completed, 1);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}
static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}
static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}
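
/* Note: OUT_CS below writes one dword without any bounds checking; callers
 * are expected to reserve space with cs_check_space() first (the flush code
 * only detects an overflow after the fact).
 */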
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
}
static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->cdw + dw;

   assert(rcs->cdw <= rcs->max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   return rcs->max_dw - rcs->cdw >= dw;
}
static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
                                            uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   vram += cs->csc->used_vram;
   gtt += cs->csc->used_gart;

   /* Anything that goes above the VRAM size should go to GTT. */
   if (vram > ws->info.vram_size)
      gtt += vram - ws->info.vram_size;

   /* Now we just need to check if we have enough GTT. */
   return gtt < ws->info.gart_size * 0.7;
}
static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;

   return cs->used_vram + cs->used_gart;
}
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;
   int i, j;

   cs->request.number_of_dependencies = 0;

   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == acs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         if (bo_fence->submission_in_progress)
            os_wait_until_zero(&bo_fence->submission_in_progress,
                               PIPE_TIMEOUT_INFINITE);

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }
}
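
/* The submission backend: builds the kernel buffer list (optionally from
 * the global BO list when RADEON_ALL_BOS is set), submits cs->request with
 * amdgpu_cs_submit(), and marks the fence as submitted, or as signalled on
 * error. It runs either directly or on the winsys thread and always
 * consumes acs->cst.
 */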
void amdgpu_cs_submit_ib(struct amdgpu_cs *acs)
{
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;

   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      pipe_mutex_lock(ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         pipe_mutex_unlock(ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      pipe_mutex_unlock(ws->global_bo_list_lock);
   } else {
      r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      goto cleanup;
   }

   r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(cs->fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_buffers; i++)
      p_atomic_dec(&cs->buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}
/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   if (cs->ctx->ws->thread) {
      /* wait and set the semaphore to "busy" */
      pipe_semaphore_wait(&cs->flush_completed);
      /* set the semaphore to "idle" */
      pipe_semaphore_signal(&cs->flush_completed);
   }
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
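
/* Flush path: pad each IB to its required alignment, record the final IB
 * sizes, create and attach a fence, add inter-ring fence dependencies for
 * every referenced buffer, swap csc/cst, and then submit either
 * asynchronously on the winsys thread (RADEON_FLUSH_ASYNC) or in place.
 * RADEON_NOOP drops the recorded commands instead of submitting them.
 */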
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0xffff1000); /* type3 nop packet */

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
            OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.cdw ||
                (cs->const_preamble_ib.base.cdw & 7))
            OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->cdw > rcs->max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty or overflowed.... */
   if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw &&
       !debug_get_option_noop()) {
      struct amdgpu_cs_context *cur = cs->csc;
      unsigned i, num_buffers = cur->num_buffers;

      cur->ib[IB_MAIN].size = cs->main.base.cdw;
      cs->main.used_ib_space += cs->main.base.cdw * 4;
      cs->main.max_ib_size = MAX2(cs->main.max_ib_size, cs->main.base.cdw);

      if (cs->const_ib.ib_mapped) {
         cur->ib[IB_CONST].size = cs->const_ib.base.cdw;
         cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
         cs->const_ib.max_ib_size =
            MAX2(cs->const_ib.max_ib_size, cs->const_ib.base.cdw);
      }

      if (cs->const_preamble_ib.ib_mapped) {
         cur->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
         cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.cdw * 4;
         cs->const_preamble_ib.max_ib_size =
            MAX2(cs->const_preamble_ib.max_ib_size, cs->const_preamble_ib.base.cdw);
      }

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      cur->fence = amdgpu_fence_create(cs->ctx,
                                       cur->request.ip_type,
                                       cur->request.ip_instance,
                                       cur->request.ring);
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      /* Prepare buffers. */
      pipe_mutex_lock(ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);
      for (i = 0; i < num_buffers; i++) {
         p_atomic_inc(&cur->buffers[i].bo->num_active_ioctls);
         amdgpu_fence_reference(&cur->buffers[i].bo->fence[cs->ring_type],
                                cur->fence);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      amdgpu_cs_sync_flush(rcs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      if (ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
         /* Set the semaphore to "busy". */
         pipe_semaphore_wait(&cs->flush_completed);
         amdgpu_ws_queue_cs(ws, cs);
      } else {
         amdgpu_cs_submit_ib(cs);
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   ws->num_cs_flushes++;
}
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   pipe_semaphore_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   FREE(cs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}