/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>

#include "../../../drivers/radeonsi/sid.h"

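/* Fence handling.
 *
 * A fence is created in the "submission in progress" state before the IB
 * ioctl is issued. It is finalized either by amdgpu_fence_submitted(), which
 * records the kernel sequence number, or by amdgpu_fence_signalled() when
 * the submission is dropped. Waiters first wait for submission_in_progress
 * to clear, then poll the user fence memory, and only fall back to the
 * amdgpu_cs_query_fence_status ioctl when that is not conclusive. */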
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

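/* Called once the IB has been handed to the kernel: record the returned
 * sequence number and the CPU address of the user fence value so that
 * waiters can stop waiting on submission_in_progress. */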
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request *request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}

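/* Wait for the fence with either a relative or an absolute timeout and
 * return true if it has signalled. A zero relative timeout turns this into
 * a non-blocking query. */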
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }

   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

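/* Return the fence that will correspond to the next flush of this CS,
 * creating and caching it in cs->next_fence on first use. */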
static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->request.ip_type,
                               cs->csc->request.ip_instance,
                               cs->csc->request.ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}

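/* Create a winsys context. Besides the kernel context, this allocates and
 * CPU-maps one GTT page that holds the "user fence" values written by the
 * GPU; amdgpu_fence_wait() polls that mapping before falling back to the
 * fence ioctl. */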
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE;
}

static bool amdgpu_cs_has_chaining(enum ring_type ring_type)
{
   return ring_type == RING_GFX;
}

static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
{
   if (ring_type == RING_GFX)
      return 4; /* for chaining */

   return 0;
}

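/* Map a buffer to its index in the CS buffer list, using a small hash table
 * of recently looked-up buffers and falling back to a linear search on a
 * collision. Returns -1 if the buffer is not in the list. */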
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }

   return -1;
}

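/* Add a buffer to the CS buffer list, or merge the new usage, domain and
 * priority flags into an existing entry, and return its index. */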
static unsigned amdgpu_add_buffer(struct amdgpu_cs *acs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i;

   assert(priority < 64);

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      unsigned size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];

   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->main.base.used_vram += bo->base.size;
   else if (added_domains & RADEON_DOMAIN_GTT)
      cs->main.base.used_gart += bo->base.size;

   return index;
}

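/* Replace the backing buffer of an IB with a freshly allocated, CPU-mapped
 * GTT buffer, sized according to the largest IB seen so far. */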
static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)->ring_type))
      buffer_size = 4 *util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 *util_next_power_of_two(4 * ib->max_ib_size);

   buffer_size = MIN2(buffer_size, 4 * 512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}

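/* Point "ib" at fresh space in its backing buffer, allocating a new backing
 * buffer if the current one is exhausted, and register that buffer with the
 * CS. Also resets the chunk bookkeeping of the radeon_winsys_cs. */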
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   if (!amdgpu_cs_has_chaining(cs->ring_type)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   info->size = 0;
   ib->ptr_ib_size = &info->size;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
   return true;
}

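/* Close the current IB: patch its final size into the size dword referenced
 * by ptr_ib_size and account for the space it consumed. */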
static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
{
   *ib->ptr_ib_size |= ib->base.current.cdw;
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}

static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;
   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;
   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;
   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;
   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers)
      return false;

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return false;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return false;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return true;
}

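/* Drop all buffer references held by a submission context and reset it for
 * reuse. */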
static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}

static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs)
      return NULL;

   util_queue_fence_init(&cs->flush_completed);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}

#define OUT_CS(cs, value) (cs)->current.buf[(cs)->current.cdw++] = (value)

static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
}

static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return true;
}

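/* Ensure at least "dw" more dwords can be emitted. If the current chunk is
 * too small and the ring supports chaining, allocate a new chunk and chain
 * to it with an INDIRECT_BUFFER packet; otherwise fail. */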
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   if (rcs->current.max_dw - rcs->current.cdw >= dw)
      return true;

   if (!amdgpu_cs_has_chaining(cs->ring_type))
      return false;

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_winsys_cs_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += 4;
   assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);

   /* Pad with NOPs and add INDIRECT_BUFFER packet */
   while ((rcs->current.cdw & 7) != 4)
      OUT_CS(rcs, 0xffff1000); /* type3 nop packet */

   OUT_CS(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
                                           : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
   OUT_CS(rcs, va);
   OUT_CS(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
   OUT_CS(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   *ib->ptr_ib_size |= rcs->current.cdw;
   ib->ptr_ib_size = new_ptr_ib_size;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         list[i].bo_size = cs->buffers[i].bo->base.size;
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;
   int i, j;

   cs->request.number_of_dependencies = 0;

   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == acs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         if (bo_fence->submission_in_progress)
            os_wait_until_zero(&bo_fence->submission_in_progress,
                               PIPE_TIMEOUT_INFINITE);

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }
}

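/* The back end of cs_flush: build the kernel buffer list (optionally from
 * the global BO list when RADEON_ALL_BOS is set), submit the request, and
 * mark the fence as submitted or signalled depending on the outcome. This
 * runs either directly or on the CS thread via util_queue. */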
void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;

   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      pipe_mutex_lock(ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         pipe_mutex_unlock(ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         cs->error_code = -ENOMEM;
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      pipe_mutex_unlock(ws->global_bo_list_lock);
   } else {
      r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      cs->error_code = r;
      goto cleanup;
   }

   r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
   cs->error_code = r;
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      amdgpu_fence_signalled(cs->fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_buffers; i++)
      p_atomic_dec(&cs->buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* Wait for any pending ioctl of this CS to complete. */
   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_job_wait(&cs->flush_completed);
}

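/* Flush the CS: pad the IBs to the required alignment, finalize their sizes,
 * pick or create the fence, record buffer fences and dependencies, swap the
 * double-buffered submission contexts, and submit either asynchronously on
 * the CS thread or synchronously. */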
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)

static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;
   int error_code = 0;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= SI) {
         while (rcs->current.cdw & 7)
            OUT_CS(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            OUT_CS(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
      }

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
            OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
            OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty or overflowed.... */
   if (radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop()) {
      struct amdgpu_cs_context *cur = cs->csc;
      unsigned i, num_buffers = cur->num_buffers;

      amdgpu_ib_finalize(&cs->main);

      if (cs->const_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_ib);

      if (cs->const_preamble_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_preamble_ib);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->request.ip_type,
                                          cur->request.ip_instance,
                                          cur->request.ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      /* Prepare buffers. */
      pipe_mutex_lock(ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);
      for (i = 0; i < num_buffers; i++) {
         p_atomic_inc(&cur->buffers[i].bo->num_active_ioctls);
         amdgpu_fence_reference(&cur->buffers[i].bo->fence[cs->ring_type],
                                cur->fence);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      amdgpu_cs_sync_flush(rcs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      if ((flags & RADEON_FLUSH_ASYNC) &&
          util_queue_is_initialized(&ws->cs_queue)) {
         util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                            amdgpu_cs_submit_ib, NULL);
      } else {
         amdgpu_cs_submit_ib(cs, 0);
         error_code = cs->cst->error_code;
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   ws->num_cs_flushes++;
   return error_code;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   FREE(cs->const_ib.base.prev);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs->const_preamble_ib.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}