/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <amdgpu.h>
#include "drm-uapi/amdgpu_drm.h"
#include <assert.h>
#include <pthread.h>
#include <errno.h>

#include "util/u_memory.h"
#include "ac_debug.h"

#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"
enum {
	VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};
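
/* Both hash tables below are sized to a power of two so that a cheap
 * bitwise AND with (size - 1) can stand in for the modulo when hashing
 * buffer handles and pointers, e.g. hash = bo_handle & (1024 - 1).
 */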
struct radv_amdgpu_cs {
	struct radeon_cmdbuf base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	struct drm_amdgpu_bo_list_entry *handles;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	VkResult status;
	bool is_chained;

	int buffer_hash_table[1024];
	unsigned hw_ip;

	unsigned num_virtual_buffers;
	unsigned max_num_virtual_buffers;
	struct radeon_winsys_bo **virtual_buffers;
	int *virtual_buffer_hash_table;

	/* For chips that don't support chaining. */
	struct radeon_cmdbuf *old_cs_buffers;
	unsigned num_old_cs_buffers;
};
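
/* A command stream operates in one of two modes, selected by
 * ws->use_ib_bos:
 *
 *  - IB mode: commands are written directly into a mapped GPU buffer
 *    (ib_buffer). When it fills up, a new buffer is allocated and the
 *    old one chained to it; retired buffers stay in old_ib_buffers
 *    until the CS is reset.
 *
 *  - Sysmem mode, for chips that don't support chaining: commands are
 *    recorded into malloc'ed memory (base.buf) and copied into
 *    GPU-visible bounce buffers at submit time; overflow buffers are
 *    kept in old_cs_buffers.
 */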
static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
	return (struct radv_amdgpu_cs *)base;
}
static int ring_to_hw_ip(enum ring_type ring)
{
	switch (ring) {
	case RING_GFX:
		return AMDGPU_HW_IP_GFX;
	case RING_DMA:
		return AMDGPU_HW_IP_DMA;
	case RING_COMPUTE:
		return AMDGPU_HW_IP_COMPUTE;
	default:
		unreachable("unsupported ring");
	}
}
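
/* The request below mirrors the parameters of the amdgpu CS ioctl; it is
 * translated into an array of drm_amdgpu_cs_chunk entries by
 * radv_amdgpu_cs_submit() at the bottom of this file.
 */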
struct radv_amdgpu_cs_request {
	/** Specify flags with additional information */
	uint64_t flags;

	/** Specify HW IP block type to which to send the IB. */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/**
	 * Specify ring index of the IP. We could have several rings
	 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
	 */
	uint32_t ring;

	/**
	 * BO list handles used by this request.
	 */
	struct drm_amdgpu_bo_list_entry *handles;
	uint32_t num_handles;

	/**
	 * Number of dependencies this Command submission needs to
	 * wait for before starting execution.
	 */
	uint32_t number_of_dependencies;

	/**
	 * Array of dependencies which need to be met before
	 * execution can start.
	 */
	struct amdgpu_cs_fence *dependencies;

	/** Number of IBs to submit in the field ibs. */
	uint32_t number_of_ibs;

	/**
	 * IBs to submit. Those IBs will be submitted together as a single entity.
	 */
	struct amdgpu_cs_ib_info *ibs;

	/**
	 * The returned sequence number for the command submission.
	 */
	uint64_t seq_no;

	/**
	 * The fence information.
	 */
	struct amdgpu_cs_fence_info fence_info;
};
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct radv_amdgpu_cs_request *request,
				 struct radv_winsys_sem_info *sem_info);
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
					 struct radv_amdgpu_fence *fence,
					 struct radv_amdgpu_cs_request *req)
{
	fence->fence.context = ctx->ctx;
	fence->fence.ip_type = req->ip_type;
	fence->fence.ip_instance = req->ip_instance;
	fence->fence.ring = req->ring;
	fence->fence.fence = req->seq_no;
	fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
}
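
/* user_ptr points at the per-(IP type, ring) slot of the context's fence
 * BO (the same offset computed in radv_set_cs_fence() below); assuming the
 * usual amdgpu user-fence behaviour, the kernel writes the sequence number
 * of the last completed submission there, which lets fence waits be
 * short-circuited without an ioctl.
 */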
static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
	if (!fence)
		return NULL;

	fence->fence.fence = UINT64_MAX;
	return (struct radeon_winsys_fence*)fence;
}
static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	free(fence);
}
static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	fence->fence.fence = UINT64_MAX;
}
static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	fence->fence.fence = 0;
}
static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	return fence->fence.fence < UINT64_MAX;
}
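
/* Fence waits are tiered: the constant values 0 (signaled at creation) and
 * UINT64_MAX (never submitted) are answered directly, then the mapped user
 * fence slot is polled, and only as a last resort do we take the
 * amdgpu_cs_query_fence_status() ioctl path.
 */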
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
				   struct radeon_winsys_fence *_fence,
				   bool absolute,
				   uint64_t timeout)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	/* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
	if (fence->fence.fence == UINT64_MAX)
		return false;

	if (fence->fence.fence == 0)
		return true;

	if (fence->user_ptr) {
		if (*fence->user_ptr >= fence->fence.fence)
			return true;
		if (!absolute && !timeout)
			return false;
	}

	/* Now use the libdrm query. */
	r = amdgpu_cs_query_fence_status(&fence->fence,
					 timeout,
					 flags,
					 &expired);

	if (r) {
		fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	return expired != 0;
}
static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
				    struct radeon_winsys_fence *const *_fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout)
{
	struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
	int r;
	uint32_t expired = 0, first = 0;

	if (!fences)
		return false;

	for (uint32_t i = 0; i < fence_count; ++i)
		fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;

	/* Now use the libdrm query. */
	r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
				  timeout, &expired, &first);

	free(fences);

	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
		return false;
	}

	return expired != 0;
}
static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
		struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
		free(rcs->buf);
	}

	free(cs->old_cs_buffers);
	free(cs->old_ib_buffers);
	free(cs->virtual_buffers);
	free(cs->virtual_buffer_hash_table);
	free(cs);
}
static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
				enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	cs->hw_ip = ring_to_hw_ip(ring_type);
}
static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
		      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, ring_type);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
						  RADEON_DOMAIN_GTT,
						  RADEON_FLAG_CPU_ACCESS |
						  RADEON_FLAG_NO_INTERPROCESS_SHARING |
						  RADEON_FLAG_READ_ONLY,
						  RADV_BO_PRIORITY_CS);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer);
	} else {
		cs->base.buf = malloc(16384);
		cs->base.max_dw = 4096;
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
	}

	return &cs->base;
}
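
/* Growing an IB-mode CS chains into a fresh buffer: the current IB is
 * padded with NOPs until its dword count is 4 short of a multiple of 8,
 * so the 4-dword INDIRECT_BUFFER packet emitted below ends it on a full
 * multiple of 8. For example, at cdw == 9 the loop pads to 12 and the
 * chain packet closes the IB at 16. The size field of the previous IB is
 * patched through ib_size_ptr, which is why it is kept as a pointer.
 */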
static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->status != VK_SUCCESS) {
		cs->base.cdw = 0;
		return;
	}

	if (!cs->ws->use_ib_bos) {
		const uint64_t limit_dws = 0xffff8;
		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
				       MIN2(cs->base.max_dw * 2, limit_dws));

		/* The total ib size cannot exceed limit_dws dwords. */
		if (ib_dws > limit_dws) {
			/* The maximum size in dwords has been reached,
			 * try to allocate a new one.
			 */
			cs->old_cs_buffers =
				realloc(cs->old_cs_buffers,
					(cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
			if (!cs->old_cs_buffers) {
				cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
				cs->base.cdw = 0;
				return;
			}

			/* Store the current one for submitting it later. */
			cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
			cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
			cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
			cs->num_old_cs_buffers++;

			/* Reset the cs, it will be re-allocated below. */
			cs->base.cdw = 0;
			cs->base.buf = NULL;

			/* Re-compute the number of dwords to allocate. */
			ib_dws = MAX2(cs->base.cdw + min_size,
				      MIN2(cs->base.max_dw * 2, limit_dws));
			if (ib_dws > limit_dws) {
				fprintf(stderr, "amdgpu: Too high number of "
						"dwords to allocate\n");
				cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
				return;
			}
		}

		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
		if (new_buf) {
			cs->base.buf = new_buf;
			cs->base.max_dw = ib_dws;
		} else {
			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
			cs->base.cdw = 0;
		}
		return;
	}

	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		radeon_emit(&cs->base, PKT3_NOP_PAD);

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
					     cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS |
						   RADEON_FLAG_NO_INTERPROCESS_SHARING |
						   RADEON_FLAG_READ_ONLY,
						   RADV_BO_PRIORITY_CS);

	if (!cs->ib_buffer) {
		cs->base.cdw = 0;
		cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
		return;
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;

		/* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
		cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
		return;
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

	radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
	radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
	radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));

	cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}
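
/* Finalizing pads the IB to a full multiple of 8 dwords (0 mod 8, since
 * no chain packet follows) and ORs the final dword count into the size
 * field of the last IB in the chain.
 */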
static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			radeon_emit(&cs->base, PKT3_NOP_PAD);

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return cs->status;
}
static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->status = VK_SUCCESS;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = cs->handles[i].bo_handle &
				(ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
		cs->virtual_buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;
	cs->num_virtual_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	} else {
		for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
			struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
			free(rcs->buf);
		}

		free(cs->old_cs_buffers);
		cs->old_cs_buffers = NULL;
		cs->num_old_cs_buffers = 0;
	}
}
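
/* buffer_hash_table is a direct-mapped cache over the handle array: each
 * slot remembers the index of one buffer. A miss falls back to a linear
 * scan and refreshes the slot, so repeated lookups of hot buffers stay
 * O(1).
 */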
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
				      uint32_t bo)
{
	unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index].bo_handle == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i].bo_handle == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}
static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
					       uint32_t bo, uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1 || cs->status != VK_SUCCESS)
		return;

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		struct drm_amdgpu_bo_list_entry *new_entries =
			realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
		if (new_entries) {
			cs->max_num_buffers = new_count;
			cs->handles = new_entries;
		} else {
			cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
			return;
		}
	}

	cs->handles[cs->num_buffers].bo_handle = bo;
	cs->handles[cs->num_buffers].bo_priority = priority;

	hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}
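
/* Virtual (sparse) buffers have no single kernel handle, so they are
 * tracked by pointer in a separate, lazily allocated table, hashed on
 * (uintptr_t)bo >> 6 to skip the low bits that allocation alignment keeps
 * constant.
 */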
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
					      struct radeon_winsys_bo *bo)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

	if (!cs->virtual_buffer_hash_table) {
		cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
		for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
			cs->virtual_buffer_hash_table[i] = -1;
	}

	if (cs->virtual_buffer_hash_table[hash] >= 0) {
		int idx = cs->virtual_buffer_hash_table[hash];
		if (cs->virtual_buffers[idx] == bo)
			return;

		for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
			if (cs->virtual_buffers[i] == bo) {
				cs->virtual_buffer_hash_table[hash] = i;
				return;
			}
		}
	}

	if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
		cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
		cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * cs->max_num_virtual_buffers);
	}

	cs->virtual_buffers[cs->num_virtual_buffers] = bo;

	cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
	++cs->num_virtual_buffers;
}
static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
				      struct radeon_winsys_bo *_bo)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	if (bo->is_virtual) {
		radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
		return;
	}

	if (bo->base.is_local)
		return;

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
}
static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
					     struct radeon_cmdbuf *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent,
						   child->handles[i].bo_handle,
						   child->handles[i].bo_priority);
	}

	for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
		radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
		radeon_emit(&parent->base, child->ib.ib_mc_address);
		radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
		radeon_emit(&parent->base, child->ib.size);
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}
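
/* Builds the flat drm_amdgpu_bo_list_entry array for a submission. Three
 * paths: with debug_all_bos every BO known to the winsys is sent (for
 * debugging), a single CS with no extras is a plain memcpy of its handle
 * array, and the general case merges all sources with a quadratic
 * duplicate check, expanding virtual buffers into their backing BOs.
 */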
static int radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
				   struct radeon_cmdbuf **cs_array,
				   unsigned count,
				   struct radv_amdgpu_winsys_bo **extra_bo_array,
				   unsigned num_extra_bo,
				   struct radeon_cmdbuf *extra_cs,
				   const struct radv_winsys_bo_list *radv_bo_list,
				   unsigned *rnum_handles,
				   struct drm_amdgpu_bo_list_entry **rhandles)
{
	struct drm_amdgpu_bo_list_entry *handles = NULL;
	unsigned num_handles = 0;

	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num_handles < ws->num_buffers);
			handles[num_handles].bo_handle = bo->bo_handle;
			handles[num_handles].bo_priority = bo->priority;
			num_handles++;
		}

		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
		   !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[0];
		if (cs->num_buffers == 0)
			return 0;

		handles = malloc(sizeof(handles[0]) * cs->num_buffers);
		if (!handles)
			return -ENOMEM;

		memcpy(handles, cs->handles,
		       sizeof(handles[0]) * cs->num_buffers);
		num_handles = cs->num_buffers;
	} else {
		unsigned total_buffer_count = num_extra_bo;
		num_handles = num_extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)cs_array[i];
			total_buffer_count += cs->num_buffers;
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
				total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
		}

		if (extra_cs)
			total_buffer_count += ((struct radv_amdgpu_cs *)extra_cs)->num_buffers;

		if (radv_bo_list)
			total_buffer_count += radv_bo_list->count;

		if (total_buffer_count == 0)
			return 0;

		handles = malloc(sizeof(handles[0]) * total_buffer_count);
		if (!handles)
			return -ENOMEM;

		for (unsigned i = 0; i < num_extra_bo; i++) {
			handles[i].bo_handle = extra_bo_array[i]->bo_handle;
			handles[i].bo_priority = extra_bo_array[i]->priority;
		}

		for (unsigned i = 0; i < count + !!extra_cs; ++i) {
			struct radv_amdgpu_cs *cs;

			if (i == count)
				cs = (struct radv_amdgpu_cs *)extra_cs;
			else
				cs = (struct radv_amdgpu_cs *)cs_array[i];

			if (!cs->num_buffers)
				continue;

			if (num_handles == 0 && !cs->num_virtual_buffers) {
				memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
				num_handles = cs->num_buffers;
				continue;
			}

			int unique_bo_so_far = num_handles;
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_so_far; ++k) {
					if (handles[k].bo_handle == cs->handles[j].bo_handle) {
						found = true;
						break;
					}
				}
				if (!found) {
					handles[num_handles] = cs->handles[j];
					++num_handles;
				}
			}

			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
				struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
				for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
					struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
					bool found = false;
					for (unsigned m = 0; m < num_handles; ++m) {
						if (handles[m].bo_handle == bo->bo_handle) {
							found = true;
							break;
						}
					}
					if (!found) {
						handles[num_handles].bo_handle = bo->bo_handle;
						handles[num_handles].bo_priority = bo->priority;
						++num_handles;
					}
				}
			}
		}

		if (radv_bo_list) {
			unsigned unique_bo_so_far = num_handles;
			for (unsigned i = 0; i < radv_bo_list->count; ++i) {
				struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
				bool found = false;
				for (unsigned j = 0; j < unique_bo_so_far; ++j) {
					if (bo->bo_handle == handles[j].bo_handle) {
						found = true;
						break;
					}
				}
				if (!found) {
					handles[num_handles].bo_handle = bo->bo_handle;
					handles[num_handles].bo_priority = bo->priority;
					++num_handles;
				}
			}
		}
	}

	*rnum_handles = num_handles;
	*rhandles = handles;

	return 0;
}
static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
	struct amdgpu_cs_fence_info ret = {0};
	if (ctx->fence_map) {
		ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
		ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
	}
	return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
				    struct radv_amdgpu_cs_request *request)
{
	radv_amdgpu_request_to_fence(ctx,
				     &ctx->last_submission[request->ip_type][request->ring],
				     request);
}
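
/* Chained submission: every CS was finalized with 4 spare dwords
 * (max_dw = size / 4 - 4), so a chain packet to the next CS can be
 * written in place and accounted for by bumping that IB's size through
 * ib_size_ptr by 4; un-chaining is the reverse. Only the optional
 * preamble and the first CS are passed to the kernel; the rest are
 * reached through the chain.
 */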
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
						int queue_idx,
						struct radv_winsys_sem_info *sem_info,
						const struct radv_winsys_bo_list *radv_bo_list,
						struct radeon_cmdbuf **cs_array,
						unsigned cs_count,
						struct radeon_cmdbuf *initial_preamble_cs,
						struct radeon_cmdbuf *continue_preamble_cs,
						struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct drm_amdgpu_bo_list_entry *handles = NULL;
	struct radv_amdgpu_cs_request request = {0};
	struct amdgpu_cs_ib_info ibs[2];
	unsigned number_of_ibs = 1;
	unsigned num_handles = 0;

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	/* Get the BO list. */
	r = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
				    initial_preamble_cs, radv_bo_list,
				    &num_handles, &handles);
	if (r)
		return r;

	/* Configure the CS request. */
	if (initial_preamble_cs) {
		ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
		ibs[1] = cs0->ib;
		number_of_ibs++;
	} else {
		ibs[0] = cs0->ib;
	}

	request.ip_type = cs0->hw_ip;
	request.ring = queue_idx;
	request.number_of_ibs = number_of_ibs;
	request.ibs = ibs;
	request.handles = handles;
	request.num_handles = num_handles;
	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

	/* Submit the CS. */
	r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
	}

	free(request.handles);

	if (r)
		return r;

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}
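
/* Fallback submission when the IBs cannot be chained (e.g. the command
 * buffers were not recorded with patching allowed): every CS keeps its
 * own IB and all of them are passed to the kernel in a single CS ioctl,
 * preamble first.
 */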
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
						 int queue_idx,
						 struct radv_winsys_sem_info *sem_info,
						 const struct radv_winsys_bo_list *radv_bo_list,
						 struct radeon_cmdbuf **cs_array,
						 unsigned cs_count,
						 struct radeon_cmdbuf *initial_preamble_cs,
						 struct radeon_cmdbuf *continue_preamble_cs,
						 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct drm_amdgpu_bo_list_entry *handles = NULL;
	struct radv_amdgpu_cs_request request = {};
	struct amdgpu_cs_ib_info *ibs;
	struct radv_amdgpu_cs *cs0;
	unsigned num_handles = 0;
	unsigned number_of_ibs;

	assert(cs_count);
	cs0 = radv_amdgpu_cs(cs_array[0]);

	/* Compute the number of IBs for this submit. */
	number_of_ibs = cs_count + !!initial_preamble_cs;

	/* Get the BO list. */
	r = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
				    initial_preamble_cs, radv_bo_list,
				    &num_handles, &handles);
	if (r)
		return r;

	ibs = malloc(number_of_ibs * sizeof(*ibs));
	if (!ibs) {
		free(handles);
		return -ENOMEM;
	}

	/* Configure the CS request. */
	if (initial_preamble_cs)
		ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;

	for (unsigned i = 0; i < cs_count; i++) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		ibs[i + !!initial_preamble_cs] = cs->ib;

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}
	}

	request.ip_type = cs0->hw_ip;
	request.ring = queue_idx;
	request.handles = handles;
	request.num_handles = num_handles;
	request.number_of_ibs = number_of_ibs;
	request.ibs = ibs;
	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

	/* Submit the CS. */
	r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
	}

	free(request.handles);
	free(ibs);

	if (r)
		return r;

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}
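
/* Sysmem path for chips without IB chaining: CPU-recorded command words
 * are copied into freshly created GPU-visible bounce buffers, padded with
 * NOPs to a multiple of 8 dwords, and submitted as one or more IBs. On
 * GFX6 the pad word is 0x80000000 instead of PKT3_NOP_PAD; its top bits
 * (31:30 == 2) make it a type-2 packet, which is presumably the NOP
 * encoding those older ASICs expect.
 */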
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
					       int queue_idx,
					       struct radv_winsys_sem_info *sem_info,
					       const struct radv_winsys_bo_list *radv_bo_list,
					       struct radeon_cmdbuf **cs_array,
					       unsigned cs_count,
					       struct radeon_cmdbuf *initial_preamble_cs,
					       struct radeon_cmdbuf *continue_preamble_cs,
					       struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys *)cs0->ws;
	struct radv_amdgpu_cs_request request;
	uint32_t pad_word = PKT3_NOP_PAD;
	bool emit_signal_sem = sem_info->cs_emit_signal;

	if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info *ibs;
		struct radeon_winsys_bo **bos;
		struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
		struct drm_amdgpu_bo_list_entry *handles = NULL;
		unsigned num_handles = 0;
		unsigned number_of_ibs;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;
		unsigned pad_words = 0;

		/* Compute the number of IBs for this submit. */
		number_of_ibs = cs->num_old_cs_buffers + 1;

		ibs = malloc(number_of_ibs * sizeof(*ibs));
		if (!ibs)
			return -ENOMEM;

		bos = malloc(number_of_ibs * sizeof(*bos));
		if (!bos) {
			free(ibs);
			return -ENOMEM;
		}

		if (number_of_ibs > 1) {
			/* Special path when the maximum size in dwords has
			 * been reached because we need to handle more than one
			 * IB per submit.
			 */
			struct radeon_cmdbuf **new_cs_array;
			unsigned idx = 0;

			new_cs_array = malloc(cs->num_old_cs_buffers *
					      sizeof(*new_cs_array));
			assert(new_cs_array);

			for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
				new_cs_array[idx++] = &cs->old_cs_buffers[j];
			new_cs_array[idx++] = cs_array[i];

			for (unsigned j = 0; j < number_of_ibs; j++) {
				struct radeon_cmdbuf *rcs = new_cs_array[j];
				bool needs_preamble = preamble_cs && j == 0;
				unsigned size = 0;
				unsigned pad_words = 0;

				if (needs_preamble)
					size += preamble_cs->cdw;
				size += rcs->cdw;

				assert(size < 0xffff8);

				while (!size || (size & 7)) {
					size++;
					pad_words++;
				}

				bos[j] = ws->buffer_create(ws, 4 * size, 4096,
							   RADEON_DOMAIN_GTT,
							   RADEON_FLAG_CPU_ACCESS |
							   RADEON_FLAG_NO_INTERPROCESS_SHARING |
							   RADEON_FLAG_READ_ONLY,
							   RADV_BO_PRIORITY_CS);
				ptr = ws->buffer_map(bos[j]);

				if (needs_preamble) {
					memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
					ptr += preamble_cs->cdw;
				}

				memcpy(ptr, rcs->buf, 4 * rcs->cdw);
				ptr += rcs->cdw;

				for (unsigned k = 0; k < pad_words; ++k)
					*ptr++ = pad_word;

				ibs[j].size = size;
				ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
				ibs[j].flags = 0;
			}

			cnt++;
			free(new_cs_array);
		} else {
			if (preamble_cs)
				size += preamble_cs->cdw;

			while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
				size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
				++cnt;
			}

			while (!size || (size & 7)) {
				size++;
				pad_words++;
			}
			assert(cnt);

			bos[0] = ws->buffer_create(ws, 4 * size, 4096,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS |
						   RADEON_FLAG_NO_INTERPROCESS_SHARING |
						   RADEON_FLAG_READ_ONLY,
						   RADV_BO_PRIORITY_CS);
			ptr = ws->buffer_map(bos[0]);

			if (preamble_cs) {
				memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
				ptr += preamble_cs->cdw;
			}

			for (unsigned j = 0; j < cnt; ++j) {
				struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
				memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
				ptr += cs->base.cdw;
			}

			for (unsigned j = 0; j < pad_words; ++j)
				*ptr++ = pad_word;

			ibs[0].size = size;
			ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
			ibs[0].flags = 0;
		}

		r = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
					    (struct radv_amdgpu_winsys_bo **)bos,
					    number_of_ibs, preamble_cs,
					    radv_bo_list,
					    &num_handles, &handles);
		if (r) {
			fprintf(stderr, "amdgpu: buffer list creation failed "
					"for the sysmem submission (%d)\n", r);
			free(ibs);
			free(bos);
			return r;
		}

		memset(&request, 0, sizeof(request));

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.handles = handles;
		request.num_handles = num_handles;
		request.number_of_ibs = number_of_ibs;
		request.ibs = ibs;
		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

		sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
		r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
						"see dmesg for more information.\n");
		}

		free(request.handles);

		for (unsigned j = 0; j < number_of_ibs; j++) {
			ws->buffer_destroy(bos[j]);
		}

		free(ibs);
		free(bos);

		if (r)
			return r;

		i += cnt;
	}

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
					int queue_idx,
					struct radeon_cmdbuf **cs_array,
					unsigned cs_count,
					struct radeon_cmdbuf *initial_preamble_cs,
					struct radeon_cmdbuf *continue_preamble_cs,
					struct radv_winsys_sem_info *sem_info,
					const struct radv_winsys_bo_list *bo_list,
					bool can_patch,
					struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	int ret;

	assert(sem_info);
	if (!cs->ws->use_ib_bos) {
		ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
							  cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else if (can_patch) {
		ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
							   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else {
		ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
							    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	}

	radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
	return ret;
}
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ret = NULL;

	if (!cs->ib_buffer)
		return NULL;
	for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
		struct radv_amdgpu_winsys_bo *bo;

		bo = (struct radv_amdgpu_winsys_bo *)
		     (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
		if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
			if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
				return (char *)ret + (addr - bo->base.va);
		}
	}

	if (cs->ws->debug_all_bos) {
		pthread_mutex_lock(&cs->ws->global_bo_list_lock);
		list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
				    &cs->ws->global_bo_list, global_list_item) {
			if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
				if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
					pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
					return (char *)ret + (addr - bo->base.va);
				}
			}
		}

		pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
	}
	return ret;
}
static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
				       FILE *file,
				       const int *trace_ids, int trace_id_count)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ib = cs->base.buf;
	int num_dw = cs->base.cdw;

	if (cs->ws->use_ib_bos) {
		ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
		num_dw = cs->ib.size;
	}
	assert(ib);
	ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
		    cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}
static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
	switch (radv_priority) {
	case RADEON_CTX_PRIORITY_REALTIME:
		return AMDGPU_CTX_PRIORITY_VERY_HIGH;
	case RADEON_CTX_PRIORITY_HIGH:
		return AMDGPU_CTX_PRIORITY_HIGH;
	case RADEON_CTX_PRIORITY_MEDIUM:
		return AMDGPU_CTX_PRIORITY_NORMAL;
	case RADEON_CTX_PRIORITY_LOW:
		return AMDGPU_CTX_PRIORITY_LOW;
	default:
		unreachable("Invalid context priority");
	}
}
static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
				       enum radeon_ctx_priority priority,
				       struct radeon_winsys_ctx **rctx)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
	VkResult result;
	int r;

	if (!ctx)
		return VK_ERROR_OUT_OF_HOST_MEMORY;

	r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
	if (r && r == -EACCES) {
		result = VK_ERROR_NOT_PERMITTED_EXT;
		goto error_create;
	} else if (r) {
		fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto error_create;
	}
	ctx->ws = ws;

	assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
	ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
					       RADEON_DOMAIN_GTT,
					       RADEON_FLAG_CPU_ACCESS |
					       RADEON_FLAG_NO_INTERPROCESS_SHARING,
					       RADV_BO_PRIORITY_CS);
	if (ctx->fence_bo)
		ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
	if (ctx->fence_map)
		memset(ctx->fence_map, 0, 4096);

	*rctx = (struct radeon_winsys_ctx *)ctx;
	return VK_SUCCESS;

error_create:
	FREE(ctx);
	return result;
}
static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	ctx->ws->base.buffer_destroy(ctx->fence_bo);
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
				      enum ring_type ring_type, int ring_index)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	int ip_type = ring_to_hw_ip(ring_type);

	if (ctx->last_submission[ip_type][ring_index].fence.fence) {
		uint32_t expired;
		int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
						       1000000000ull, 0, &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
	struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
	if (!sem)
		return NULL;

	return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
	struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
	FREE(sem);
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_winsys_sem_info *sem_info)
{
	for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
		struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

		if (sem->context)
			return -EINVAL;

		*sem = ctx->last_submission[ip_type][ring].fence;
	}
	return 0;
}
static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
									  struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
	struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
	if (!syncobj)
		return NULL;

	for (unsigned i = 0; i < counts->syncobj_count; i++) {
		struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
		sem->handle = counts->syncobj[i];
	}

	chunk->chunk_id = chunk_id;
	chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
	chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
	return syncobj;
}
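
/* Assembles the chunk array for amdgpu_cs_submit_raw2(): one IB chunk per
 * amdgpu_cs_ib_info, an optional user fence chunk, optional syncobj
 * wait/signal and legacy dependency chunks, and, on drm minor >= 27, the
 * BO list itself as an AMDGPU_CHUNK_ID_BO_HANDLES chunk instead of a
 * separately created list handle.
 */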
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct radv_amdgpu_cs_request *request,
				 struct radv_winsys_sem_info *sem_info)
{
	int r;
	int num_chunks;
	int size;
	bool user_fence;
	int i;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
	bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
	struct drm_amdgpu_bo_list_in bo_list_in;
	struct amdgpu_cs_fence *sem;
	uint32_t bo_list = 0;

	user_fence = (request->fence_info.handle != NULL);
	size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;

	chunks = malloc(sizeof(chunks[0]) * size);
	if (!chunks)
		return -ENOMEM;

	size = request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = malloc(sizeof(chunk_data[0]) * size);
	if (!chunk_data) {
		r = -ENOMEM;
		goto error_out;
	}

	num_chunks = request->number_of_ibs;
	for (i = 0; i < request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = request->ip_type;
		chunk_data[i].ib_data.ip_instance = request->ip_instance;
		chunk_data[i].ib_data.ring = request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}

	if (user_fence) {
		i = num_chunks++;

		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
						   &chunk_data[i]);
	}

	if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
		wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
								  &chunks[num_chunks],
								  AMDGPU_CHUNK_ID_SYNCOBJ_IN);
		if (!wait_syncobj) {
			r = -ENOMEM;
			goto error_out;
		}
		num_chunks++;

		if (sem_info->wait.sem_count == 0)
			sem_info->cs_emit_wait = false;
	}

	if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
		sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_out;
		}

		int sem_count = 0;

		for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
			sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
			if (!sem->context)
				continue;

			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

			amdgpu_cs_chunk_fence_to_dep(sem, dep);

			sem->context = NULL;
		}
		i = num_chunks++;

		/* dependencies chunk */
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

		sem_info->cs_emit_wait = false;
	}

	if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
		signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
								    &chunks[num_chunks],
								    AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
		if (!signal_syncobj) {
			r = -ENOMEM;
			goto error_out;
		}
		num_chunks++;
	}

	if (use_bo_list_create) {
		/* Legacy path creating the buffer list handle and passing it
		 * to the CS ioctl.
		 */
		r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
					      request->handles, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
			goto error_out;
		}
	} else {
		/* Standard path passing the buffer list via the CS ioctl. */
		bo_list_in.operation = ~0;
		bo_list_in.list_handle = ~0;
		bo_list_in.bo_number = request->num_handles;
		bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
		bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;

		chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
		chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
		chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
		num_chunks++;
	}

	r = amdgpu_cs_submit_raw2(ctx->ws->dev,
				  ctx->ctx,
				  bo_list,
				  num_chunks,
				  chunks,
				  &request->seq_no);

	if (bo_list)
		amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);

error_out:
	free(chunks);
	free(chunk_data);
	free(sem_dependencies);
	free(wait_syncobj);
	free(signal_syncobj);
	return r;
}
static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
				      bool create_signaled,
				      uint32_t *handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	uint32_t flags = 0;

	if (create_signaled)
		flags |= DRM_SYNCOBJ_CREATE_SIGNALED;

	return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
}

static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
					uint32_t handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	amdgpu_cs_destroy_syncobj(ws->dev, handle);
}

static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
				      uint32_t handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
}

static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
				       uint32_t handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
}

static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
				     uint32_t handle_count, bool wait_all, uint64_t timeout)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	uint32_t tmp;

	/* The timeouts are signed, while vulkan timeouts are unsigned. */
	timeout = MIN2(timeout, INT64_MAX);

	/* DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT makes waits on
	 * not-yet-submitted syncobjs block instead of failing.
	 */
	int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t *)handles, handle_count, timeout,
					 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
					 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
					 &tmp);
	if (ret == 0) {
		return true;
	} else if (ret == -ETIME) {
		return false;
	} else {
		fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
		return false;
	}
}

static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
				      uint32_t syncobj,
				      int *fd)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
				      int fd,
				      uint32_t *syncobj)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
						   uint32_t syncobj,
						   int *fd)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
						     uint32_t syncobj,
						     int fd)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
}
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.reset_fence = radv_amdgpu_reset_fence;
	ws->base.signal_fence = radv_amdgpu_signal_fence;
	ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
	ws->base.create_sem = radv_amdgpu_create_sem;
	ws->base.destroy_sem = radv_amdgpu_destroy_sem;
	ws->base.create_syncobj = radv_amdgpu_create_syncobj;
	ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
	ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
	ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
	ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
	ws->base.export_syncobj = radv_amdgpu_export_syncobj;
	ws->base.import_syncobj = radv_amdgpu_import_syncobj;
	ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
	ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
	ws->base.fences_wait = radv_amdgpu_fences_wait;
}
;