/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>
#include <pthread.h>
#include <errno.h>

#include "ac_debug.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"
enum {
    VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};
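/* Both hash tables in this file are sized to a power of two so a hash can be
 * reduced with a simple mask, e.g. hash & (size - 1). The hash itself is the
 * BO (or handle) pointer shifted right by 6, which assumes allocations are at
 * least 64-byte aligned so the low bits carry no information. */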
struct radv_amdgpu_cs {
    struct radeon_cmdbuf base;
    struct radv_amdgpu_winsys *ws;

    struct amdgpu_cs_ib_info ib;

    struct radeon_winsys_bo *ib_buffer;
    uint8_t *ib_mapped;        /* CPU mapping of ib_buffer */
    unsigned max_num_buffers;
    unsigned num_buffers;
    amdgpu_bo_handle *handles;

    struct radeon_winsys_bo **old_ib_buffers;
    unsigned num_old_ib_buffers;
    unsigned max_num_old_ib_buffers;
    unsigned *ib_size_ptr;     /* points at the size field of the last IB in the chain */
    bool is_chained;

    int buffer_hash_table[1024];
    unsigned hw_ip;

    unsigned num_virtual_buffers;
    unsigned max_num_virtual_buffers;
    struct radeon_winsys_bo **virtual_buffers;
    int *virtual_buffer_hash_table;

    /* For chips that don't support chaining. */
    struct radeon_cmdbuf *old_cs_buffers;
    unsigned num_old_cs_buffers;
};
static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
    return (struct radv_amdgpu_cs*)base;
}
static int ring_to_hw_ip(enum ring_type ring)
{
    switch (ring) {
    case RING_GFX:
        return AMDGPU_HW_IP_GFX;
    case RING_DMA:
        return AMDGPU_HW_IP_DMA;
    case RING_COMPUTE:
        return AMDGPU_HW_IP_COMPUTE;
    default:
        unreachable("unsupported ring");
    }
}
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct amdgpu_cs_request *request,
                                 struct radv_winsys_sem_info *sem_info);
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
                                         struct radv_amdgpu_fence *fence,
                                         struct amdgpu_cs_request *req)
{
    fence->fence.context = ctx->ctx;
    fence->fence.ip_type = req->ip_type;
    fence->fence.ip_instance = req->ip_instance;
    fence->fence.ring = req->ring;
    fence->fence.fence = req->seq_no;
    fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
}
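/* The kernel writes the sequence number of each completed submission into a
 * per-(ip_type, ring) slot of the fence BO (see radv_set_cs_fence() below).
 * radv_amdgpu_fence_wait() can therefore often decide that a fence has
 * signaled by comparing *user_ptr against the expected sequence number,
 * without entering the kernel at all. */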
static struct radeon_winsys_fence *radv_amdgpu_create_fence()
{
    struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
    return (struct radeon_winsys_fence*)fence;
}
static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    free(fence);
}
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
                                   struct radeon_winsys_fence *_fence,
                                   bool absolute,
                                   uint64_t timeout)
{
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
    int r;
    uint32_t expired = 0;

    /* Fast path: check the user fence mapping first. */
    if (fence->user_ptr) {
        if (*fence->user_ptr >= fence->fence.fence)
            return true;
        if (!absolute && !timeout)
            return false;
    }

    /* Now use the libdrm query. */
    r = amdgpu_cs_query_fence_status(&fence->fence,
                                     timeout,
                                     flags,
                                     &expired);
    if (r) {
        fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
        return false;
    }

    return expired != 0;
}
static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
                                    struct radeon_winsys_fence *const *_fences,
                                    uint32_t fence_count,
                                    bool wait_all,
                                    uint64_t timeout)
{
    struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
    int r;
    uint32_t expired = 0, first = 0;

    if (!fences)
        return false;

    for (uint32_t i = 0; i < fence_count; ++i)
        fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;

    /* Now use the libdrm query. */
    r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
                              timeout, &expired, &first);

    free(fences);

    if (r) {
        fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
        return false;
    }

    return expired != 0;
}
static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

    if (cs->ib_buffer)
        cs->ws->base.buffer_destroy(cs->ib_buffer);
    else
        free(cs->base.buf);

    for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
        cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

    for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
        struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
        free(rcs->buf);
    }

    free(cs->old_cs_buffers);
    free(cs->old_ib_buffers);
    free(cs->virtual_buffers);
    free(cs->virtual_buffer_hash_table);
    free(cs);
}
static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
                                enum ring_type ring_type)
{
    for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
        cs->buffer_hash_table[i] = -1;

    cs->hw_ip = ring_to_hw_ip(ring_type);
}
static struct radeon_cmdbuf *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
                      enum ring_type ring_type)
{
    struct radv_amdgpu_cs *cs;
    uint32_t ib_size = 20 * 1024 * 4; /* 80 KiB, i.e. 20K dwords */
    cs = calloc(1, sizeof(struct radv_amdgpu_cs));
    if (!cs)
        return NULL;

    cs->ws = radv_amdgpu_winsys(ws);
    radv_amdgpu_init_cs(cs, ring_type);

    if (cs->ws->use_ib_bos) {
        cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
                                          RADEON_DOMAIN_GTT,
                                          RADEON_FLAG_CPU_ACCESS |
                                          RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                          RADEON_FLAG_READ_ONLY);
        if (!cs->ib_buffer) {
            free(cs);
            return NULL;
        }

        cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
        if (!cs->ib_mapped) {
            ws->buffer_destroy(cs->ib_buffer);
            free(cs);
            return NULL;
        }

        cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
        cs->base.buf = (uint32_t *)cs->ib_mapped;
        /* Reserve 4 dwords at the end for the chaining packet. */
        cs->base.max_dw = ib_size / 4 - 4;
        cs->ib_size_ptr = &cs->ib.size;

        ws->cs_add_buffer(&cs->base, cs->ib_buffer);
    } else {
        cs->base.buf = malloc(16384);
        cs->base.max_dw = 4096;
        if (!cs->base.buf) {
            free(cs);
            return NULL;
        }
    }

    return &cs->base;
}
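/* Two allocation strategies coexist here. With use_ib_bos, commands are
 * written directly into a GPU-visible IB buffer, and overflow is handled by
 * chaining to a fresh buffer in radv_amdgpu_cs_grow(). Without it, commands
 * accumulate in ordinary malloc'ed memory and are only copied into a GTT
 * buffer at submit time (the "sysmem" path below). */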
static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

    if (!cs->ws->use_ib_bos) {
        const uint64_t limit_dws = 0xffff8;
        uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
                               MIN2(cs->base.max_dw * 2, limit_dws));

        /* The total ib size cannot exceed limit_dws dwords. */
        if (ib_dws > limit_dws) {
            /* The maximum size in dwords has been reached,
             * try to allocate a new one.
             */
            cs->old_cs_buffers =
                realloc(cs->old_cs_buffers,
                        (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
            if (!cs->old_cs_buffers) {
                cs->base.cdw = 0;
                return;
            }

            /* Store the current one for submitting it later. */
            cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
            cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
            cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
            cs->num_old_cs_buffers++;

            /* Reset the cs, it will be re-allocated below. */
            cs->base.cdw = 0;
            cs->base.buf = NULL;
            cs->base.max_dw = 0;

            /* Re-compute the number of dwords to allocate. */
            ib_dws = MAX2(cs->base.cdw + min_size,
                          MIN2(cs->base.max_dw * 2, limit_dws));
            if (ib_dws > limit_dws) {
                fprintf(stderr, "amdgpu: Too high number of "
                                "dwords to allocate\n");
                return;
            }
        }

        uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
        if (new_buf) {
            cs->base.buf = new_buf;
            cs->base.max_dw = ib_dws;
        }
        return;
    }

    uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

    /* max that fits in the chain size field. */
    ib_size = MIN2(ib_size, 0xfffff);

    /* Pad with NOPs so that, after the 4-dword chain packet, the old IB ends
     * on an 8-dword boundary. */
    while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
        radeon_emit(&cs->base, 0xffff1000);

    *cs->ib_size_ptr |= cs->base.cdw + 4;

    if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
        cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
        cs->old_ib_buffers = realloc(cs->old_ib_buffers,
                                     cs->max_num_old_ib_buffers * sizeof(void*));
    }

    cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

    cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
                                               RADEON_DOMAIN_GTT,
                                               RADEON_FLAG_CPU_ACCESS |
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                               RADEON_FLAG_READ_ONLY);

    if (!cs->ib_buffer) {
        cs->base.cdw = 0;
        cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
    }

    cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
    if (!cs->ib_mapped) {
        cs->ws->base.buffer_destroy(cs->ib_buffer);
        cs->base.cdw = 0;
        cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
    }

    cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

    /* Chain to the new IB. */
    radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
    radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
    radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
    radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));

    cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;

    cs->base.buf = (uint32_t *)cs->ib_mapped;
    cs->base.cdw = 0;
    cs->base.max_dw = ib_size / 4 - 4;
}
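/* The chain packet is exactly 4 dwords:
 *
 *   [0] PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0)           header
 *   [1] low 32 bits of the next IB's VA
 *   [2] high 32 bits of the next IB's VA
 *   [3] S_3F2_CHAIN(1) | S_3F2_VALID(1) | size of the next IB in dwords
 *
 * Dword [3] is what ib_size_ptr points at: the size of the next IB is not
 * known when the packet is emitted, so it is OR'ed in later, either by the
 * next grow (cdw + 4) or by radv_amdgpu_cs_finalize(). The NOP padding
 * before the packet keeps every IB a multiple of 8 dwords, which the
 * padding logic here and in finalize assumes the firmware requires. */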
static bool radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

    if (cs->ws->use_ib_bos) {
        /* Pad the IB to a multiple of 8 dwords. */
        while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
            radeon_emit(&cs->base, 0xffff1000);

        *cs->ib_size_ptr |= cs->base.cdw;

        cs->is_chained = false;
    }

    return true;
}
static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    cs->base.cdw = 0;

    for (unsigned i = 0; i < cs->num_buffers; ++i) {
        unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
                        (ARRAY_SIZE(cs->buffer_hash_table) - 1);
        cs->buffer_hash_table[hash] = -1;
    }

    for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
        unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
        cs->virtual_buffer_hash_table[hash] = -1;
    }

    cs->num_buffers = 0;
    cs->num_virtual_buffers = 0;

    if (cs->ws->use_ib_bos) {
        cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

        for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
            cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

        cs->num_old_ib_buffers = 0;
        cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
        cs->ib_size_ptr = &cs->ib.size;
        cs->ib.size = 0;
    } else {
        for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
            struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
            free(rcs->buf);
        }

        free(cs->old_cs_buffers);
        cs->old_cs_buffers = NULL;
        cs->num_old_cs_buffers = 0;
    }
}
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      amdgpu_bo_handle bo)
{
    unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
    int index = cs->buffer_hash_table[hash];

    if (index == -1)
        return -1;

    if (cs->handles[index] == bo)
        return index;

    /* Slot collision: fall back to a linear search and re-prime the slot. */
    for (unsigned i = 0; i < cs->num_buffers; ++i) {
        if (cs->handles[i] == bo) {
            cs->buffer_hash_table[hash] = i;
            return i;
        }
    }

    return -1;
}
static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               amdgpu_bo_handle bo)
{
    unsigned hash;
    int index = radv_amdgpu_cs_find_buffer(cs, bo);

    if (index != -1)
        return;

    if (cs->num_buffers == cs->max_num_buffers) {
        unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
        cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
        cs->max_num_buffers = new_count;
    }

    cs->handles[cs->num_buffers] = bo;

    hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
    cs->buffer_hash_table[hash] = cs->num_buffers;

    ++cs->num_buffers;
}
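/* The buffer hash table is a direct-mapped cache over the handles array: one
 * slot per hash, storing the index of the last buffer that hashed there. A
 * lookup is O(1) in the common case and only degrades to the linear scan in
 * radv_amdgpu_cs_find_buffer() on a collision, which stays cheap because the
 * table is cleared on every cs_reset(). */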
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
                                              struct radeon_winsys_bo *bo)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

    /* The virtual buffer hash table is allocated lazily. */
    if (!cs->virtual_buffer_hash_table) {
        cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
        for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
            cs->virtual_buffer_hash_table[i] = -1;
    }

    if (cs->virtual_buffer_hash_table[hash] >= 0) {
        int idx = cs->virtual_buffer_hash_table[hash];
        if (cs->virtual_buffers[idx] == bo) {
            return;
        }
        for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
            if (cs->virtual_buffers[i] == bo) {
                cs->virtual_buffer_hash_table[hash] = i;
                return;
            }
        }
    }

    if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
        cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
        cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
    }

    cs->virtual_buffers[cs->num_virtual_buffers] = bo;

    cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
    ++cs->num_virtual_buffers;
}
static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
                                      struct radeon_winsys_bo *_bo)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

    if (bo->is_virtual) {
        radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
        return;
    }

    /* Local buffers are always resident and never need to be listed per
     * submission. */
    if (bo->base.is_local)
        return;

    radv_amdgpu_cs_add_buffer_internal(cs, bo->bo);
}
static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
                                             struct radeon_cmdbuf *_child)
{
    struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
    struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

    for (unsigned i = 0; i < child->num_buffers; ++i) {
        radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i]);
    }

    for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
        radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
    }

    if (parent->ws->use_ib_bos) {
        if (parent->base.cdw + 4 > parent->base.max_dw)
            radv_amdgpu_cs_grow(&parent->base, 4);

        /* Call the child IB with an INDIRECT_BUFFER packet. */
        radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
        radeon_emit(&parent->base, child->ib.ib_mc_address);
        radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
        radeon_emit(&parent->base, child->ib.size);
    } else {
        if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
            radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

        memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
        parent->base.cdw += child->base.cdw;
    }
}
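/* Note the difference from chaining: here the CHAIN bit is left unset, so
 * the CP treats the INDIRECT_BUFFER packet as a call (execute the child IB,
 * then resume the parent) rather than a jump. The parent must still
 * reference every buffer the child uses, hence the two copy loops above. */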
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
                                      struct radeon_cmdbuf **cs_array,
                                      unsigned count,
                                      struct radv_amdgpu_winsys_bo **extra_bo_array,
                                      unsigned num_extra_bo,
                                      struct radeon_cmdbuf *extra_cs,
                                      const struct radv_winsys_bo_list *radv_bo_list,
                                      amdgpu_bo_list_handle *bo_list)
{
    int r = 0;

    if (ws->debug_all_bos) {
        struct radv_amdgpu_winsys_bo *bo;
        amdgpu_bo_handle *handles;
        unsigned num = 0;

        pthread_mutex_lock(&ws->global_bo_list_lock);

        handles = malloc(sizeof(handles[0]) * ws->num_buffers);
        if (!handles) {
            pthread_mutex_unlock(&ws->global_bo_list_lock);
            return -ENOMEM;
        }

        LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
            assert(num < ws->num_buffers);
            handles[num++] = bo->bo;
        }

        r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                  handles, NULL, bo_list);
        free(handles);
        pthread_mutex_unlock(&ws->global_bo_list_lock);
    } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
               !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
        struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
        if (cs->num_buffers == 0) {
            *bo_list = 0;
            return 0;
        }
        r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
                                  NULL, bo_list);
    } else {
        unsigned total_buffer_count = num_extra_bo;
        unsigned unique_bo_count = num_extra_bo;
        for (unsigned i = 0; i < count; ++i) {
            struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
            total_buffer_count += cs->num_buffers;
            for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
                total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
        }

        if (extra_cs)
            total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;

        if (radv_bo_list)
            total_buffer_count += radv_bo_list->count;

        if (total_buffer_count == 0) {
            *bo_list = 0;
            return 0;
        }
        amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
        if (!handles)
            return -ENOMEM;

        for (unsigned i = 0; i < num_extra_bo; i++) {
            handles[i] = extra_bo_array[i]->bo;
        }

        for (unsigned i = 0; i < count + !!extra_cs; ++i) {
            struct radv_amdgpu_cs *cs;

            if (i == count)
                cs = (struct radv_amdgpu_cs*)extra_cs;
            else
                cs = (struct radv_amdgpu_cs*)cs_array[i];

            if (!cs->num_buffers)
                continue;

            if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
                memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
                unique_bo_count = cs->num_buffers;
                continue;
            }
            int unique_bo_so_far = unique_bo_count;
            for (unsigned j = 0; j < cs->num_buffers; ++j) {
                bool found = false;
                for (unsigned k = 0; k < unique_bo_so_far; ++k) {
                    if (handles[k] == cs->handles[j]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    handles[unique_bo_count] = cs->handles[j];
                    ++unique_bo_count;
                }
            }
            for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
                struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
                for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
                    struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
                    bool found = false;
                    for (unsigned m = 0; m < unique_bo_count; ++m) {
                        if (handles[m] == bo->bo) {
                            found = true;
                            break;
                        }
                    }
                    if (!found) {
                        handles[unique_bo_count] = bo->bo;
                        ++unique_bo_count;
                    }
                }
            }
        }

        if (radv_bo_list) {
            unsigned unique_bo_so_far = unique_bo_count;
            for (unsigned i = 0; i < radv_bo_list->count; ++i) {
                struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
                bool found = false;
                for (unsigned j = 0; j < unique_bo_so_far; ++j) {
                    if (bo->bo == handles[j]) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    handles[unique_bo_count] = bo->bo;
                    ++unique_bo_count;
                }
            }
        }

        if (unique_bo_count > 0) {
            r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
                                      NULL, bo_list);
        } else {
            *bo_list = 0;
        }

        free(handles);
    }

    return r;
}
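/* Three strategies, cheapest first: with debug_all_bos every buffer the
 * winsys has ever allocated goes into the list; a single CS with no extras
 * can reuse its handle array directly; otherwise the handles of all CSes,
 * the backing BOs of virtual (sparse) buffers, and the per-submission
 * radv_bo_list are merged with an O(n^2) dedup, which stays acceptable
 * while the per-CS lists are small. */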
static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
    struct amdgpu_cs_fence_info ret = {0};
    if (ctx->fence_map) {
        ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
        ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
    }
    return ret;
}
static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
                                    struct amdgpu_cs_request *request)
{
    radv_amdgpu_request_to_fence(ctx,
                                 &ctx->last_submission[request->ip_type][request->ring],
                                 request);
}
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                                int queue_idx,
                                                struct radv_winsys_sem_info *sem_info,
                                                const struct radv_winsys_bo_list *radv_bo_list,
                                                struct radeon_cmdbuf **cs_array,
                                                unsigned cs_count,
                                                struct radeon_cmdbuf *initial_preamble_cs,
                                                struct radeon_cmdbuf *continue_preamble_cs,
                                                struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request = {0};
    struct amdgpu_cs_ib_info ibs[2];
    unsigned number_of_ibs = 1;

    for (unsigned i = cs_count; i--;) {
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

        if (cs->is_chained) {
            *cs->ib_size_ptr -= 4;
            cs->is_chained = false;
        }

        if (i + 1 < cs_count) {
            struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
            assert(cs->base.cdw + 4 <= cs->base.max_dw);

            cs->is_chained = true;
            *cs->ib_size_ptr += 4;

            cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
            cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
            cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
            cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
        }
    }

    /* Create a buffer object list. */
    r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
                                   initial_preamble_cs, radv_bo_list,
                                   &bo_list);
    if (r) {
        fprintf(stderr, "amdgpu: buffer list creation failed for the "
                        "chained submission(%d)\n", r);
        return r;
    }

    /* Configure the CS request. */
    if (initial_preamble_cs) {
        ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
        ibs[1] = cs0->ib;
        number_of_ibs++;
    } else {
        ibs[0] = cs0->ib;
    }

    request.ip_type = cs0->hw_ip;
    request.ring = queue_idx;
    request.number_of_ibs = number_of_ibs;
    request.ibs = ibs;
    request.resources = bo_list;
    request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

    /* Submit the CS. */
    r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
        else
            fprintf(stderr, "amdgpu: The CS has been rejected, "
                            "see dmesg for more information.\n");
    }

    if (bo_list)
        amdgpu_bo_list_destroy(bo_list);

    if (r)
        return r;

    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return 0;
}
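/* Chaining turns N command buffers into a single kernel submission: the loop
 * above walks cs_array backwards and patches the reserved tail of each IB
 * with a CHAIN packet pointing at its successor, so the kernel only ever
 * sees one or two IBs (optional preamble + the head of the chain). The
 * backwards walk matters because chaining cs[i] to cs[i+1] needs
 * next->ib.size to be final, including any chain packet cs[i+1] itself
 * received. */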
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                                 int queue_idx,
                                                 struct radv_winsys_sem_info *sem_info,
                                                 const struct radv_winsys_bo_list *radv_bo_list,
                                                 struct radeon_cmdbuf **cs_array,
                                                 unsigned cs_count,
                                                 struct radeon_cmdbuf *initial_preamble_cs,
                                                 struct radeon_cmdbuf *continue_preamble_cs,
                                                 struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request = {};
    struct amdgpu_cs_ib_info *ibs;
    struct radv_amdgpu_cs *cs0;
    unsigned number_of_ibs;

    assert(cs_count);
    cs0 = radv_amdgpu_cs(cs_array[0]);

    /* Compute the number of IBs for this submit. */
    number_of_ibs = cs_count + !!initial_preamble_cs;

    /* Create a buffer object list. */
    r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
                                   initial_preamble_cs, radv_bo_list,
                                   &bo_list);
    if (r) {
        fprintf(stderr, "amdgpu: buffer list creation failed "
                        "for the fallback submission (%d)\n", r);
        return r;
    }

    ibs = malloc(number_of_ibs * sizeof(*ibs));
    if (!ibs) {
        if (bo_list)
            amdgpu_bo_list_destroy(bo_list);
        return -ENOMEM;
    }

    /* Configure the CS request. */
    if (initial_preamble_cs)
        ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;

    for (unsigned i = 0; i < cs_count; i++) {
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

        ibs[i + !!initial_preamble_cs] = cs->ib;

        if (cs->is_chained) {
            *cs->ib_size_ptr -= 4;
            cs->is_chained = false;
        }
    }

    request.ip_type = cs0->hw_ip;
    request.ring = queue_idx;
    request.resources = bo_list;
    request.number_of_ibs = number_of_ibs;
    request.ibs = ibs;
    request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

    /* Submit the CS. */
    r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
        else
            fprintf(stderr, "amdgpu: The CS has been rejected, "
                            "see dmesg for more information.\n");
    }

    if (bo_list)
        amdgpu_bo_list_destroy(bo_list);

    free(ibs);

    if (r)
        return r;

    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return 0;
}
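/* Unlike the chained path, every command buffer stays a separate IB of the
 * single request built here; the preamble, if any, simply becomes IB 0.
 * Nothing is patched, so the IBs keep the sizes radv_amdgpu_cs_finalize()
 * recorded, and any stale chain packet left over from a previous chained
 * submit is undone by the *cs->ib_size_ptr -= 4 fixup in the loop. */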
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                               int queue_idx,
                                               struct radv_winsys_sem_info *sem_info,
                                               const struct radv_winsys_bo_list *radv_bo_list,
                                               struct radeon_cmdbuf **cs_array,
                                               unsigned cs_count,
                                               struct radeon_cmdbuf *initial_preamble_cs,
                                               struct radeon_cmdbuf *continue_preamble_cs,
                                               struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
    struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request;
    uint32_t pad_word = 0xffff1000U;
    bool emit_signal_sem = sem_info->cs_emit_signal;

    /* SI uses a different NOP encoding. */
    if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
        pad_word = 0x80000000;

    assert(cs_count);

    for (unsigned i = 0; i < cs_count;) {
        struct amdgpu_cs_ib_info *ibs;
        struct radeon_winsys_bo **bos;
        struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
        unsigned number_of_ibs;
        uint32_t *ptr;
        unsigned cnt = 0;
        unsigned size = 0;
        unsigned pad_words = 0;

        /* Compute the number of IBs for this submit. */
        number_of_ibs = cs->num_old_cs_buffers + 1;

        ibs = malloc(number_of_ibs * sizeof(*ibs));
        if (!ibs)
            return -ENOMEM;

        bos = malloc(number_of_ibs * sizeof(*bos));
        if (!bos) {
            free(ibs);
            return -ENOMEM;
        }

        if (number_of_ibs > 1) {
            /* Special path when the maximum size in dwords has
             * been reached because we need to handle more than one
             * IB per submit.
             */
            struct radeon_cmdbuf **new_cs_array;
            unsigned idx = 0;

            new_cs_array = malloc(number_of_ibs *
                                  sizeof(*new_cs_array));
            assert(new_cs_array);

            for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
                new_cs_array[idx++] = &cs->old_cs_buffers[j];
            new_cs_array[idx++] = cs_array[i];

            for (unsigned j = 0; j < number_of_ibs; j++) {
                struct radeon_cmdbuf *rcs = new_cs_array[j];
                bool needs_preamble = preamble_cs && j == 0;
                unsigned size = 0;
                unsigned pad_words = 0;

                if (needs_preamble)
                    size += preamble_cs->cdw;
                size += rcs->cdw;

                assert(size < 0xffff8);

                /* Pad each IB to a multiple of 8 dwords. */
                while (!size || (size & 7)) {
                    size++;
                    pad_words++;
                }

                bos[j] = ws->buffer_create(ws, 4 * size, 4096,
                                           RADEON_DOMAIN_GTT,
                                           RADEON_FLAG_CPU_ACCESS |
                                           RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                           RADEON_FLAG_READ_ONLY);
                ptr = ws->buffer_map(bos[j]);

                if (needs_preamble) {
                    memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
                    ptr += preamble_cs->cdw;
                }

                memcpy(ptr, rcs->buf, 4 * rcs->cdw);
                ptr += rcs->cdw;

                for (unsigned k = 0; k < pad_words; ++k)
                    *ptr++ = pad_word;

                ibs[j].size = size;
                ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
                ibs[j].flags = 0;
            }

            cnt++;
            free(new_cs_array);
        } else {
            if (preamble_cs)
                size += preamble_cs->cdw;

            /* Pack as many command buffers as fit under the IB size limit. */
            while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
                size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
                ++cnt;
            }

            while (!size || (size & 7)) {
                size++;
                pad_words++;
            }
            assert(cnt);

            bos[0] = ws->buffer_create(ws, 4 * size, 4096,
                                       RADEON_DOMAIN_GTT,
                                       RADEON_FLAG_CPU_ACCESS |
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING |
                                       RADEON_FLAG_READ_ONLY);
            ptr = ws->buffer_map(bos[0]);

            if (preamble_cs) {
                memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
                ptr += preamble_cs->cdw;
            }

            for (unsigned j = 0; j < cnt; ++j) {
                struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
                memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
                ptr += cs->base.cdw;
            }

            for (unsigned j = 0; j < pad_words; ++j)
                *ptr++ = pad_word;

            ibs[0].size = size;
            ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
            ibs[0].flags = 0;
        }

        r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
                                       (struct radv_amdgpu_winsys_bo **)bos,
                                       number_of_ibs, preamble_cs,
                                       radv_bo_list, &bo_list);
        if (r) {
            fprintf(stderr, "amdgpu: buffer list creation failed "
                            "for the sysmem submission (%d)\n", r);
            free(ibs);
            free(bos);
            return r;
        }

        memset(&request, 0, sizeof(request));

        request.ip_type = cs0->hw_ip;
        request.ring = queue_idx;
        request.resources = bo_list;
        request.number_of_ibs = number_of_ibs;
        request.ibs = ibs;
        request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

        /* Only the last submission of the batch signals the semaphores. */
        sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
        r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
        if (r) {
            if (r == -ENOMEM)
                fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
            else
                fprintf(stderr, "amdgpu: The CS has been rejected, "
                                "see dmesg for more information.\n");
        }

        if (bo_list)
            amdgpu_bo_list_destroy(bo_list);

        for (unsigned j = 0; j < number_of_ibs; j++) {
            ws->buffer_destroy(bos[j]);
        }

        free(ibs);
        free(bos);

        if (r)
            return r;

        i += cnt;
    }

    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return 0;
}
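/* This is the path for chips (and debug configurations) without IB
 * chaining: command data lives in malloc'ed memory, so each submission
 * copies it into freshly allocated GTT BOs, packing as many command buffers
 * as fit under the 0xffff8-dword IB size limit and padding each IB to an
 * 8-dword boundary with NOPs. */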
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                        int queue_idx,
                                        struct radeon_cmdbuf **cs_array,
                                        unsigned cs_count,
                                        struct radeon_cmdbuf *initial_preamble_cs,
                                        struct radeon_cmdbuf *continue_preamble_cs,
                                        struct radv_winsys_sem_info *sem_info,
                                        const struct radv_winsys_bo_list *bo_list,
                                        bool can_patch,
                                        struct radeon_winsys_fence *_fence)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    int ret;

    assert(sem_info);
    if (!cs->ws->use_ib_bos) {
        ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                  cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    } else if (can_patch && cs->ws->batchchain) {
        ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    } else {
        ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
                                                    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    }

    radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
    return ret;
}
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
    struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
    void *ret = NULL;

    if (!cs->ib_buffer)
        return NULL;
    for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
        struct radv_amdgpu_winsys_bo *bo;

        bo = (struct radv_amdgpu_winsys_bo*)
             (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
        if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
            if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
                return (char *)ret + (addr - bo->base.va);
        }
    }
    if (cs->ws->debug_all_bos) {
        pthread_mutex_lock(&cs->ws->global_bo_list_lock);
        list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
                            &cs->ws->global_bo_list, global_list_item) {
            if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
                if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
                    pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
                    return (char *)ret + (addr - bo->base.va);
                }
            }
        }
        pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
    }
    return ret;
}
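/* Used as a callback by ac_parse_ib() below: when the parser hits an
 * INDIRECT_BUFFER packet it only has the GPU VA of the target, so this
 * translates a VA back to a CPU pointer by scanning the current and old IB
 * buffers of this CS, falling back to the winsys-wide BO list when
 * debug_all_bos tracking is enabled. */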
static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
                                       FILE* file,
                                       const int *trace_ids, int trace_id_count)
{
    struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
    void *ib = cs->base.buf;
    int num_dw = cs->base.cdw;

    if (cs->ws->use_ib_bos) {
        ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
        num_dw = cs->ib.size;
    }
    assert(ib);
    ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
                cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}
static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
    switch (radv_priority) {
    case RADEON_CTX_PRIORITY_REALTIME:
        return AMDGPU_CTX_PRIORITY_VERY_HIGH;
    case RADEON_CTX_PRIORITY_HIGH:
        return AMDGPU_CTX_PRIORITY_HIGH;
    case RADEON_CTX_PRIORITY_MEDIUM:
        return AMDGPU_CTX_PRIORITY_NORMAL;
    case RADEON_CTX_PRIORITY_LOW:
        return AMDGPU_CTX_PRIORITY_LOW;
    default:
        unreachable("Invalid context priority");
    }
}
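/* amdgpu exposes no dedicated realtime scheduling level, so Vulkan's
 * REALTIME is mapped to AMDGPU_CTX_PRIORITY_VERY_HIGH, the highest priority
 * the context-create interface defines. */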
static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
                                                        enum radeon_ctx_priority priority)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
    uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
    int r;

    if (!ctx)
        return NULL;

    r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
    if (r) {
        fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
        goto error_create;
    }
    ctx->ws = ws;

    assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
    ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
                                           RADEON_DOMAIN_GTT,
                                           RADEON_FLAG_CPU_ACCESS |
                                           RADEON_FLAG_NO_INTERPROCESS_SHARING);
    if (ctx->fence_bo)
        ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
    if (ctx->fence_map)
        memset(ctx->fence_map, 0, 4096);
    return (struct radeon_winsys_ctx *)ctx;

error_create:
    FREE(ctx);
    return NULL;
}
static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
    struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
    ctx->ws->base.buffer_destroy(ctx->fence_bo);
    amdgpu_cs_ctx_free(ctx->ctx);
    FREE(ctx);
}
static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
                                      enum ring_type ring_type, int ring_index)
{
    struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
    int ip_type = ring_to_hw_ip(ring_type);

    if (ctx->last_submission[ip_type][ring_index].fence.fence) {
        uint32_t expired;
        int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
                                               1000000000ull, 0, &expired);

        if (ret || !expired)
            return false;
    }

    return true;
}
static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
    struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
    if (!sem)
        return NULL;

    return (struct radeon_winsys_sem *)sem;
}
static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
    struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
    FREE(sem);
}
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info)
{
    for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
        struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

        if (sem->context)
            return -EINVAL;

        *sem = ctx->last_submission[ip_type][ring].fence;
    }
    return 0;
}
static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                                                          struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
    struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
    if (!syncobj)
        return NULL;

    for (unsigned i = 0; i < counts->syncobj_count; i++) {
        struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
        sem->handle = counts->syncobj[i];
    }

    chunk->chunk_id = chunk_id;
    chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
    chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
    return syncobj;
}
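/* The raw CS ioctl takes an array of chunks, each typed by chunk_id, with a
 * payload measured in dwords and passed as a user pointer cast to u64. This
 * helper fills one chunk describing the syncobjs to wait on
 * (AMDGPU_CHUNK_ID_SYNCOBJ_IN) or to signal (AMDGPU_CHUNK_ID_SYNCOBJ_OUT). */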
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct amdgpu_cs_request *request,
                                 struct radv_winsys_sem_info *sem_info)
{
    int r;
    int num_chunks;
    int size;
    bool user_fence;
    struct drm_amdgpu_cs_chunk *chunks;
    struct drm_amdgpu_cs_chunk_data *chunk_data;
    struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
    struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
    int i;
    struct amdgpu_cs_fence *sem;

    user_fence = (request->fence_info.handle != NULL);
    size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;

    chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

    size = request->number_of_ibs + (user_fence ? 1 : 0);

    chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

    num_chunks = request->number_of_ibs;
    for (i = 0; i < request->number_of_ibs; i++) {
        struct amdgpu_cs_ib_info *ib;
        chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

        ib = &request->ibs[i];

        chunk_data[i].ib_data._pad = 0;
        chunk_data[i].ib_data.va_start = ib->ib_mc_address;
        chunk_data[i].ib_data.ib_bytes = ib->size * 4;
        chunk_data[i].ib_data.ip_type = request->ip_type;
        chunk_data[i].ib_data.ip_instance = request->ip_instance;
        chunk_data[i].ib_data.ring = request->ring;
        chunk_data[i].ib_data.flags = ib->flags;
    }

    if (user_fence) {
        i = num_chunks++;

        chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

        amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
                                           &chunk_data[i]);
    }

    if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
        wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
                                                          &chunks[num_chunks],
                                                          AMDGPU_CHUNK_ID_SYNCOBJ_IN);
        if (!wait_syncobj) {
            return -ENOMEM;
        }
        num_chunks++;

        if (sem_info->wait.sem_count == 0)
            sem_info->cs_emit_wait = false;
    }

    if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
        sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);

        int sem_count = 0;
        for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
            sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
            if (!sem->context)
                continue;
            struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

            amdgpu_cs_chunk_fence_to_dep(sem, dep);

            sem->context = NULL;
        }
        i = num_chunks++;

        /* dependencies chunk */
        chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

        sem_info->cs_emit_wait = false;
    }

    if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
        signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
                                                            &chunks[num_chunks],
                                                            AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
        if (!signal_syncobj) {
            r = -ENOMEM;
            goto error_out;
        }
        num_chunks++;
    }

    r = amdgpu_cs_submit_raw(ctx->ws->dev,
                             ctx->ctx,
                             request->resources,
                             num_chunks,
                             chunks,
                             &request->seq_no);
error_out:
    free(wait_syncobj);
    free(signal_syncobj);
    return r;
}
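/* A minimal sketch of the chunk layout handed to amdgpu_cs_submit_raw() for
 * one IB with a user fence and both syncobj directions, in the order the
 * code above appends them:
 *
 *   chunks[0] AMDGPU_CHUNK_ID_IB           -> drm_amdgpu_cs_chunk_ib
 *   chunks[1] AMDGPU_CHUNK_ID_FENCE        -> drm_amdgpu_cs_chunk_fence
 *   chunks[2] AMDGPU_CHUNK_ID_SYNCOBJ_IN   -> drm_amdgpu_cs_chunk_sem[]
 *   chunks[3] AMDGPU_CHUNK_ID_DEPENDENCIES -> drm_amdgpu_cs_chunk_dep[]
 *   chunks[4] AMDGPU_CHUNK_ID_SYNCOBJ_OUT  -> drm_amdgpu_cs_chunk_sem[]
 *
 * which is why the chunk array reserves number_of_ibs entries plus room for
 * the fence and the synchronization chunks. */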
static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
                                      uint32_t *handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    return amdgpu_cs_create_syncobj(ws->dev, handle);
}
static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
                                        uint32_t handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    amdgpu_cs_destroy_syncobj(ws->dev, handle);
}
static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
                                      uint32_t handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
}
static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
                                       uint32_t handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
}
static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
                                     uint32_t handle_count, bool wait_all, uint64_t timeout)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    /* The timeouts are signed, while vulkan timeouts are unsigned. */
    timeout = MIN2(timeout, INT64_MAX);

    int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
                                     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                                     (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
                                     NULL);
    if (ret == 0) {
        return true;
    } else if (ret == -1 && errno == ETIME) {
        return false;
    } else {
        fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
        return false;
    }
}
static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
                                      uint32_t syncobj,
                                      int *fd)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
                                      int fd,
                                      uint32_t *syncobj)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
                                                   uint32_t syncobj,
                                                   int *fd)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
                                                     uint32_t syncobj,
                                                     int fd)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
}
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
    ws->base.ctx_create = radv_amdgpu_ctx_create;
    ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
    ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
    ws->base.cs_create = radv_amdgpu_cs_create;
    ws->base.cs_destroy = radv_amdgpu_cs_destroy;
    ws->base.cs_grow = radv_amdgpu_cs_grow;
    ws->base.cs_finalize = radv_amdgpu_cs_finalize;
    ws->base.cs_reset = radv_amdgpu_cs_reset;
    ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
    ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
    ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
    ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
    ws->base.create_fence = radv_amdgpu_create_fence;
    ws->base.destroy_fence = radv_amdgpu_destroy_fence;
    ws->base.create_sem = radv_amdgpu_create_sem;
    ws->base.destroy_sem = radv_amdgpu_destroy_sem;
    ws->base.create_syncobj = radv_amdgpu_create_syncobj;
    ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
    ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
    ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
    ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
    ws->base.export_syncobj = radv_amdgpu_export_syncobj;
    ws->base.import_syncobj = radv_amdgpu_import_syncobj;
    ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
    ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
    ws->base.fence_wait = radv_amdgpu_fence_wait;
    ws->base.fences_wait = radv_amdgpu_fences_wait;
}