radv: Add winsys support for submitting timeline syncobj.
[mesa.git] / src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include "drm-uapi/amdgpu_drm.h"
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34 #include "radv_radeon_winsys.h"
35 #include "radv_amdgpu_cs.h"
36 #include "radv_amdgpu_bo.h"
37 #include "sid.h"
38
39
40 enum {
41 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
42 };
43
44 struct radv_amdgpu_cs {
45 struct radeon_cmdbuf base;
46 struct radv_amdgpu_winsys *ws;
47
48 struct amdgpu_cs_ib_info ib;
49
50 struct radeon_winsys_bo *ib_buffer;
51 uint8_t *ib_mapped;
52 unsigned max_num_buffers;
53 unsigned num_buffers;
54 struct drm_amdgpu_bo_list_entry *handles;
55
56 struct radeon_winsys_bo **old_ib_buffers;
57 unsigned num_old_ib_buffers;
58 unsigned max_num_old_ib_buffers;
59 unsigned *ib_size_ptr;
60 VkResult status;
61 bool is_chained;
62
63 int buffer_hash_table[1024];
64 unsigned hw_ip;
65
66 unsigned num_virtual_buffers;
67 unsigned max_num_virtual_buffers;
68 struct radeon_winsys_bo **virtual_buffers;
69 int *virtual_buffer_hash_table;
70
71 /* For chips that don't support chaining. */
72 struct radeon_cmdbuf *old_cs_buffers;
73 unsigned num_old_cs_buffers;
74 };
75
76 static inline struct radv_amdgpu_cs *
77 radv_amdgpu_cs(struct radeon_cmdbuf *base)
78 {
79 return (struct radv_amdgpu_cs*)base;
80 }
81
82 static int ring_to_hw_ip(enum ring_type ring)
83 {
84 switch (ring) {
85 case RING_GFX:
86 return AMDGPU_HW_IP_GFX;
87 case RING_DMA:
88 return AMDGPU_HW_IP_DMA;
89 case RING_COMPUTE:
90 return AMDGPU_HW_IP_COMPUTE;
91 default:
92 unreachable("unsupported ring");
93 }
94 }
95
96 struct radv_amdgpu_cs_request {
97 /** Specify flags with additional information */
98 uint64_t flags;
99
100 /** Specify HW IP block type to which to send the IB. */
101 unsigned ip_type;
102
103 /** IP instance index if there are several IPs of the same type. */
104 unsigned ip_instance;
105
106 /**
107 * Specify ring index of the IP. We could have several rings
108 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
109 */
110 uint32_t ring;
111
112 /**
113 * BO list handles used by this request.
114 */
115 struct drm_amdgpu_bo_list_entry *handles;
116 uint32_t num_handles;
117
118 /**
119 * Number of dependencies this command submission needs to
120 * wait for before starting execution.
121 */
122 uint32_t number_of_dependencies;
123
124 /**
125 * Array of dependencies which need to be met before
126 * execution can start.
127 */
128 struct amdgpu_cs_fence *dependencies;
129
130 /** Number of IBs to submit in the field ibs. */
131 uint32_t number_of_ibs;
132
133 /**
134 * IBs to submit. These IBs will be submitted together as a single entity.
135 */
136 struct amdgpu_cs_ib_info *ibs;
137
138 /**
139 * The returned sequence number for the command submission
140 */
141 uint64_t seq_no;
142
143 /**
144 * The fence information
145 */
146 struct amdgpu_cs_fence_info fence_info;
147 };
148
149
150 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
151 uint32_t ip_type,
152 uint32_t ring,
153 struct radv_winsys_sem_info *sem_info);
154 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
155 struct radv_amdgpu_cs_request *request,
156 struct radv_winsys_sem_info *sem_info);
157
158 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
159 struct radv_amdgpu_fence *fence,
160 struct radv_amdgpu_cs_request *req)
161 {
162 fence->fence.context = ctx->ctx;
163 fence->fence.ip_type = req->ip_type;
164 fence->fence.ip_instance = req->ip_instance;
165 fence->fence.ring = req->ring;
166 fence->fence.fence = req->seq_no;
167 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
168 }
169
170 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
171 {
172 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
173 if (!fence)
174 return NULL;
175
176 fence->fence.fence = UINT64_MAX;
177 return (struct radeon_winsys_fence*)fence;
178 }
179
180 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
181 {
182 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
183 free(fence);
184 }
185
186 static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
187 {
188 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
189 fence->fence.fence = UINT64_MAX;
190 }
191
192 static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
193 {
194 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
195 fence->fence.fence = 0;
196 }
197
198 static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
199 {
200 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
201 return fence->fence.fence < UINT64_MAX;
202 }
203
204 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
205 struct radeon_winsys_fence *_fence,
206 bool absolute,
207 uint64_t timeout)
208 {
209 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
210 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
211 int r;
212 uint32_t expired = 0;
213
214 /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
215 if (fence->fence.fence == UINT64_MAX)
216 return false;
217
218 if (fence->fence.fence == 0)
219 return true;
220
221 if (fence->user_ptr) {
222 if (*fence->user_ptr >= fence->fence.fence)
223 return true;
224 if (!absolute && !timeout)
225 return false;
226 }
227
228 /* Now use the libdrm query. */
229 r = amdgpu_cs_query_fence_status(&fence->fence,
230 timeout,
231 flags,
232 &expired);
233
234 if (r) {
235 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
236 return false;
237 }
238
239 if (expired)
240 return true;
241
242 return false;
243 }
244
245
246 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
247 struct radeon_winsys_fence *const *_fences,
248 uint32_t fence_count,
249 bool wait_all,
250 uint64_t timeout)
251 {
252 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
253 int r;
254 uint32_t expired = 0, first = 0;
255
256 if (!fences)
257 return false;
258
259 for (uint32_t i = 0; i < fence_count; ++i)
260 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
261
262 /* Now use the libdrm query. */
263 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
264 timeout, &expired, &first);
265
266 free(fences);
267 if (r) {
268 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
269 return false;
270 }
271
272 if (expired)
273 return true;
274
275 return false;
276 }
277
278 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
279 {
280 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
281
282 if (cs->ib_buffer)
283 cs->ws->base.buffer_destroy(cs->ib_buffer);
284 else
285 free(cs->base.buf);
286
287 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
288 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
289
290 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
291 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
292 free(rcs->buf);
293 }
294
295 free(cs->old_cs_buffers);
296 free(cs->old_ib_buffers);
297 free(cs->virtual_buffers);
298 free(cs->virtual_buffer_hash_table);
299 free(cs->handles);
300 free(cs);
301 }
302
303 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
304 enum ring_type ring_type)
305 {
306 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
307 cs->buffer_hash_table[i] = -1;
308
309 cs->hw_ip = ring_to_hw_ip(ring_type);
310 }
311
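/* Create a command stream. With IB BOs the commands are written directly
 * into an 80 KiB write-combined GTT BO; otherwise they are recorded into a
 * malloc'ed CPU buffer that the sysmem submit path copies into GPU memory
 * at submission time. */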
312 static struct radeon_cmdbuf *
313 radv_amdgpu_cs_create(struct radeon_winsys *ws,
314 enum ring_type ring_type)
315 {
316 struct radv_amdgpu_cs *cs;
317 uint32_t ib_size = 20 * 1024 * 4;
318 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
319 if (!cs)
320 return NULL;
321
322 cs->ws = radv_amdgpu_winsys(ws);
323 radv_amdgpu_init_cs(cs, ring_type);
324
325 if (cs->ws->use_ib_bos) {
326 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
327 RADEON_DOMAIN_GTT,
328 RADEON_FLAG_CPU_ACCESS |
329 RADEON_FLAG_NO_INTERPROCESS_SHARING |
330 RADEON_FLAG_READ_ONLY |
331 RADEON_FLAG_GTT_WC,
332 RADV_BO_PRIORITY_CS);
333 if (!cs->ib_buffer) {
334 free(cs);
335 return NULL;
336 }
337
338 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
339 if (!cs->ib_mapped) {
340 ws->buffer_destroy(cs->ib_buffer);
341 free(cs);
342 return NULL;
343 }
344
345 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
346 cs->base.buf = (uint32_t *)cs->ib_mapped;
347 cs->base.max_dw = ib_size / 4 - 4;
348 cs->ib_size_ptr = &cs->ib.size;
349 cs->ib.size = 0;
350
351 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
352 } else {
353 uint32_t *buf = malloc(16384);
354 if (!buf) {
355 free(cs);
356 return NULL;
357 }
358 cs->base.buf = buf;
359 cs->base.max_dw = 4096;
360 }
361
362 return &cs->base;
363 }
364
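/* Make room for at least min_size additional dwords. Without IB BOs the
 * CPU buffer is reallocated, spilling the current contents into
 * old_cs_buffers once the per-IB limit of 0xffff8 dwords is reached. With
 * IB BOs a new buffer is allocated and the old one is patched to chain
 * into it with an INDIRECT_BUFFER packet. */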
365 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
366 {
367 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
368
369 if (cs->status != VK_SUCCESS) {
370 cs->base.cdw = 0;
371 return;
372 }
373
374 if (!cs->ws->use_ib_bos) {
375 const uint64_t limit_dws = 0xffff8;
376 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
377 MIN2(cs->base.max_dw * 2, limit_dws));
378
379 /* The total ib size cannot exceed limit_dws dwords. */
380 if (ib_dws > limit_dws)
381 {
382 /* The maximum size in dwords has been reached,
383 * try to allocate a new one.
384 */
385 struct radeon_cmdbuf *old_cs_buffers =
386 realloc(cs->old_cs_buffers,
387 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
388 if (!old_cs_buffers) {
389 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
390 cs->base.cdw = 0;
391 return;
392 }
393 cs->old_cs_buffers = old_cs_buffers;
394
395 /* Store the current one for submitting it later. */
396 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
397 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
398 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
399 cs->num_old_cs_buffers++;
400
401 /* Reset the cs, it will be re-allocated below. */
402 cs->base.cdw = 0;
403 cs->base.buf = NULL;
404
405 /* Re-compute the number of dwords to allocate. */
406 ib_dws = MAX2(cs->base.cdw + min_size,
407 MIN2(cs->base.max_dw * 2, limit_dws));
408 if (ib_dws > limit_dws) {
409 fprintf(stderr, "amdgpu: Too many dwords "
410 "to allocate\n");
411 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
412 return;
413 }
414 }
415
416 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
417 if (new_buf) {
418 cs->base.buf = new_buf;
419 cs->base.max_dw = ib_dws;
420 } else {
421 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
422 cs->base.cdw = 0;
423 }
424 return;
425 }
426
427 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
428
429 /* max that fits in the chain size field. */
430 ib_size = MIN2(ib_size, 0xfffff);
431
432 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
433 radeon_emit(&cs->base, PKT3_NOP_PAD);
434
435 *cs->ib_size_ptr |= cs->base.cdw + 4;
436
437 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
438 unsigned max_num_old_ib_buffers =
439 MAX2(1, cs->max_num_old_ib_buffers * 2);
440 struct radeon_winsys_bo **old_ib_buffers =
441 realloc(cs->old_ib_buffers,
442 max_num_old_ib_buffers * sizeof(void*));
443 if (!old_ib_buffers) {
444 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
445 return;
446 }
447 cs->max_num_old_ib_buffers = max_num_old_ib_buffers;
448 cs->old_ib_buffers = old_ib_buffers;
449 }
450
451 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
452
453 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
454 RADEON_DOMAIN_GTT,
455 RADEON_FLAG_CPU_ACCESS |
456 RADEON_FLAG_NO_INTERPROCESS_SHARING |
457 RADEON_FLAG_READ_ONLY |
458 RADEON_FLAG_GTT_WC,
459 RADV_BO_PRIORITY_CS);
460
461 if (!cs->ib_buffer) {
462 cs->base.cdw = 0;
463 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
464 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
465 }
466
467 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
468 if (!cs->ib_mapped) {
469 cs->ws->base.buffer_destroy(cs->ib_buffer);
470 cs->base.cdw = 0;
471
472 /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
473 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
474 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
475 }
476
477 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
478
479 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
480 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
481 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
482 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
483
484 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
485
486 cs->base.buf = (uint32_t *)cs->ib_mapped;
487 cs->base.cdw = 0;
488 cs->base.max_dw = ib_size / 4 - 4;
489
490 }
491
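/* Finish recording: with IB BOs the stream is padded with NOPs to an
 * 8-dword boundary and the final size is stored in the IB info. Returns
 * any error that was recorded while growing the CS. */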
492 static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
493 {
494 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
495
496 if (cs->ws->use_ib_bos) {
497 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
498 radeon_emit(&cs->base, PKT3_NOP_PAD);
499
500 *cs->ib_size_ptr |= cs->base.cdw;
501
502 cs->is_chained = false;
503 }
504
505 return cs->status;
506 }
507
508 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
509 {
510 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
511 cs->base.cdw = 0;
512 cs->status = VK_SUCCESS;
513
514 for (unsigned i = 0; i < cs->num_buffers; ++i) {
515 unsigned hash = cs->handles[i].bo_handle &
516 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
517 cs->buffer_hash_table[hash] = -1;
518 }
519
520 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
521 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
522 cs->virtual_buffer_hash_table[hash] = -1;
523 }
524
525 cs->num_buffers = 0;
526 cs->num_virtual_buffers = 0;
527
528 if (cs->ws->use_ib_bos) {
529 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
530
531 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
532 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
533
534 cs->num_old_ib_buffers = 0;
535 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
536 cs->ib_size_ptr = &cs->ib.size;
537 cs->ib.size = 0;
538 } else {
539 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
540 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
541 free(rcs->buf);
542 }
543
544 free(cs->old_cs_buffers);
545 cs->old_cs_buffers = NULL;
546 cs->num_old_cs_buffers = 0;
547 }
548 }
549
550 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
551 uint32_t bo)
552 {
553 unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
554 int index = cs->buffer_hash_table[hash];
555
556 if (index == -1)
557 return -1;
558
559 if (cs->handles[index].bo_handle == bo)
560 return index;
561
562 for (unsigned i = 0; i < cs->num_buffers; ++i) {
563 if (cs->handles[i].bo_handle == bo) {
564 cs->buffer_hash_table[hash] = i;
565 return i;
566 }
567 }
568
569 return -1;
570 }
571
572 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
573 uint32_t bo, uint8_t priority)
574 {
575 unsigned hash;
576 int index = radv_amdgpu_cs_find_buffer(cs, bo);
577
578 if (index != -1)
579 return;
580
581 if (cs->num_buffers == cs->max_num_buffers) {
582 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
583 struct drm_amdgpu_bo_list_entry *new_entries =
584 realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
585 if (new_entries) {
586 cs->max_num_buffers = new_count;
587 cs->handles = new_entries;
588 } else {
589 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
590 return;
591 }
592 }
593
594 cs->handles[cs->num_buffers].bo_handle = bo;
595 cs->handles[cs->num_buffers].bo_priority = priority;
596
597 hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
598 cs->buffer_hash_table[hash] = cs->num_buffers;
599
600 ++cs->num_buffers;
601 }
602
603 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
604 struct radeon_winsys_bo *bo)
605 {
606 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
607 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
608
609
610 if (!cs->virtual_buffer_hash_table) {
611 int *virtual_buffer_hash_table =
612 malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
613 if (!virtual_buffer_hash_table) {
614 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
615 return;
616 }
617 cs->virtual_buffer_hash_table = virtual_buffer_hash_table;
618
619 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
620 cs->virtual_buffer_hash_table[i] = -1;
621 }
622
623 if (cs->virtual_buffer_hash_table[hash] >= 0) {
624 int idx = cs->virtual_buffer_hash_table[hash];
625 if (cs->virtual_buffers[idx] == bo) {
626 return;
627 }
628 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
629 if (cs->virtual_buffers[i] == bo) {
630 cs->virtual_buffer_hash_table[hash] = i;
631 return;
632 }
633 }
634 }
635
636 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
637 unsigned max_num_virtual_buffers =
638 MAX2(2, cs->max_num_virtual_buffers * 2);
639 struct radeon_winsys_bo **virtual_buffers =
640 realloc(cs->virtual_buffers,
641 sizeof(struct radeon_winsys_bo*) * max_num_virtual_buffers);
642 if (!virtual_buffers) {
643 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
644 return;
645 }
646 cs->max_num_virtual_buffers = max_num_virtual_buffers;
647 cs->virtual_buffers = virtual_buffers;
648 }
649
650 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
651
652 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
653 ++cs->num_virtual_buffers;
654
655 }
656
657 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
658 struct radeon_winsys_bo *_bo)
659 {
660 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
661 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
662
663 if (cs->status != VK_SUCCESS)
664 return;
665
666 if (bo->is_virtual) {
667 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
668 return;
669 }
670
671 if (bo->base.is_local)
672 return;
673
674 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
675 }
676
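/* Execute a secondary command stream from a primary one: the child's BO
 * references are merged into the parent and its commands are either
 * referenced through an INDIRECT_BUFFER packet (IB BOs) or copied inline. */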
677 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
678 struct radeon_cmdbuf *_child)
679 {
680 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
681 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
682
683 if (parent->status != VK_SUCCESS || child->status != VK_SUCCESS)
684 return;
685
686 for (unsigned i = 0; i < child->num_buffers; ++i) {
687 radv_amdgpu_cs_add_buffer_internal(parent,
688 child->handles[i].bo_handle,
689 child->handles[i].bo_priority);
690 }
691
692 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
693 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
694 }
695
696 if (parent->ws->use_ib_bos) {
697 if (parent->base.cdw + 4 > parent->base.max_dw)
698 radv_amdgpu_cs_grow(&parent->base, 4);
699
700 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
701 radeon_emit(&parent->base, child->ib.ib_mc_address);
702 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
703 radeon_emit(&parent->base, child->ib.size);
704 } else {
705 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
706 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
707
708 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
709 parent->base.cdw += child->base.cdw;
710 }
711 }
712
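/* Build the flat array of BO handles for a submission: either the global
 * BO list (debug_all_bos), the list of a single CS (fast path), or a
 * deduplicated merge of all CSes, their virtual buffers, the extra BOs
 * and the per-submit radv_bo_list. */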
713 static VkResult
714 radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
715 struct radeon_cmdbuf **cs_array,
716 unsigned count,
717 struct radv_amdgpu_winsys_bo **extra_bo_array,
718 unsigned num_extra_bo,
719 struct radeon_cmdbuf *extra_cs,
720 const struct radv_winsys_bo_list *radv_bo_list,
721 unsigned *rnum_handles,
722 struct drm_amdgpu_bo_list_entry **rhandles)
723 {
724 struct drm_amdgpu_bo_list_entry *handles = NULL;
725 unsigned num_handles = 0;
726
727 if (ws->debug_all_bos) {
728 struct radv_amdgpu_winsys_bo *bo;
729
730 pthread_mutex_lock(&ws->global_bo_list_lock);
731
732 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
733 if (!handles) {
734 pthread_mutex_unlock(&ws->global_bo_list_lock);
735 return VK_ERROR_OUT_OF_HOST_MEMORY;
736 }
737
738 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
739 assert(num_handles < ws->num_buffers);
740 handles[num_handles].bo_handle = bo->bo_handle;
741 handles[num_handles].bo_priority = bo->priority;
742 num_handles++;
743 }
744
745 pthread_mutex_unlock(&ws->global_bo_list_lock);
746 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
747 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
748 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
749 if (cs->num_buffers == 0)
750 return VK_SUCCESS;
751
752 handles = malloc(sizeof(handles[0]) * cs->num_buffers);
753 if (!handles)
754 return VK_ERROR_OUT_OF_HOST_MEMORY;
755
756 memcpy(handles, cs->handles,
757 sizeof(handles[0]) * cs->num_buffers);
758 num_handles = cs->num_buffers;
759 } else {
760 unsigned total_buffer_count = num_extra_bo;
761 num_handles = num_extra_bo;
762 for (unsigned i = 0; i < count; ++i) {
763 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
764 total_buffer_count += cs->num_buffers;
765 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
766 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
767 }
768
769 if (extra_cs) {
770 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
771 }
772
773 if (radv_bo_list) {
774 total_buffer_count += radv_bo_list->count;
775 }
776
777 if (total_buffer_count == 0)
778 return VK_SUCCESS;
779
780 handles = malloc(sizeof(handles[0]) * total_buffer_count);
781 if (!handles)
782 return VK_ERROR_OUT_OF_HOST_MEMORY;
783
784 for (unsigned i = 0; i < num_extra_bo; i++) {
785 handles[i].bo_handle = extra_bo_array[i]->bo_handle;
786 handles[i].bo_priority = extra_bo_array[i]->priority;
787 }
788
789 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
790 struct radv_amdgpu_cs *cs;
791
792 if (i == count)
793 cs = (struct radv_amdgpu_cs*)extra_cs;
794 else
795 cs = (struct radv_amdgpu_cs*)cs_array[i];
796
797 if (!cs->num_buffers)
798 continue;
799
800 if (num_handles == 0 && !cs->num_virtual_buffers) {
801 memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
802 num_handles = cs->num_buffers;
803 continue;
804 }
805 int unique_bo_so_far = num_handles;
806 for (unsigned j = 0; j < cs->num_buffers; ++j) {
807 bool found = false;
808 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
809 if (handles[k].bo_handle == cs->handles[j].bo_handle) {
810 found = true;
811 break;
812 }
813 }
814 if (!found) {
815 handles[num_handles] = cs->handles[j];
816 ++num_handles;
817 }
818 }
819 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
820 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
821 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
822 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
823 bool found = false;
824 for (unsigned m = 0; m < num_handles; ++m) {
825 if (handles[m].bo_handle == bo->bo_handle) {
826 found = true;
827 break;
828 }
829 }
830 if (!found) {
831 handles[num_handles].bo_handle = bo->bo_handle;
832 handles[num_handles].bo_priority = bo->priority;
833 ++num_handles;
834 }
835 }
836 }
837 }
838
839 if (radv_bo_list) {
840 unsigned unique_bo_so_far = num_handles;
841 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
842 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
843 bool found = false;
844 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
845 if (bo->bo_handle == handles[j].bo_handle) {
846 found = true;
847 break;
848 }
849 }
850 if (!found) {
851 handles[num_handles].bo_handle = bo->bo_handle;
852 handles[num_handles].bo_priority = bo->priority;
853 ++num_handles;
854 }
855 }
856 }
857 }
858
859 *rhandles = handles;
860 *rnum_handles = num_handles;
861
862 return VK_SUCCESS;
863 }
864
865 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
866 {
867 struct amdgpu_cs_fence_info ret = {0};
868 if (ctx->fence_map) {
869 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
870 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
871 }
872 return ret;
873 }
874
875 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
876 struct radv_amdgpu_cs_request *request)
877 {
878 radv_amdgpu_request_to_fence(ctx,
879 &ctx->last_submission[request->ip_type][request->ring],
880 request);
881 }
882
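/* Submit command streams that have been patched to chain into each other,
 * so the whole batch is submitted with a single IB chunk (plus an optional
 * preamble IB). */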
883 static VkResult
884 radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
885 int queue_idx,
886 struct radv_winsys_sem_info *sem_info,
887 const struct radv_winsys_bo_list *radv_bo_list,
888 struct radeon_cmdbuf **cs_array,
889 unsigned cs_count,
890 struct radeon_cmdbuf *initial_preamble_cs,
891 struct radeon_cmdbuf *continue_preamble_cs,
892 struct radeon_winsys_fence *_fence)
893 {
894 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
895 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
896 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
897 struct drm_amdgpu_bo_list_entry *handles = NULL;
898 struct radv_amdgpu_cs_request request = {0};
899 struct amdgpu_cs_ib_info ibs[2];
900 unsigned number_of_ibs = 1;
901 unsigned num_handles = 0;
902 VkResult result;
903
904 for (unsigned i = cs_count; i--;) {
905 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
906
907 if (cs->is_chained) {
908 *cs->ib_size_ptr -= 4;
909 cs->is_chained = false;
910 }
911
912 if (i + 1 < cs_count) {
913 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
914 assert(cs->base.cdw + 4 <= cs->base.max_dw);
915
916 cs->is_chained = true;
917 *cs->ib_size_ptr += 4;
918
919 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
920 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
921 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
922 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
923 }
924 }
925
926 /* Get the BO list. */
927 result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
928 initial_preamble_cs, radv_bo_list,
929 &num_handles, &handles);
930 if (result != VK_SUCCESS)
931 return result;
932
933 /* Configure the CS request. */
934 if (initial_preamble_cs) {
935 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
936 ibs[1] = cs0->ib;
937 number_of_ibs++;
938 } else {
939 ibs[0] = cs0->ib;
940 }
941
942 request.ip_type = cs0->hw_ip;
943 request.ring = queue_idx;
944 request.number_of_ibs = number_of_ibs;
945 request.ibs = ibs;
946 request.handles = handles;
947 request.num_handles = num_handles;
948 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
949
950 /* Submit the CS. */
951 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
952
953 free(request.handles);
954
955 if (result != VK_SUCCESS)
956 return result;
957
958 if (fence)
959 radv_amdgpu_request_to_fence(ctx, fence, &request);
960
961 radv_assign_last_submit(ctx, &request);
962
963 return VK_SUCCESS;
964 }
965
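/* Submit command streams that cannot be chained: each CS becomes its own
 * IB within a single CS ioctl (plus an optional preamble IB). */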
966 static VkResult
967 radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
968 int queue_idx,
969 struct radv_winsys_sem_info *sem_info,
970 const struct radv_winsys_bo_list *radv_bo_list,
971 struct radeon_cmdbuf **cs_array,
972 unsigned cs_count,
973 struct radeon_cmdbuf *initial_preamble_cs,
974 struct radeon_cmdbuf *continue_preamble_cs,
975 struct radeon_winsys_fence *_fence)
976 {
977 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
978 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
979 struct drm_amdgpu_bo_list_entry *handles = NULL;
980 struct radv_amdgpu_cs_request request = {};
981 struct amdgpu_cs_ib_info *ibs;
982 struct radv_amdgpu_cs *cs0;
983 unsigned num_handles = 0;
984 unsigned number_of_ibs;
985 VkResult result;
986
987 assert(cs_count);
988 cs0 = radv_amdgpu_cs(cs_array[0]);
989
990 /* Compute the number of IBs for this submit. */
991 number_of_ibs = cs_count + !!initial_preamble_cs;
992
993 /* Get the BO list. */
994 result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
995 initial_preamble_cs, radv_bo_list,
996 &num_handles, &handles);
997 if (result != VK_SUCCESS)
998 return result;
999
1000 ibs = malloc(number_of_ibs * sizeof(*ibs));
1001 if (!ibs) {
1002 free(handles);
1003 return VK_ERROR_OUT_OF_HOST_MEMORY;
1004 }
1005
1006 /* Configure the CS request. */
1007 if (initial_preamble_cs)
1008 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
1009
1010 for (unsigned i = 0; i < cs_count; i++) {
1011 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1012
1013 ibs[i + !!initial_preamble_cs] = cs->ib;
1014
1015 if (cs->is_chained) {
1016 *cs->ib_size_ptr -= 4;
1017 cs->is_chained = false;
1018 }
1019 }
1020
1021 request.ip_type = cs0->hw_ip;
1022 request.ring = queue_idx;
1023 request.handles = handles;
1024 request.num_handles = num_handles;
1025 request.number_of_ibs = number_of_ibs;
1026 request.ibs = ibs;
1027 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1028
1029 /* Submit the CS. */
1030 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1031
1032 free(request.handles);
1033 free(ibs);
1034
1035 if (result != VK_SUCCESS)
1036 return result;
1037
1038 if (fence)
1039 radv_amdgpu_request_to_fence(ctx, fence, &request);
1040
1041 radv_assign_last_submit(ctx, &request);
1042
1043 return VK_SUCCESS;
1044 }
1045
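/* Submission path for chips without IB BO support: the CPU-side command
 * buffers are copied into freshly allocated GTT BOs, padded with NOPs to
 * an 8-dword boundary, and submitted in batches that respect the per-IB
 * size limit. */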
1046 static VkResult
1047 radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
1048 int queue_idx,
1049 struct radv_winsys_sem_info *sem_info,
1050 const struct radv_winsys_bo_list *radv_bo_list,
1051 struct radeon_cmdbuf **cs_array,
1052 unsigned cs_count,
1053 struct radeon_cmdbuf *initial_preamble_cs,
1054 struct radeon_cmdbuf *continue_preamble_cs,
1055 struct radeon_winsys_fence *_fence)
1056 {
1057 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1058 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
1059 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1060 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
1061 struct radv_amdgpu_cs_request request;
1062 uint32_t pad_word = PKT3_NOP_PAD;
1063 bool emit_signal_sem = sem_info->cs_emit_signal;
1064 VkResult result;
1065
1066 if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
1067 pad_word = 0x80000000;
1068
1069 assert(cs_count);
1070
1071 for (unsigned i = 0; i < cs_count;) {
1072 struct amdgpu_cs_ib_info *ibs;
1073 struct radeon_winsys_bo **bos;
1074 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1075 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1076 struct drm_amdgpu_bo_list_entry *handles = NULL;
1077 unsigned num_handles = 0;
1078 unsigned number_of_ibs;
1079 uint32_t *ptr;
1080 unsigned cnt = 0;
1081 unsigned size = 0;
1082 unsigned pad_words = 0;
1083
1084 /* Compute the number of IBs for this submit. */
1085 number_of_ibs = cs->num_old_cs_buffers + 1;
1086
1087 ibs = malloc(number_of_ibs * sizeof(*ibs));
1088 if (!ibs)
1089 return VK_ERROR_OUT_OF_HOST_MEMORY;
1090
1091 bos = malloc(number_of_ibs * sizeof(*bos));
1092 if (!bos) {
1093 free(ibs);
1094 return VK_ERROR_OUT_OF_HOST_MEMORY;
1095 }
1096
1097 if (number_of_ibs > 1) {
1098 /* Special path when the maximum size in dwords has
1099 * been reached because we need to handle more than one
1100 * IB per submit.
1101 */
1102 struct radeon_cmdbuf **new_cs_array;
1103 unsigned idx = 0;
1104
1105 new_cs_array = malloc(number_of_ibs *
1106 sizeof(*new_cs_array));
1107 assert(new_cs_array);
1108
1109 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1110 new_cs_array[idx++] = &cs->old_cs_buffers[j];
1111 new_cs_array[idx++] = cs_array[i];
1112
1113 for (unsigned j = 0; j < number_of_ibs; j++) {
1114 struct radeon_cmdbuf *rcs = new_cs_array[j];
1115 bool needs_preamble = preamble_cs && j == 0;
1116 unsigned size = 0, pad_words = 0; /* padding is per IB */
1117
1118 if (needs_preamble)
1119 size += preamble_cs->cdw;
1120 size += rcs->cdw;
1121
1122 assert(size < 0xffff8);
1123
1124 while (!size || (size & 7)) {
1125 size++;
1126 pad_words++;
1127 }
1128
1129 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1130 RADEON_DOMAIN_GTT,
1131 RADEON_FLAG_CPU_ACCESS |
1132 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1133 RADEON_FLAG_READ_ONLY,
1134 RADV_BO_PRIORITY_CS);
1135 ptr = ws->buffer_map(bos[j]);
1136
1137 if (needs_preamble) {
1138 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1139 ptr += preamble_cs->cdw;
1140 }
1141
1142 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1143 ptr += rcs->cdw;
1144
1145 for (unsigned k = 0; k < pad_words; ++k)
1146 *ptr++ = pad_word;
1147
1148 ibs[j].size = size;
1149 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1150 ibs[j].flags = 0;
1151 }
1152
1153 cnt++;
1154 free(new_cs_array);
1155 } else {
1156 if (preamble_cs)
1157 size += preamble_cs->cdw;
1158
1159 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1160 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1161 ++cnt;
1162 }
1163
1164 while (!size || (size & 7)) {
1165 size++;
1166 pad_words++;
1167 }
1168 assert(cnt);
1169
1170 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1171 RADEON_DOMAIN_GTT,
1172 RADEON_FLAG_CPU_ACCESS |
1173 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1174 RADEON_FLAG_READ_ONLY,
1175 RADV_BO_PRIORITY_CS);
1176 ptr = ws->buffer_map(bos[0]);
1177
1178 if (preamble_cs) {
1179 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1180 ptr += preamble_cs->cdw;
1181 }
1182
1183 for (unsigned j = 0; j < cnt; ++j) {
1184 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1185 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1186 ptr += cs->base.cdw;
1187
1188 }
1189
1190 for (unsigned j = 0; j < pad_words; ++j)
1191 *ptr++ = pad_word;
1192
1193 ibs[0].size = size;
1194 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1195 ibs[0].flags = 0;
1196 }
1197
1198 result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
1199 (struct radv_amdgpu_winsys_bo **)bos,
1200 number_of_ibs, preamble_cs,
1201 radv_bo_list,
1202 &num_handles, &handles);
1203 if (result != VK_SUCCESS) {
1204 free(ibs);
1205 free(bos);
1206 return result;
1207 }
1208
1209 memset(&request, 0, sizeof(request));
1210
1211 request.ip_type = cs0->hw_ip;
1212 request.ring = queue_idx;
1213 request.handles = handles;
1214 request.num_handles = num_handles;
1215 request.number_of_ibs = number_of_ibs;
1216 request.ibs = ibs;
1217 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1218
1219 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1220 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1221
1222 free(request.handles);
1223
1224 for (unsigned j = 0; j < number_of_ibs; j++) {
1225 ws->buffer_destroy(bos[j]);
1226 }
1227
1228 free(ibs);
1229 free(bos);
1230
1231 if (result != VK_SUCCESS)
1232 return result;
1233
1234 i += cnt;
1235 }
1236 if (fence)
1237 radv_amdgpu_request_to_fence(ctx, fence, &request);
1238
1239 radv_assign_last_submit(ctx, &request);
1240
1241 return VK_SUCCESS;
1242 }
1243
1244 static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1245 int queue_idx,
1246 struct radeon_cmdbuf **cs_array,
1247 unsigned cs_count,
1248 struct radeon_cmdbuf *initial_preamble_cs,
1249 struct radeon_cmdbuf *continue_preamble_cs,
1250 struct radv_winsys_sem_info *sem_info,
1251 const struct radv_winsys_bo_list *bo_list,
1252 bool can_patch,
1253 struct radeon_winsys_fence *_fence)
1254 {
1255 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1256 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1257 VkResult result;
1258
1259 assert(sem_info);
1260 if (!cs->ws->use_ib_bos) {
1261 result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1262 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1263 } else if (can_patch) {
1264 result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1265 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1266 } else {
1267 result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1268 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1269 }
1270
1271 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1272 return result;
1273 }
1274
1275 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1276 {
1277 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1278 void *ret = NULL;
1279
1280 if (!cs->ib_buffer)
1281 return NULL;
1282 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1283 struct radv_amdgpu_winsys_bo *bo;
1284
1285 bo = (struct radv_amdgpu_winsys_bo*)
1286 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1287 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1288 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1289 return (char *)ret + (addr - bo->base.va);
1290 }
1291 }
1292 if (cs->ws->debug_all_bos) {
1293 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1294 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1295 &cs->ws->global_bo_list, global_list_item) {
1296 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1297 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1298 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1299 return (char *)ret + (addr - bo->base.va);
1300 }
1301 }
1302 }
1303 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1304 }
1305 return ret;
1306 }
1307
1308 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1309 FILE* file,
1310 const int *trace_ids, int trace_id_count)
1311 {
1312 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1313 void *ib = cs->base.buf;
1314 int num_dw = cs->base.cdw;
1315
1316 if (cs->ws->use_ib_bos) {
1317 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1318 num_dw = cs->ib.size;
1319 }
1320 assert(ib);
1321 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1322 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1323 }
1324
1325 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1326 {
1327 switch (radv_priority) {
1328 case RADEON_CTX_PRIORITY_REALTIME:
1329 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1330 case RADEON_CTX_PRIORITY_HIGH:
1331 return AMDGPU_CTX_PRIORITY_HIGH;
1332 case RADEON_CTX_PRIORITY_MEDIUM:
1333 return AMDGPU_CTX_PRIORITY_NORMAL;
1334 case RADEON_CTX_PRIORITY_LOW:
1335 return AMDGPU_CTX_PRIORITY_LOW;
1336 default:
1337 unreachable("Invalid context priority");
1338 }
1339 }
1340
1341 static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1342 enum radeon_ctx_priority priority,
1343 struct radeon_winsys_ctx **rctx)
1344 {
1345 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1346 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1347 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1348 VkResult result;
1349 int r;
1350
1351 if (!ctx)
1352 return VK_ERROR_OUT_OF_HOST_MEMORY;
1353
1354 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1355 if (r && r == -EACCES) {
1356 result = VK_ERROR_NOT_PERMITTED_EXT;
1357 goto fail_create;
1358 } else if (r) {
1359 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1360 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1361 goto fail_create;
1362 }
1363 ctx->ws = ws;
1364
1365 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1366 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1367 RADEON_DOMAIN_GTT,
1368 RADEON_FLAG_CPU_ACCESS |
1369 RADEON_FLAG_NO_INTERPROCESS_SHARING,
1370 RADV_BO_PRIORITY_CS);
1371 if (!ctx->fence_bo) {
1372 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1373 goto fail_alloc;
1374 }
1375
1376 ctx->fence_map = (uint64_t *)ws->base.buffer_map(ctx->fence_bo);
1377 if (!ctx->fence_map) {
1378 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
1379 goto fail_map;
1380 }
1381
1382 memset(ctx->fence_map, 0, 4096);
1383
1384 *rctx = (struct radeon_winsys_ctx *)ctx;
1385 return VK_SUCCESS;
1386
1387 fail_map:
1388 ws->base.buffer_destroy(ctx->fence_bo);
1389 fail_alloc:
1390 amdgpu_cs_ctx_free(ctx->ctx);
1391 fail_create:
1392 FREE(ctx);
1393 return result;
1394 }
1395
1396 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1397 {
1398 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1399 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1400 amdgpu_cs_ctx_free(ctx->ctx);
1401 FREE(ctx);
1402 }
1403
1404 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1405 enum ring_type ring_type, int ring_index)
1406 {
1407 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1408 int ip_type = ring_to_hw_ip(ring_type);
1409
1410 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1411 uint32_t expired;
1412 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1413 1000000000ull, 0, &expired);
1414
1415 if (ret || !expired)
1416 return false;
1417 }
1418
1419 return true;
1420 }
1421
1422 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1423 {
1424 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1425 if (!sem)
1426 return NULL;
1427
1428 return (struct radeon_winsys_sem *)sem;
1429 }
1430
1431 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1432 {
1433 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1434 FREE(sem);
1435 }
1436
1437 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1438 uint32_t ip_type,
1439 uint32_t ring,
1440 struct radv_winsys_sem_info *sem_info)
1441 {
1442 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1443 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1444
1445 if (sem->context)
1446 return -EINVAL;
1447
1448 *sem = ctx->last_submission[ip_type][ring].fence;
1449 }
1450 return 0;
1451 }
1452
1453 static void *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1454 const uint32_t *syncobj_override,
1455 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1456 {
1457 const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
1458 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1459 if (!syncobj)
1460 return NULL;
1461
1462 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1463 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1464 sem->handle = src[i];
1465 }
1466
1467 chunk->chunk_id = chunk_id;
1468 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1469 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1470 return syncobj;
1471 }
1472
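/* Build an AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_{WAIT,SIGNAL} chunk: binary
 * syncobjs come first with point 0, followed by the timeline syncobjs
 * with their points taken from counts->points. */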
1473 static void *
1474 radv_amdgpu_cs_alloc_timeline_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1475 const uint32_t *syncobj_override,
1476 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1477 {
1478 const uint32_t *src = syncobj_override ? syncobj_override : counts->syncobj;
1479 struct drm_amdgpu_cs_chunk_syncobj *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_syncobj) *
1480 (counts->syncobj_count + counts->timeline_syncobj_count));
1481 if (!syncobj)
1482 return NULL;
1483
1484 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1485 struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i];
1486 sem->handle = src[i];
1487 sem->flags = 0;
1488 sem->point = 0;
1489 }
1490
1491 for (unsigned i = 0; i < counts->timeline_syncobj_count; i++) {
1492 struct drm_amdgpu_cs_chunk_syncobj *sem = &syncobj[i + counts->syncobj_count];
1493 sem->handle = counts->syncobj[i + counts->syncobj_count];
1494 sem->flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
1495 sem->point = counts->points[i];
1496 }
1497
1498 chunk->chunk_id = chunk_id;
1499 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_syncobj) / 4 *
1500 (counts->syncobj_count + counts->timeline_syncobj_count);
1501 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1502 return syncobj;
1503 }
1504
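/* The winsys caches binary syncobjs that are reused as temporary wait
 * objects. Hand out 'count' handles from the cache, creating new syncobjs
 * when the cache runs dry. */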
1505 static int radv_amdgpu_cache_alloc_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *dst)
1506 {
1507 pthread_mutex_lock(&ws->syncobj_lock);
1508 if (count > ws->syncobj_capacity) {
1509 if (ws->syncobj_capacity > UINT32_MAX / 2)
1510 goto fail;
1511
1512 unsigned new_capacity = MAX2(count, ws->syncobj_capacity * 2);
1513 uint32_t *n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
1514 if (!n)
1515 goto fail;
1516 ws->syncobj_capacity = new_capacity;
1517 ws->syncobj = n;
1518 }
1519
1520 while (ws->syncobj_count < count) {
1521 int r = amdgpu_cs_create_syncobj(ws->dev, ws->syncobj + ws->syncobj_count);
1522 if (r)
1523 goto fail;
1524 ++ws->syncobj_count;
1525 }
1526
1527 for (unsigned i = 0; i < count; ++i)
1528 dst[i] = ws->syncobj[--ws->syncobj_count];
1529
1530 pthread_mutex_unlock(&ws->syncobj_lock);
1531 return 0;
1532
1533 fail:
1534 pthread_mutex_unlock(&ws->syncobj_lock);
1535 return -ENOMEM;
1536 }
1537
1538 static void radv_amdgpu_cache_free_syncobjs(struct radv_amdgpu_winsys *ws, unsigned count, uint32_t *src)
1539 {
1540 pthread_mutex_lock(&ws->syncobj_lock);
1541
1542 uint32_t cache_count = MIN2(count, UINT32_MAX - ws->syncobj_count);
1543 if (cache_count + ws->syncobj_count > ws->syncobj_capacity) {
1544 unsigned new_capacity = MAX2(ws->syncobj_count + cache_count, ws->syncobj_capacity * 2);
1545 uint32_t* n = realloc(ws->syncobj, new_capacity * sizeof(*ws->syncobj));
1546 if (n) {
1547 ws->syncobj_capacity = new_capacity;
1548 ws->syncobj = n;
1549 }
1550 }
1551
1552 for (unsigned i = 0; i < count; ++i) {
1553 if (ws->syncobj_count < ws->syncobj_capacity)
1554 ws->syncobj[ws->syncobj_count++] = src[i];
1555 else
1556 amdgpu_cs_destroy_syncobj(ws->dev, src[i]);
1557 }
1558
1559 pthread_mutex_unlock(&ws->syncobj_lock);
1560
1561 }
1562
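/* When the kernel supports timeline syncobjs, the binary wait syncobjs
 * are transferred into temporary syncobjs from the cache so that the
 * originals can be reset right away; the submission then waits on the
 * copies with DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT. */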
1563 static int radv_amdgpu_cs_prepare_syncobjs(struct radv_amdgpu_winsys *ws,
1564 struct radv_winsys_sem_counts *counts,
1565 uint32_t **out_syncobjs)
1566 {
1567 int r = 0;
1568
1569 if (!ws->info.has_timeline_syncobj || !counts->syncobj_count) {
1570 *out_syncobjs = NULL;
1571 return 0;
1572 }
1573
1574 *out_syncobjs = malloc(counts->syncobj_count * sizeof(**out_syncobjs));
1575 if (!*out_syncobjs)
1576 return -ENOMEM;
1577
1578 r = radv_amdgpu_cache_alloc_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
1579 if (r)
1580 return r;
1581
1582 for (unsigned i = 0; i < counts->syncobj_count; ++i) {
1583 r = amdgpu_cs_syncobj_transfer(ws->dev, (*out_syncobjs)[i], 0, counts->syncobj[i], 0, DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT);
1584 if (r)
1585 goto fail;
1586 }
1587
1588 r = amdgpu_cs_syncobj_reset(ws->dev, counts->syncobj, counts->syncobj_reset_count);
1589 if (r)
1590 goto fail;
1591
1592 return 0;
1593 fail:
1594 radv_amdgpu_cache_free_syncobjs(ws, counts->syncobj_count, *out_syncobjs);
1595 free(*out_syncobjs);
1596 *out_syncobjs = NULL;
1597 return r;
1598 }
1599
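/* Assemble the CS ioctl chunks for a request: the IB chunks, an optional
 * user fence chunk, a (timeline) syncobj wait chunk, a legacy dependencies
 * chunk, a (timeline) syncobj signal chunk and the BO list (either as a
 * BO_HANDLES chunk or through the legacy bo_list_create path), then submit
 * them with amdgpu_cs_submit_raw2(). */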
1600 static VkResult
1601 radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1602 struct radv_amdgpu_cs_request *request,
1603 struct radv_winsys_sem_info *sem_info)
1604 {
1605 int r;
1606 int num_chunks;
1607 int size;
1608 bool user_fence;
1609 struct drm_amdgpu_cs_chunk *chunks;
1610 struct drm_amdgpu_cs_chunk_data *chunk_data;
1611 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1612 bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
1613 struct drm_amdgpu_bo_list_in bo_list_in;
1614 void *wait_syncobj = NULL, *signal_syncobj = NULL;
1615 uint32_t *in_syncobjs = NULL;
1616 int i;
1617 struct amdgpu_cs_fence *sem;
1618 uint32_t bo_list = 0;
1619 VkResult result = VK_SUCCESS;
1620
1621 user_fence = (request->fence_info.handle != NULL);
1622 size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;
1623
1624 chunks = malloc(sizeof(chunks[0]) * size);
1625 if (!chunks)
1626 return VK_ERROR_OUT_OF_HOST_MEMORY;
1627
1628 size = request->number_of_ibs + (user_fence ? 1 : 0);
1629
1630 chunk_data = malloc(sizeof(chunk_data[0]) * size);
1631 if (!chunk_data) {
1632 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1633 goto error_out;
1634 }
1635
1636 num_chunks = request->number_of_ibs;
1637 for (i = 0; i < request->number_of_ibs; i++) {
1638 struct amdgpu_cs_ib_info *ib;
1639 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1640 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1641 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1642
1643 ib = &request->ibs[i];
1644
1645 chunk_data[i].ib_data._pad = 0;
1646 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1647 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1648 chunk_data[i].ib_data.ip_type = request->ip_type;
1649 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1650 chunk_data[i].ib_data.ring = request->ring;
1651 chunk_data[i].ib_data.flags = ib->flags;
1652 }
1653
1654 if (user_fence) {
1655 i = num_chunks++;
1656
1657 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1658 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1659 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1660
1661 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1662 &chunk_data[i]);
1663 }
1664
1665 if ((sem_info->wait.syncobj_count || sem_info->wait.timeline_syncobj_count) && sem_info->cs_emit_wait) {
1666 r = radv_amdgpu_cs_prepare_syncobjs(ctx->ws, &sem_info->wait, &in_syncobjs);
1667 if (r)
1668 goto error_out;
1669
1670 if (ctx->ws->info.has_timeline_syncobj) {
1671 wait_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->wait,
1672 in_syncobjs,
1673 &chunks[num_chunks],
1674 AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT);
1675 } else {
1676 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1677 in_syncobjs,
1678 &chunks[num_chunks],
1679 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1680 }
1681 if (!wait_syncobj) {
1682 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1683 goto error_out;
1684 }
1685 num_chunks++;
1686
1687 if (sem_info->wait.sem_count == 0)
1688 sem_info->cs_emit_wait = false;
1689
1690 }
1691
1692 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1693 sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
1694 if (!sem_dependencies) {
1695 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1696 goto error_out;
1697 }
1698
1699 int sem_count = 0;
1700
1701 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1702 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1703 if (!sem->context)
1704 continue;
1705 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1706
1707 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1708
1709 sem->context = NULL;
1710 }
1711 i = num_chunks++;
1712
1713 /* dependencies chunk */
1714 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1715 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1716 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1717
1718 sem_info->cs_emit_wait = false;
1719 }
1720
1721 if ((sem_info->signal.syncobj_count || sem_info->signal.timeline_syncobj_count) && sem_info->cs_emit_signal) {
1722 if (ctx->ws->info.has_timeline_syncobj) {
1723 signal_syncobj = radv_amdgpu_cs_alloc_timeline_syncobj_chunk(&sem_info->signal,
1724 NULL,
1725 &chunks[num_chunks],
1726 AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL);
1727 } else {
1728 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1729 NULL,
1730 &chunks[num_chunks],
1731 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1732 }
1733 if (!signal_syncobj) {
1734 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1735 goto error_out;
1736 }
1737 num_chunks++;
1738 }
1739
1740 if (use_bo_list_create) {
1741 /* Legacy path creating the buffer list handle and passing it
1742 * to the CS ioctl.
1743 */
1744 r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
1745 request->handles, &bo_list);
1746 if (r) {
1747 if (r == -ENOMEM) {
1748 fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
1749 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1750 } else {
1751 fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
1752 result = VK_ERROR_UNKNOWN;
1753 }
1754 goto error_out;
1755 }
1756 } else {
1757 /* Standard path passing the buffer list via the CS ioctl. */
1758 bo_list_in.operation = ~0;
1759 bo_list_in.list_handle = ~0;
1760 bo_list_in.bo_number = request->num_handles;
1761 bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
1762 bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;
1763
1764 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
1765 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
1766 chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
1767 num_chunks++;
1768 }
1769
1770 r = amdgpu_cs_submit_raw2(ctx->ws->dev,
1771 ctx->ctx,
1772 bo_list,
1773 num_chunks,
1774 chunks,
1775 &request->seq_no);
1776
1777 if (bo_list)
1778 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1779
1780 if (r) {
1781 if (r == -ENOMEM) {
1782 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1783 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1784 } else if (r == -ECANCELED) {
1785 fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1786 result = VK_ERROR_DEVICE_LOST;
1787 } else {
1788 fprintf(stderr, "amdgpu: The CS has been rejected, "
1789 "see dmesg for more information (%i).\n", r);
1790 result = VK_ERROR_UNKNOWN;
1791 }
1792 }
1793
1794 error_out:
1795 if (in_syncobjs) {
1796 radv_amdgpu_cache_free_syncobjs(ctx->ws, sem_info->wait.syncobj_count, in_syncobjs);
1797 free(in_syncobjs);
1798 }
1799 free(chunks);
1800 free(chunk_data);
1801 free(sem_dependencies);
1802 free(wait_syncobj);
1803 free(signal_syncobj);
1804 return result;
1805 }
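
/* Illustrative sketch, not code used by this file: how a caller could make
 * a submission wait on point 3 of a timeline syncobj and signal point 4 of
 * another one through this winsys. The handles and point values are made
 * up; only the radv_winsys_sem_counts fields that this file reads
 * (syncobj, points, syncobj_count, timeline_syncobj_count, cs_emit_*) are
 * assumed here.
 *
 *   uint32_t wait_handles[] = { binary_syncobj, in_timeline_syncobj };
 *   uint64_t wait_points[]  = { 3 }; // one point per timeline syncobj
 *   uint32_t sig_handles[]  = { out_timeline_syncobj };
 *   uint64_t sig_points[]   = { 4 };
 *
 *   struct radv_winsys_sem_info sem_info = {0};
 *   sem_info.wait.syncobj = wait_handles; // binary syncobjs first, then timeline
 *   sem_info.wait.points = wait_points;
 *   sem_info.wait.syncobj_count = 1;
 *   sem_info.wait.timeline_syncobj_count = 1;
 *   sem_info.cs_emit_wait = true;
 *
 *   sem_info.signal.syncobj = sig_handles;
 *   sem_info.signal.points = sig_points;
 *   sem_info.signal.syncobj_count = 0;
 *   sem_info.signal.timeline_syncobj_count = 1;
 *   sem_info.cs_emit_signal = true;
 *
 *   ws->base.cs_submit(ctx, queue_idx, cs_array, cs_count,
 *                      NULL, NULL, &sem_info, bo_list, can_patch, fence);
 */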
1806
1807 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1808 bool create_signaled,
1809 uint32_t *handle)
1810 {
1811 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1812 uint32_t flags = 0;
1813
1814 if (create_signaled)
1815 flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
1816
1817 return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
1818 }
1819
1820 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1821 uint32_t handle)
1822 {
1823 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1824 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1825 }
1826
1827 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1828 uint32_t handle)
1829 {
1830 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1831 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1832 }
1833
1834 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1835 uint32_t handle)
1836 {
1837 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1838 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1839 }
1840
1841 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1842 uint32_t handle_count, bool wait_all, uint64_t timeout)
1843 {
1844 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1845 uint32_t tmp;
1846
1847 /* The kernel timeout is signed (int64_t), while Vulkan timeouts are unsigned. */
1848 timeout = MIN2(timeout, INT64_MAX);
1849
1850 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1851 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1852 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1853 &tmp);
1854 if (ret == 0) {
1855 return true;
1856 } else if (ret == -ETIME) {
1857 return false;
1858 } else {
1859 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1860 return false;
1861 }
1862 }
1863
1864 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1865 uint32_t syncobj,
1866 int *fd)
1867 {
1868 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1869
1870 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1871 }
1872
1873 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1874 int fd,
1875 uint32_t *syncobj)
1876 {
1877 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1878
1879 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1880 }
1881
1882
1883 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1884 uint32_t syncobj,
1885 int *fd)
1886 {
1887 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1888
1889 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1890 }
1891
1892 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1893 uint32_t syncobj,
1894 int fd)
1895 {
1896 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1897
1898 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1899 }
1900
1901 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1902 {
1903 ws->base.ctx_create = radv_amdgpu_ctx_create;
1904 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1905 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1906 ws->base.cs_create = radv_amdgpu_cs_create;
1907 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1908 ws->base.cs_grow = radv_amdgpu_cs_grow;
1909 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1910 ws->base.cs_reset = radv_amdgpu_cs_reset;
1911 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1912 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1913 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1914 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1915 ws->base.create_fence = radv_amdgpu_create_fence;
1916 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1917 ws->base.reset_fence = radv_amdgpu_reset_fence;
1918 ws->base.signal_fence = radv_amdgpu_signal_fence;
1919 ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
1920 ws->base.create_sem = radv_amdgpu_create_sem;
1921 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1922 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1923 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1924 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1925 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1926 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1927 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1928 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1929 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1930 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1931 ws->base.fence_wait = radv_amdgpu_fence_wait;
1932 ws->base.fences_wait = radv_amdgpu_fences_wait;
1933 }