radv/winsys: replace alloca() by malloc() everywhere
[mesa.git] src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include "drm-uapi/amdgpu_drm.h"
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34 #include "radv_radeon_winsys.h"
35 #include "radv_amdgpu_cs.h"
36 #include "radv_amdgpu_bo.h"
37 #include "sid.h"
38
39
40 enum {
41 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
42 };
43
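/* A short orientation comment (added commentary, derived from the functions in
 * this file): buffer_hash_table is a direct-mapped cache from BO handle to an
 * index into handles[]; -1 marks an empty slot and collisions fall back to the
 * linear scan in radv_amdgpu_cs_find_buffer(). When use_ib_bos is set, commands
 * are written straight into ib_mapped and chained together with
 * INDIRECT_BUFFER packets, with old_ib_buffers keeping previous IBs alive until
 * the CS is reset. Without IB BOs the commands live in malloc'ed memory and
 * full buffers are parked in old_cs_buffers until submission.
 */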
44 struct radv_amdgpu_cs {
45 struct radeon_cmdbuf base;
46 struct radv_amdgpu_winsys *ws;
47
48 struct amdgpu_cs_ib_info ib;
49
50 struct radeon_winsys_bo *ib_buffer;
51 uint8_t *ib_mapped;
52 unsigned max_num_buffers;
53 unsigned num_buffers;
54 struct drm_amdgpu_bo_list_entry *handles;
55
56 struct radeon_winsys_bo **old_ib_buffers;
57 unsigned num_old_ib_buffers;
58 unsigned max_num_old_ib_buffers;
59 unsigned *ib_size_ptr;
60 VkResult status;
61 bool is_chained;
62
63 int buffer_hash_table[1024];
64 unsigned hw_ip;
65
66 unsigned num_virtual_buffers;
67 unsigned max_num_virtual_buffers;
68 struct radeon_winsys_bo **virtual_buffers;
69 int *virtual_buffer_hash_table;
70
71 /* For chips that don't support chaining. */
72 struct radeon_cmdbuf *old_cs_buffers;
73 unsigned num_old_cs_buffers;
74 };
75
76 static inline struct radv_amdgpu_cs *
77 radv_amdgpu_cs(struct radeon_cmdbuf *base)
78 {
79 return (struct radv_amdgpu_cs*)base;
80 }
81
82 static int ring_to_hw_ip(enum ring_type ring)
83 {
84 switch (ring) {
85 case RING_GFX:
86 return AMDGPU_HW_IP_GFX;
87 case RING_DMA:
88 return AMDGPU_HW_IP_DMA;
89 case RING_COMPUTE:
90 return AMDGPU_HW_IP_COMPUTE;
91 default:
92 unreachable("unsupported ring");
93 }
94 }
95
96 struct radv_amdgpu_cs_request {
97 /** Specify flags with additional information */
98 uint64_t flags;
99
100 /** Specify HW IP block type to which to send the IB. */
101 unsigned ip_type;
102
103 /** IP instance index if there are several IPs of the same type. */
104 unsigned ip_instance;
105
106 /**
107 	 * Specify the ring index of the IP. There can be several rings
108 	 * in the same IP, e.g. 0 for SDMA0 and 1 for SDMA1.
109 */
110 uint32_t ring;
111
112 /**
113 * List handle with resources used by this request. This is a raw
114 * bo list handle used by the kernel.
115 */
116 uint32_t resources;
117
118 /**
119 	 * Number of dependencies this command submission needs to
120 * wait for before starting execution.
121 */
122 uint32_t number_of_dependencies;
123
124 /**
125 * Array of dependencies which need to be met before
126 * execution can start.
127 */
128 struct amdgpu_cs_fence *dependencies;
129
130 /** Number of IBs to submit in the field ibs. */
131 uint32_t number_of_ibs;
132
133 /**
134 	 * IBs to submit. These IBs will be submitted together as a single entity.
135 */
136 struct amdgpu_cs_ib_info *ibs;
137
138 /**
139 * The returned sequence number for the command submission
140 */
141 uint64_t seq_no;
142
143 /**
144 * The fence information
145 */
146 struct amdgpu_cs_fence_info fence_info;
147 };
148
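/* Illustrative sketch only (not part of the original file): how the submit
 * paths below fill a request. bo_list, queue_idx and sem_info stand in for
 * caller-provided values; see radv_amdgpu_winsys_cs_submit_chained() for the
 * real code.
 *
 *	struct radv_amdgpu_cs_request request = {0};
 *	request.ip_type = cs->hw_ip;
 *	request.ring = queue_idx;
 *	request.resources = bo_list;
 *	request.number_of_ibs = 1;
 *	request.ibs = &cs->ib;
 *	request.fence_info = radv_set_cs_fence(ctx, cs->hw_ip, queue_idx);
 *	r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
 */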
149
150 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
151 uint32_t ip_type,
152 uint32_t ring,
153 struct radv_winsys_sem_info *sem_info);
154 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
155 struct radv_amdgpu_cs_request *request,
156 struct radv_winsys_sem_info *sem_info);
157
158 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
159 struct radv_amdgpu_fence *fence,
160 struct radv_amdgpu_cs_request *req)
161 {
162 fence->fence.context = ctx->ctx;
163 fence->fence.ip_type = req->ip_type;
164 fence->fence.ip_instance = req->ip_instance;
165 fence->fence.ring = req->ring;
166 fence->fence.fence = req->seq_no;
167 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
168 }
169
170 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
171 {
172 	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
	if (!fence)
		return NULL;

173 	fence->fence.fence = UINT64_MAX;
174 return (struct radeon_winsys_fence*)fence;
175 }
176
177 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
178 {
179 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
180 free(fence);
181 }
182
183 static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
184 {
185 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
186 fence->fence.fence = UINT64_MAX;
187 }
188
189 static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
190 {
191 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
192 fence->fence.fence = 0;
193 }
194
195 static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
196 {
197 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
198 return fence->fence.fence < UINT64_MAX;
199 }
200
201 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
202 struct radeon_winsys_fence *_fence,
203 bool absolute,
204 uint64_t timeout)
205 {
206 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
207 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
208 int r;
209 uint32_t expired = 0;
210
211 /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
212 if (fence->fence.fence == UINT64_MAX)
213 return false;
214
215 if (fence->fence.fence == 0)
216 return true;
217
218 if (fence->user_ptr) {
219 if (*fence->user_ptr >= fence->fence.fence)
220 return true;
221 if (!absolute && !timeout)
222 return false;
223 }
224
225 /* Now use the libdrm query. */
226 r = amdgpu_cs_query_fence_status(&fence->fence,
227 timeout,
228 flags,
229 &expired);
230
231 if (r) {
232 fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
233 return false;
234 }
235
236 if (expired)
237 return true;
238
239 return false;
240 }
241
242
243 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
244 struct radeon_winsys_fence *const *_fences,
245 uint32_t fence_count,
246 bool wait_all,
247 uint64_t timeout)
248 {
249 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
250 int r;
251 uint32_t expired = 0, first = 0;
252
253 if (!fences)
254 return false;
255
256 for (uint32_t i = 0; i < fence_count; ++i)
257 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
258
259 /* Now use the libdrm query. */
260 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
261 timeout, &expired, &first);
262
263 free(fences);
264 if (r) {
265 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
266 return false;
267 }
268
269 if (expired)
270 return true;
271
272 return false;
273 }
274
275 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
276 {
277 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
278
279 if (cs->ib_buffer)
280 cs->ws->base.buffer_destroy(cs->ib_buffer);
281 else
282 free(cs->base.buf);
283
284 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
285 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
286
287 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
288 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
289 free(rcs->buf);
290 }
291
292 free(cs->old_cs_buffers);
293 free(cs->old_ib_buffers);
294 free(cs->virtual_buffers);
295 free(cs->virtual_buffer_hash_table);
296 free(cs->handles);
297 free(cs);
298 }
299
300 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
301 enum ring_type ring_type)
302 {
303 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
304 cs->buffer_hash_table[i] = -1;
305
306 cs->hw_ip = ring_to_hw_ip(ring_type);
307 }
308
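/* CS creation: with IB BOs, commands are written directly into a CPU-mapped,
 * read-only GTT buffer of 20 * 1024 dwords; otherwise a plain malloc'ed buffer
 * of 4096 dwords is used and grown on demand by radv_amdgpu_cs_grow().
 */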
309 static struct radeon_cmdbuf *
310 radv_amdgpu_cs_create(struct radeon_winsys *ws,
311 enum ring_type ring_type)
312 {
313 struct radv_amdgpu_cs *cs;
314 uint32_t ib_size = 20 * 1024 * 4;
315 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
316 if (!cs)
317 return NULL;
318
319 cs->ws = radv_amdgpu_winsys(ws);
320 radv_amdgpu_init_cs(cs, ring_type);
321
322 if (cs->ws->use_ib_bos) {
323 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
324 RADEON_DOMAIN_GTT,
325 RADEON_FLAG_CPU_ACCESS |
326 RADEON_FLAG_NO_INTERPROCESS_SHARING |
327 RADEON_FLAG_READ_ONLY,
328 RADV_BO_PRIORITY_CS);
329 if (!cs->ib_buffer) {
330 free(cs);
331 return NULL;
332 }
333
334 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
335 if (!cs->ib_mapped) {
336 ws->buffer_destroy(cs->ib_buffer);
337 free(cs);
338 return NULL;
339 }
340
341 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
342 cs->base.buf = (uint32_t *)cs->ib_mapped;
343 cs->base.max_dw = ib_size / 4 - 4;
344 cs->ib_size_ptr = &cs->ib.size;
345 cs->ib.size = 0;
346
347 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
348 } else {
349 cs->base.buf = malloc(16384);
350 cs->base.max_dw = 4096;
351 if (!cs->base.buf) {
352 free(cs);
353 return NULL;
354 }
355 }
356
357 return &cs->base;
358 }
359
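/* Growing a chained CS (use_ib_bos): the current IB is padded with NOPs so
 * that, together with the 4-dword chain packet emitted below, its size becomes
 * a multiple of 8 dwords; a new IB buffer is then allocated and a packet of
 * the form
 *
 *	PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0)
 *	new_va
 *	new_va >> 32
 *	S_3F2_CHAIN(1) | S_3F2_VALID(1) | <size, patched later>
 *
 * is written. ib_size_ptr is left pointing at that last dword so the final
 * size can be filled in by radv_amdgpu_cs_finalize() or by the next grow.
 * Without IB BOs the CS simply grows in host memory, spilling into
 * old_cs_buffers once the per-IB dword limit is reached.
 */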
360 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
361 {
362 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
363
364 if (cs->status != VK_SUCCESS) {
365 cs->base.cdw = 0;
366 return;
367 }
368
369 if (!cs->ws->use_ib_bos) {
370 const uint64_t limit_dws = 0xffff8;
371 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
372 MIN2(cs->base.max_dw * 2, limit_dws));
373
374 /* The total ib size cannot exceed limit_dws dwords. */
375 if (ib_dws > limit_dws)
376 {
377 			/* The maximum size in dwords has been reached; store the
378 			 * current buffer for later submission and allocate a new one.
379 			 */
380 cs->old_cs_buffers =
381 realloc(cs->old_cs_buffers,
382 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
383 if (!cs->old_cs_buffers) {
384 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
385 cs->base.cdw = 0;
386 return;
387 }
388
389 /* Store the current one for submitting it later. */
390 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
391 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
392 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
393 cs->num_old_cs_buffers++;
394
395 /* Reset the cs, it will be re-allocated below. */
396 cs->base.cdw = 0;
397 cs->base.buf = NULL;
398
399 /* Re-compute the number of dwords to allocate. */
400 ib_dws = MAX2(cs->base.cdw + min_size,
401 MIN2(cs->base.max_dw * 2, limit_dws));
402 if (ib_dws > limit_dws) {
403 fprintf(stderr, "amdgpu: Too high number of "
404 "dwords to allocate\n");
405 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
406 return;
407 }
408 }
409
410 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
411 if (new_buf) {
412 cs->base.buf = new_buf;
413 cs->base.max_dw = ib_dws;
414 } else {
415 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
416 cs->base.cdw = 0;
417 }
418 return;
419 }
420
421 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
422
423 /* max that fits in the chain size field. */
424 ib_size = MIN2(ib_size, 0xfffff);
425
426 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
427 radeon_emit(&cs->base, PKT3_NOP_PAD);
428
429 *cs->ib_size_ptr |= cs->base.cdw + 4;
430
431 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
432 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
433 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
434 cs->max_num_old_ib_buffers * sizeof(void*));
435 }
436
437 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
438
439 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
440 RADEON_DOMAIN_GTT,
441 RADEON_FLAG_CPU_ACCESS |
442 RADEON_FLAG_NO_INTERPROCESS_SHARING |
443 RADEON_FLAG_READ_ONLY,
444 RADV_BO_PRIORITY_CS);
445
446 if (!cs->ib_buffer) {
447 cs->base.cdw = 0;
448 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
449 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
450 }
451
452 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
453 if (!cs->ib_mapped) {
454 cs->ws->base.buffer_destroy(cs->ib_buffer);
455 cs->base.cdw = 0;
456
457 /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
458 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
459 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
460 }
461
462 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
463
464 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
465 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
466 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
467 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
468
469 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
470
471 cs->base.buf = (uint32_t *)cs->ib_mapped;
472 cs->base.cdw = 0;
473 cs->base.max_dw = ib_size / 4 - 4;
474
475 }
476
477 static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
478 {
479 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
480
481 if (cs->ws->use_ib_bos) {
482 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
483 radeon_emit(&cs->base, PKT3_NOP_PAD);
484
485 *cs->ib_size_ptr |= cs->base.cdw;
486
487 cs->is_chained = false;
488 }
489
490 return cs->status;
491 }
492
493 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
494 {
495 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
496 cs->base.cdw = 0;
497 cs->status = VK_SUCCESS;
498
499 for (unsigned i = 0; i < cs->num_buffers; ++i) {
500 unsigned hash = cs->handles[i].bo_handle &
501 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
502 cs->buffer_hash_table[hash] = -1;
503 }
504
505 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
506 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
507 cs->virtual_buffer_hash_table[hash] = -1;
508 }
509
510 cs->num_buffers = 0;
511 cs->num_virtual_buffers = 0;
512
513 if (cs->ws->use_ib_bos) {
514 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
515
516 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
517 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
518
519 cs->num_old_ib_buffers = 0;
520 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
521 cs->ib_size_ptr = &cs->ib.size;
522 cs->ib.size = 0;
523 } else {
524 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
525 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
526 free(rcs->buf);
527 }
528
529 free(cs->old_cs_buffers);
530 cs->old_cs_buffers = NULL;
531 cs->num_old_cs_buffers = 0;
532 }
533 }
534
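/* BO lookup: hash the KMS handle by masking with the (power-of-two) table
 * size; on a collision, fall back to a linear scan of handles[] and refresh
 * the cached index. Returns -1 if the BO is not in the list.
 */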
535 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
536 uint32_t bo)
537 {
538 unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
539 int index = cs->buffer_hash_table[hash];
540
541 if (index == -1)
542 return -1;
543
544 if (cs->handles[index].bo_handle == bo)
545 return index;
546
547 for (unsigned i = 0; i < cs->num_buffers; ++i) {
548 if (cs->handles[i].bo_handle == bo) {
549 cs->buffer_hash_table[hash] = i;
550 return i;
551 }
552 }
553
554 return -1;
555 }
556
557 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
558 uint32_t bo, uint8_t priority)
559 {
560 unsigned hash;
561 int index = radv_amdgpu_cs_find_buffer(cs, bo);
562
563 if (index != -1 || cs->status != VK_SUCCESS)
564 return;
565
566 if (cs->num_buffers == cs->max_num_buffers) {
567 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
568 struct drm_amdgpu_bo_list_entry *new_entries =
569 realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
570 if (new_entries) {
571 cs->max_num_buffers = new_count;
572 cs->handles = new_entries;
573 } else {
574 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
575 return;
576 }
577 }
578
579 cs->handles[cs->num_buffers].bo_handle = bo;
580 cs->handles[cs->num_buffers].bo_priority = priority;
581
582 hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
583 cs->buffer_hash_table[hash] = cs->num_buffers;
584
585 ++cs->num_buffers;
586 }
587
588 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
589 struct radeon_winsys_bo *bo)
590 {
591 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
592 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
593
594
595 if (!cs->virtual_buffer_hash_table) {
596 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
597 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
598 cs->virtual_buffer_hash_table[i] = -1;
599 }
600
601 if (cs->virtual_buffer_hash_table[hash] >= 0) {
602 int idx = cs->virtual_buffer_hash_table[hash];
603 if (cs->virtual_buffers[idx] == bo) {
604 return;
605 }
606 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
607 if (cs->virtual_buffers[i] == bo) {
608 cs->virtual_buffer_hash_table[hash] = i;
609 return;
610 }
611 }
612 }
613
614 	if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
615 		cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
616 		cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
617 	}
618
619 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
620
621 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
622 ++cs->num_virtual_buffers;
623
624 }
625
626 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
627 struct radeon_winsys_bo *_bo)
628 {
629 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
630 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
631
632 if (bo->is_virtual) {
633 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
634 return;
635 }
636
637 if (bo->base.is_local)
638 return;
639
640 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
641 }
642
643 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
644 struct radeon_cmdbuf *_child)
645 {
646 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
647 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
648
649 for (unsigned i = 0; i < child->num_buffers; ++i) {
650 radv_amdgpu_cs_add_buffer_internal(parent,
651 child->handles[i].bo_handle,
652 child->handles[i].bo_priority);
653 }
654
655 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
656 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
657 }
658
659 if (parent->ws->use_ib_bos) {
660 if (parent->base.cdw + 4 > parent->base.max_dw)
661 radv_amdgpu_cs_grow(&parent->base, 4);
662
663 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
664 radeon_emit(&parent->base, child->ib.ib_mc_address);
665 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
666 radeon_emit(&parent->base, child->ib.size);
667 } else {
668 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
669 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
670
671 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
672 parent->base.cdw += child->base.cdw;
673 }
674 }
675
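/* BO list creation takes one of three paths: with debug_all_bos, every buffer
 * on the winsys global list is submitted; a single CS with no extra BOs,
 * preamble, radv_bo_list or virtual buffers reuses its handles[] array
 * directly; otherwise the handles of all command streams, extra BOs, virtual
 * (sparse) buffers and the per-submit radv_bo_list are merged with a simple
 * O(n^2) de-duplication before calling amdgpu_bo_list_create_raw().
 */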
676 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
677 struct radeon_cmdbuf **cs_array,
678 unsigned count,
679 struct radv_amdgpu_winsys_bo **extra_bo_array,
680 unsigned num_extra_bo,
681 struct radeon_cmdbuf *extra_cs,
682 const struct radv_winsys_bo_list *radv_bo_list,
683 uint32_t *bo_list)
684 {
685 int r = 0;
686
687 if (ws->debug_all_bos) {
688 struct radv_amdgpu_winsys_bo *bo;
689 struct drm_amdgpu_bo_list_entry *handles;
690 unsigned num = 0;
691
692 pthread_mutex_lock(&ws->global_bo_list_lock);
693
694 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
695 if (!handles) {
696 pthread_mutex_unlock(&ws->global_bo_list_lock);
697 return -ENOMEM;
698 }
699
700 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
701 assert(num < ws->num_buffers);
702 handles[num].bo_handle = bo->bo_handle;
703 handles[num].bo_priority = bo->priority;
704 num++;
705 }
706
707 r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers,
708 handles, bo_list);
709 free(handles);
710 pthread_mutex_unlock(&ws->global_bo_list_lock);
711 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
712 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
713 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
714 if (cs->num_buffers == 0) {
715 *bo_list = 0;
716 return 0;
717 }
718 r = amdgpu_bo_list_create_raw(ws->dev, cs->num_buffers, cs->handles,
719 bo_list);
720 } else {
721 unsigned total_buffer_count = num_extra_bo;
722 unsigned unique_bo_count = num_extra_bo;
723 for (unsigned i = 0; i < count; ++i) {
724 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
725 total_buffer_count += cs->num_buffers;
726 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
727 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
728 }
729
730 if (extra_cs) {
731 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
732 }
733
734 if (radv_bo_list) {
735 total_buffer_count += radv_bo_list->count;
736 }
737
738 if (total_buffer_count == 0) {
739 *bo_list = 0;
740 return 0;
741 }
742 struct drm_amdgpu_bo_list_entry *handles = malloc(sizeof(struct drm_amdgpu_bo_list_entry) * total_buffer_count);
743 if (!handles)
744 return -ENOMEM;
745
746 for (unsigned i = 0; i < num_extra_bo; i++) {
747 handles[i].bo_handle = extra_bo_array[i]->bo_handle;
748 handles[i].bo_priority = extra_bo_array[i]->priority;
749 }
750
751 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
752 struct radv_amdgpu_cs *cs;
753
754 if (i == count)
755 cs = (struct radv_amdgpu_cs*)extra_cs;
756 else
757 cs = (struct radv_amdgpu_cs*)cs_array[i];
758
759 if (!cs->num_buffers)
760 continue;
761
762 if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
763 memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
764 unique_bo_count = cs->num_buffers;
765 continue;
766 }
767 int unique_bo_so_far = unique_bo_count;
768 for (unsigned j = 0; j < cs->num_buffers; ++j) {
769 bool found = false;
770 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
771 if (handles[k].bo_handle == cs->handles[j].bo_handle) {
772 found = true;
773 break;
774 }
775 }
776 if (!found) {
777 handles[unique_bo_count] = cs->handles[j];
778 ++unique_bo_count;
779 }
780 }
781 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
782 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
783 for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
784 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
785 bool found = false;
786 for (unsigned m = 0; m < unique_bo_count; ++m) {
787 if (handles[m].bo_handle == bo->bo_handle) {
788 found = true;
789 break;
790 }
791 }
792 if (!found) {
793 handles[unique_bo_count].bo_handle = bo->bo_handle;
794 handles[unique_bo_count].bo_priority = bo->priority;
795 ++unique_bo_count;
796 }
797 }
798 }
799 }
800
801 if (radv_bo_list) {
802 unsigned unique_bo_so_far = unique_bo_count;
803 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
804 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
805 bool found = false;
806 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
807 if (bo->bo_handle == handles[j].bo_handle) {
808 found = true;
809 break;
810 }
811 }
812 if (!found) {
813 handles[unique_bo_count].bo_handle = bo->bo_handle;
814 handles[unique_bo_count].bo_priority = bo->priority;
815 ++unique_bo_count;
816 }
817 }
818 }
819
820 if (unique_bo_count > 0) {
821 r = amdgpu_bo_list_create_raw(ws->dev, unique_bo_count, handles,
822 bo_list);
823 } else {
824 *bo_list = 0;
825 }
826
827 free(handles);
828 }
829
830 return r;
831 }
832
833 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
834 {
835 struct amdgpu_cs_fence_info ret = {0};
836 if (ctx->fence_map) {
837 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
838 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
839 }
840 return ret;
841 }
842
843 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
844 struct radv_amdgpu_cs_request *request)
845 {
846 radv_amdgpu_request_to_fence(ctx,
847 &ctx->last_submission[request->ip_type][request->ring],
848 request);
849 }
850
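/* Chained submission: every CS except the last gets a trailing
 * INDIRECT_BUFFER packet pointing at the next CS, and its size dword
 * (*ib_size_ptr) grows by the 4 dwords of that packet, so the kernel only
 * sees one or two IBs per submission (an optional preamble plus the first CS).
 */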
851 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
852 int queue_idx,
853 struct radv_winsys_sem_info *sem_info,
854 const struct radv_winsys_bo_list *radv_bo_list,
855 struct radeon_cmdbuf **cs_array,
856 unsigned cs_count,
857 struct radeon_cmdbuf *initial_preamble_cs,
858 struct radeon_cmdbuf *continue_preamble_cs,
859 struct radeon_winsys_fence *_fence)
860 {
861 int r;
862 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
863 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
864 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
865 uint32_t bo_list;
866 struct radv_amdgpu_cs_request request = {0};
867 struct amdgpu_cs_ib_info ibs[2];
868 unsigned number_of_ibs = 1;
869
870 for (unsigned i = cs_count; i--;) {
871 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
872
873 if (cs->is_chained) {
874 *cs->ib_size_ptr -= 4;
875 cs->is_chained = false;
876 }
877
878 if (i + 1 < cs_count) {
879 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
880 assert(cs->base.cdw + 4 <= cs->base.max_dw);
881
882 cs->is_chained = true;
883 *cs->ib_size_ptr += 4;
884
885 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
886 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
887 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
888 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
889 }
890 }
891
892 /* Create a buffer object list. */
893 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
894 initial_preamble_cs, radv_bo_list,
895 &bo_list);
896 if (r) {
897 fprintf(stderr, "amdgpu: buffer list creation failed for the "
898 			"chained submission (%d)\n", r);
899 return r;
900 }
901
902 /* Configure the CS request. */
903 if (initial_preamble_cs) {
904 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
905 ibs[1] = cs0->ib;
906 number_of_ibs++;
907 } else {
908 ibs[0] = cs0->ib;
909 }
910
911 request.ip_type = cs0->hw_ip;
912 request.ring = queue_idx;
913 request.number_of_ibs = number_of_ibs;
914 request.ibs = ibs;
915 request.resources = bo_list;
916 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
917
918 /* Submit the CS. */
919 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
920 if (r) {
921 if (r == -ENOMEM)
922 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
923 else
924 fprintf(stderr, "amdgpu: The CS has been rejected, "
925 "see dmesg for more information.\n");
926 }
927
928 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
929
930 if (r)
931 return r;
932
933 if (fence)
934 radv_amdgpu_request_to_fence(ctx, fence, &request);
935
936 radv_assign_last_submit(ctx, &request);
937
938 return 0;
939 }
940
941 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
942 int queue_idx,
943 struct radv_winsys_sem_info *sem_info,
944 const struct radv_winsys_bo_list *radv_bo_list,
945 struct radeon_cmdbuf **cs_array,
946 unsigned cs_count,
947 struct radeon_cmdbuf *initial_preamble_cs,
948 struct radeon_cmdbuf *continue_preamble_cs,
949 struct radeon_winsys_fence *_fence)
950 {
951 int r;
952 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
953 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
954 uint32_t bo_list;
955 struct radv_amdgpu_cs_request request = {};
956 struct amdgpu_cs_ib_info *ibs;
957 struct radv_amdgpu_cs *cs0;
958 unsigned number_of_ibs;
959
960 assert(cs_count);
961 cs0 = radv_amdgpu_cs(cs_array[0]);
962
963 /* Compute the number of IBs for this submit. */
964 number_of_ibs = cs_count + !!initial_preamble_cs;
965
966 /* Create a buffer object list. */
967 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
968 initial_preamble_cs, radv_bo_list,
969 &bo_list);
970 if (r) {
971 fprintf(stderr, "amdgpu: buffer list creation failed "
972 "for the fallback submission (%d)\n", r);
973 return r;
974 }
975
976 ibs = malloc(number_of_ibs * sizeof(*ibs));
977 if (!ibs) {
978 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
979 return -ENOMEM;
980 }
981
982 /* Configure the CS request. */
983 if (initial_preamble_cs)
984 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
985
986 for (unsigned i = 0; i < cs_count; i++) {
987 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
988
989 ibs[i + !!initial_preamble_cs] = cs->ib;
990
991 if (cs->is_chained) {
992 *cs->ib_size_ptr -= 4;
993 cs->is_chained = false;
994 }
995 }
996
997 request.ip_type = cs0->hw_ip;
998 request.ring = queue_idx;
999 request.resources = bo_list;
1000 request.number_of_ibs = number_of_ibs;
1001 request.ibs = ibs;
1002 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1003
1004 /* Submit the CS. */
1005 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1006 if (r) {
1007 if (r == -ENOMEM)
1008 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1009 else
1010 fprintf(stderr, "amdgpu: The CS has been rejected, "
1011 "see dmesg for more information.\n");
1012 }
1013
1014 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1015 free(ibs);
1016
1017 if (r)
1018 return r;
1019
1020 if (fence)
1021 radv_amdgpu_request_to_fence(ctx, fence, &request);
1022
1023 radv_assign_last_submit(ctx, &request);
1024
1025 return 0;
1026 }
1027
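/* Sysmem submission (no IB BOs): the host-memory command streams are copied
 * into freshly allocated GTT buffers, padded with NOP dwords to a multiple of
 * 8 (PKT3_NOP_PAD, or type-2 NOPs of 0x80000000 on GFX6), and grouped into
 * batches that stay below the per-IB dword limit before being submitted.
 */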
1028 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
1029 int queue_idx,
1030 struct radv_winsys_sem_info *sem_info,
1031 const struct radv_winsys_bo_list *radv_bo_list,
1032 struct radeon_cmdbuf **cs_array,
1033 unsigned cs_count,
1034 struct radeon_cmdbuf *initial_preamble_cs,
1035 struct radeon_cmdbuf *continue_preamble_cs,
1036 struct radeon_winsys_fence *_fence)
1037 {
1038 int r;
1039 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1040 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
1041 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1042 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
1043 uint32_t bo_list;
1044 struct radv_amdgpu_cs_request request;
1045 uint32_t pad_word = PKT3_NOP_PAD;
1046 bool emit_signal_sem = sem_info->cs_emit_signal;
1047
1048 if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
1049 pad_word = 0x80000000;
1050
1051 assert(cs_count);
1052
1053 for (unsigned i = 0; i < cs_count;) {
1054 struct amdgpu_cs_ib_info *ibs;
1055 struct radeon_winsys_bo **bos;
1056 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1057 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1058 unsigned number_of_ibs;
1059 uint32_t *ptr;
1060 unsigned cnt = 0;
1061 unsigned size = 0;
1062 unsigned pad_words = 0;
1063
1064 /* Compute the number of IBs for this submit. */
1065 number_of_ibs = cs->num_old_cs_buffers + 1;
1066
1067 ibs = malloc(number_of_ibs * sizeof(*ibs));
1068 if (!ibs)
1069 return -ENOMEM;
1070
1071 bos = malloc(number_of_ibs * sizeof(*bos));
1072 if (!bos) {
1073 free(ibs);
1074 return -ENOMEM;
1075 }
1076
1077 if (number_of_ibs > 1) {
1078 /* Special path when the maximum size in dwords has
1079 * been reached because we need to handle more than one
1080 * IB per submit.
1081 */
1082 struct radeon_cmdbuf **new_cs_array;
1083 unsigned idx = 0;
1084
1085 new_cs_array = malloc(cs->num_old_cs_buffers *
1086 sizeof(*new_cs_array));
1087 assert(new_cs_array);
1088
1089 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1090 new_cs_array[idx++] = &cs->old_cs_buffers[j];
1091 new_cs_array[idx++] = cs_array[i];
1092
1093 for (unsigned j = 0; j < number_of_ibs; j++) {
1094 struct radeon_cmdbuf *rcs = new_cs_array[j];
1095 bool needs_preamble = preamble_cs && j == 0;
1096 unsigned size = 0;
1097
1098 if (needs_preamble)
1099 size += preamble_cs->cdw;
1100 size += rcs->cdw;
1101
1102 assert(size < 0xffff8);
1103
1104 while (!size || (size & 7)) {
1105 size++;
1106 pad_words++;
1107 }
1108
1109 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1110 RADEON_DOMAIN_GTT,
1111 RADEON_FLAG_CPU_ACCESS |
1112 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1113 RADEON_FLAG_READ_ONLY,
1114 RADV_BO_PRIORITY_CS);
1115 ptr = ws->buffer_map(bos[j]);
1116
1117 if (needs_preamble) {
1118 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1119 ptr += preamble_cs->cdw;
1120 }
1121
1122 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1123 ptr += rcs->cdw;
1124
1125 for (unsigned k = 0; k < pad_words; ++k)
1126 *ptr++ = pad_word;
1127
1128 ibs[j].size = size;
1129 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1130 ibs[j].flags = 0;
1131 }
1132
1133 cnt++;
1134 free(new_cs_array);
1135 } else {
1136 if (preamble_cs)
1137 size += preamble_cs->cdw;
1138
1139 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1140 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1141 ++cnt;
1142 }
1143
1144 while (!size || (size & 7)) {
1145 size++;
1146 pad_words++;
1147 }
1148 assert(cnt);
1149
1150 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1151 RADEON_DOMAIN_GTT,
1152 RADEON_FLAG_CPU_ACCESS |
1153 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1154 RADEON_FLAG_READ_ONLY,
1155 RADV_BO_PRIORITY_CS);
1156 ptr = ws->buffer_map(bos[0]);
1157
1158 if (preamble_cs) {
1159 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1160 ptr += preamble_cs->cdw;
1161 }
1162
1163 for (unsigned j = 0; j < cnt; ++j) {
1164 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1165 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1166 ptr += cs->base.cdw;
1167
1168 }
1169
1170 for (unsigned j = 0; j < pad_words; ++j)
1171 *ptr++ = pad_word;
1172
1173 ibs[0].size = size;
1174 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1175 ibs[0].flags = 0;
1176 }
1177
1178 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
1179 (struct radv_amdgpu_winsys_bo **)bos,
1180 number_of_ibs, preamble_cs,
1181 radv_bo_list, &bo_list);
1182 if (r) {
1183 fprintf(stderr, "amdgpu: buffer list creation failed "
1184 "for the sysmem submission (%d)\n", r);
1185 free(ibs);
1186 free(bos);
1187 return r;
1188 }
1189
1190 memset(&request, 0, sizeof(request));
1191
1192 request.ip_type = cs0->hw_ip;
1193 request.ring = queue_idx;
1194 request.resources = bo_list;
1195 request.number_of_ibs = number_of_ibs;
1196 request.ibs = ibs;
1197 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1198
1199 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1200 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1201 if (r) {
1202 if (r == -ENOMEM)
1203 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1204 else
1205 fprintf(stderr, "amdgpu: The CS has been rejected, "
1206 "see dmesg for more information.\n");
1207 }
1208
1209 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1210
1211 for (unsigned j = 0; j < number_of_ibs; j++) {
1212 ws->buffer_destroy(bos[j]);
1213 }
1214
1215 free(ibs);
1216 free(bos);
1217
1218 if (r)
1219 return r;
1220
1221 i += cnt;
1222 }
1223 if (fence)
1224 radv_amdgpu_request_to_fence(ctx, fence, &request);
1225
1226 radv_assign_last_submit(ctx, &request);
1227
1228 return 0;
1229 }
1230
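/* Top-level submit: use the sysmem path when IB BOs are unavailable, the
 * chained path when the command streams may be patched in place, and the
 * fallback path (one IB per CS) otherwise; afterwards the context's last
 * submission fence is propagated to any plain (non-syncobj) semaphores that
 * need to be signalled.
 */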
1231 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1232 int queue_idx,
1233 struct radeon_cmdbuf **cs_array,
1234 unsigned cs_count,
1235 struct radeon_cmdbuf *initial_preamble_cs,
1236 struct radeon_cmdbuf *continue_preamble_cs,
1237 struct radv_winsys_sem_info *sem_info,
1238 const struct radv_winsys_bo_list *bo_list,
1239 bool can_patch,
1240 struct radeon_winsys_fence *_fence)
1241 {
1242 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1243 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1244 int ret;
1245
1246 assert(sem_info);
1247 if (!cs->ws->use_ib_bos) {
1248 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1249 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1250 } else if (can_patch) {
1251 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1252 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1253 } else {
1254 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1255 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1256 }
1257
1258 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1259 return ret;
1260 }
1261
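/* Translate a GPU VA back to a CPU pointer by searching the current and old
 * IB buffers (and, with debug_all_bos, the global BO list), mapping the
 * matching buffer on demand. Used as the address callback for ac_parse_ib()
 * in radv_amdgpu_winsys_cs_dump() below.
 */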
1262 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1263 {
1264 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1265 void *ret = NULL;
1266
1267 if (!cs->ib_buffer)
1268 return NULL;
1269 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1270 struct radv_amdgpu_winsys_bo *bo;
1271
1272 bo = (struct radv_amdgpu_winsys_bo*)
1273 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1274 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1275 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1276 return (char *)ret + (addr - bo->base.va);
1277 }
1278 }
1279 if(cs->ws->debug_all_bos) {
1280 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1281 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1282 &cs->ws->global_bo_list, global_list_item) {
1283 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1284 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1285 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1286 return (char *)ret + (addr - bo->base.va);
1287 }
1288 }
1289 }
1290 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1291 }
1292 return ret;
1293 }
1294
1295 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1296 FILE* file,
1297 const int *trace_ids, int trace_id_count)
1298 {
1299 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1300 void *ib = cs->base.buf;
1301 int num_dw = cs->base.cdw;
1302
1303 if (cs->ws->use_ib_bos) {
1304 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1305 num_dw = cs->ib.size;
1306 }
1307 assert(ib);
1308 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1309 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1310 }
1311
1312 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1313 {
1314 switch (radv_priority) {
1315 case RADEON_CTX_PRIORITY_REALTIME:
1316 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1317 case RADEON_CTX_PRIORITY_HIGH:
1318 return AMDGPU_CTX_PRIORITY_HIGH;
1319 case RADEON_CTX_PRIORITY_MEDIUM:
1320 return AMDGPU_CTX_PRIORITY_NORMAL;
1321 case RADEON_CTX_PRIORITY_LOW:
1322 return AMDGPU_CTX_PRIORITY_LOW;
1323 default:
1324 unreachable("Invalid context priority");
1325 }
1326 }
1327
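/* Context creation: one amdgpu context per radeon_winsys_ctx, plus a small
 * GTT buffer whose uint64_t slots (one per IP type and ring, see
 * radv_set_cs_fence()) are written by the kernel's user-fence mechanism and
 * read back through fence->user_ptr for cheap CPU-side fence checks.
 */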
1328 static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1329 enum radeon_ctx_priority priority,
1330 struct radeon_winsys_ctx **rctx)
1331 {
1332 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1333 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1334 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1335 VkResult result;
1336 int r;
1337
1338 if (!ctx)
1339 return VK_ERROR_OUT_OF_HOST_MEMORY;
1340
1341 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1342 if (r && r == -EACCES) {
1343 result = VK_ERROR_NOT_PERMITTED_EXT;
1344 goto error_create;
1345 } else if (r) {
1346 fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1347 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1348 goto error_create;
1349 }
1350 ctx->ws = ws;
1351
1352 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1353 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1354 RADEON_DOMAIN_GTT,
1355 RADEON_FLAG_CPU_ACCESS |
1356 RADEON_FLAG_NO_INTERPROCESS_SHARING,
1357 RADV_BO_PRIORITY_CS);
1358 if (ctx->fence_bo)
1359 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1360 if (ctx->fence_map)
1361 memset(ctx->fence_map, 0, 4096);
1362
1363 *rctx = (struct radeon_winsys_ctx *)ctx;
1364 return VK_SUCCESS;
1365 error_create:
1366 FREE(ctx);
1367 return result;
1368 }
1369
1370 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1371 {
1372 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1373 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1374 amdgpu_cs_ctx_free(ctx->ctx);
1375 FREE(ctx);
1376 }
1377
1378 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1379 enum ring_type ring_type, int ring_index)
1380 {
1381 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1382 int ip_type = ring_to_hw_ip(ring_type);
1383
1384 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1385 uint32_t expired;
1386 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1387 1000000000ull, 0, &expired);
1388
1389 if (ret || !expired)
1390 return false;
1391 }
1392
1393 return true;
1394 }
1395
1396 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1397 {
1398 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1399 if (!sem)
1400 return NULL;
1401
1402 return (struct radeon_winsys_sem *)sem;
1403 }
1404
1405 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1406 {
1407 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1408 FREE(sem);
1409 }
1410
1411 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1412 uint32_t ip_type,
1413 uint32_t ring,
1414 struct radv_winsys_sem_info *sem_info)
1415 {
1416 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1417 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1418
1419 if (sem->context)
1420 return -EINVAL;
1421
1422 *sem = ctx->last_submission[ip_type][ring].fence;
1423 }
1424 return 0;
1425 }
1426
1427 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1428 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1429 {
1430 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1431 if (!syncobj)
1432 return NULL;
1433
1434 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1435 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1436 sem->handle = counts->syncobj[i];
1437 }
1438
1439 chunk->chunk_id = chunk_id;
1440 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1441 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1442 return syncobj;
1443 }
1444
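/* Raw submission: the request is translated into a drm_amdgpu_cs_chunk array
 * (one IB chunk per amdgpu_cs_ib_info, an optional user-fence chunk, syncobj
 * wait/signal chunks and a legacy dependencies chunk) and handed to
 * amdgpu_cs_submit_raw2() together with the raw BO list handle.
 */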
1445 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1446 struct radv_amdgpu_cs_request *request,
1447 struct radv_winsys_sem_info *sem_info)
1448 {
1449 int r;
1450 int num_chunks;
1451 int size;
1452 bool user_fence;
1453 struct drm_amdgpu_cs_chunk *chunks;
1454 struct drm_amdgpu_cs_chunk_data *chunk_data;
1455 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1456 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1457 int i;
1458 struct amdgpu_cs_fence *sem;
1459
1460 user_fence = (request->fence_info.handle != NULL);
1461 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1462
1463 chunks = malloc(sizeof(chunks[0]) * size);
1464 if (!chunks)
1465 return -ENOMEM;
1466
1467 size = request->number_of_ibs + (user_fence ? 1 : 0);
1468
1469 chunk_data = malloc(sizeof(chunk_data[0]) * size);
1470 if (!chunk_data) {
1471 r = -ENOMEM;
1472 goto error_out;
1473 }
1474
1475 num_chunks = request->number_of_ibs;
1476 for (i = 0; i < request->number_of_ibs; i++) {
1477 struct amdgpu_cs_ib_info *ib;
1478 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1479 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1480 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1481
1482 ib = &request->ibs[i];
1483
1484 chunk_data[i].ib_data._pad = 0;
1485 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1486 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1487 chunk_data[i].ib_data.ip_type = request->ip_type;
1488 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1489 chunk_data[i].ib_data.ring = request->ring;
1490 chunk_data[i].ib_data.flags = ib->flags;
1491 }
1492
1493 if (user_fence) {
1494 i = num_chunks++;
1495
1496 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1497 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1498 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1499
1500 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1501 &chunk_data[i]);
1502 }
1503
1504 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1505 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1506 &chunks[num_chunks],
1507 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1508 if (!wait_syncobj) {
1509 r = -ENOMEM;
1510 goto error_out;
1511 }
1512 num_chunks++;
1513
1514 if (sem_info->wait.sem_count == 0)
1515 sem_info->cs_emit_wait = false;
1516
1517 }
1518
1519 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1520 sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
1521 if (!sem_dependencies) {
1522 r = -ENOMEM;
1523 goto error_out;
1524 }
1525
1526 int sem_count = 0;
1527
1528 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1529 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1530 if (!sem->context)
1531 continue;
1532 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1533
1534 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1535
1536 sem->context = NULL;
1537 }
1538 i = num_chunks++;
1539
1540 /* dependencies chunk */
1541 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1542 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1543 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1544
1545 sem_info->cs_emit_wait = false;
1546 }
1547
1548 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1549 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1550 &chunks[num_chunks],
1551 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1552 if (!signal_syncobj) {
1553 r = -ENOMEM;
1554 goto error_out;
1555 }
1556 num_chunks++;
1557 }
1558
1559 r = amdgpu_cs_submit_raw2(ctx->ws->dev,
1560 ctx->ctx,
1561 request->resources,
1562 num_chunks,
1563 chunks,
1564 &request->seq_no);
1565 error_out:
1566 free(chunks);
1567 free(chunk_data);
1568 free(sem_dependencies);
1569 free(wait_syncobj);
1570 free(signal_syncobj);
1571 return r;
1572 }
1573
1574 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1575 uint32_t *handle)
1576 {
1577 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1578 return amdgpu_cs_create_syncobj(ws->dev, handle);
1579 }
1580
1581 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1582 uint32_t handle)
1583 {
1584 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1585 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1586 }
1587
1588 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1589 uint32_t handle)
1590 {
1591 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1592 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1593 }
1594
1595 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1596 uint32_t handle)
1597 {
1598 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1599 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1600 }
1601
1602 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1603 uint32_t handle_count, bool wait_all, uint64_t timeout)
1604 {
1605 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1606 uint32_t tmp;
1607
1608 	/* The kernel's syncobj wait timeout is signed, while Vulkan timeouts are unsigned. */
1609 timeout = MIN2(timeout, INT64_MAX);
1610
1611 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1612 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1613 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1614 &tmp);
1615 if (ret == 0) {
1616 return true;
1617 } else if (ret == -ETIME) {
1618 return false;
1619 } else {
1620 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1621 return false;
1622 }
1623 }
1624
1625 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1626 uint32_t syncobj,
1627 int *fd)
1628 {
1629 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1630
1631 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1632 }
1633
1634 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1635 int fd,
1636 uint32_t *syncobj)
1637 {
1638 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1639
1640 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1641 }
1642
1643
1644 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1645 uint32_t syncobj,
1646 int *fd)
1647 {
1648 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1649
1650 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1651 }
1652
1653 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1654 uint32_t syncobj,
1655 int fd)
1656 {
1657 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1658
1659 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1660 }
1661
1662 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1663 {
1664 ws->base.ctx_create = radv_amdgpu_ctx_create;
1665 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1666 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1667 ws->base.cs_create = radv_amdgpu_cs_create;
1668 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1669 ws->base.cs_grow = radv_amdgpu_cs_grow;
1670 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1671 ws->base.cs_reset = radv_amdgpu_cs_reset;
1672 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1673 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1674 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1675 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1676 ws->base.create_fence = radv_amdgpu_create_fence;
1677 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1678 ws->base.reset_fence = radv_amdgpu_reset_fence;
1679 ws->base.signal_fence = radv_amdgpu_signal_fence;
1680 ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
1681 ws->base.create_sem = radv_amdgpu_create_sem;
1682 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1683 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1684 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1685 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1686 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1687 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1688 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1689 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1690 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1691 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1692 ws->base.fence_wait = radv_amdgpu_fence_wait;
1693 ws->base.fences_wait = radv_amdgpu_fences_wait;
1694 }