radv: disable CPU caching for IBs to reduce fetch latency
[mesa.git] src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include "drm-uapi/amdgpu_drm.h"
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "util/u_memory.h"
33 #include "ac_debug.h"
34 #include "radv_radeon_winsys.h"
35 #include "radv_amdgpu_cs.h"
36 #include "radv_amdgpu_bo.h"
37 #include "sid.h"
38
39
40 enum {
41 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
42 };
43
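/*
 * A command stream is built in one of two modes: when the winsys supports
 * IB BOs (use_ib_bos), commands are written directly into a GPU-visible
 * buffer and oversized streams are chained on the GPU with INDIRECT_BUFFER
 * packets; otherwise commands are accumulated in CPU memory
 * (base.buf / old_cs_buffers) and copied into freshly allocated BOs at
 * submit time.
 */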
44 struct radv_amdgpu_cs {
45 struct radeon_cmdbuf base;
46 struct radv_amdgpu_winsys *ws;
47
48 struct amdgpu_cs_ib_info ib;
49
50 struct radeon_winsys_bo *ib_buffer;
51 uint8_t *ib_mapped;
52 unsigned max_num_buffers;
53 unsigned num_buffers;
54 struct drm_amdgpu_bo_list_entry *handles;
55
56 struct radeon_winsys_bo **old_ib_buffers;
57 unsigned num_old_ib_buffers;
58 unsigned max_num_old_ib_buffers;
59 unsigned *ib_size_ptr;
60 VkResult status;
61 bool is_chained;
62
63 int buffer_hash_table[1024];
64 unsigned hw_ip;
65
66 unsigned num_virtual_buffers;
67 unsigned max_num_virtual_buffers;
68 struct radeon_winsys_bo **virtual_buffers;
69 int *virtual_buffer_hash_table;
70
71 /* For chips that don't support chaining. */
72 struct radeon_cmdbuf *old_cs_buffers;
73 unsigned num_old_cs_buffers;
74 };
75
76 static inline struct radv_amdgpu_cs *
77 radv_amdgpu_cs(struct radeon_cmdbuf *base)
78 {
79 return (struct radv_amdgpu_cs*)base;
80 }
81
82 static int ring_to_hw_ip(enum ring_type ring)
83 {
84 switch (ring) {
85 case RING_GFX:
86 return AMDGPU_HW_IP_GFX;
87 case RING_DMA:
88 return AMDGPU_HW_IP_DMA;
89 case RING_COMPUTE:
90 return AMDGPU_HW_IP_COMPUTE;
91 default:
92 unreachable("unsupported ring");
93 }
94 }
95
96 struct radv_amdgpu_cs_request {
97 /** Specify flags with additional information */
98 uint64_t flags;
99
100 /** Specify HW IP block type to which to send the IB. */
101 unsigned ip_type;
102
103 /** IP instance index if there are several IPs of the same type. */
104 unsigned ip_instance;
105
106 /**
107 * Specify ring index of the IP. We could have several rings
108 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
109 */
110 uint32_t ring;
111
112 /**
113 * BO list handles used by this request.
114 */
115 struct drm_amdgpu_bo_list_entry *handles;
116 uint32_t num_handles;
117
118 /**
119 * Number of dependencies this command submission needs to
120 * wait for before starting execution.
121 */
122 uint32_t number_of_dependencies;
123
124 /**
125 * Array of dependencies which need to be met before
126 * execution can start.
127 */
128 struct amdgpu_cs_fence *dependencies;
129
130 /** Number of IBs to submit in the field ibs. */
131 uint32_t number_of_ibs;
132
133 /**
134 * IBs to submit. These IBs will be submitted together as a single entity.
135 */
136 struct amdgpu_cs_ib_info *ibs;
137
138 /**
139 * The returned sequence number for the command submission
140 */
141 uint64_t seq_no;
142
143 /**
144 * The fence information
145 */
146 struct amdgpu_cs_fence_info fence_info;
147 };
148
149
150 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
151 uint32_t ip_type,
152 uint32_t ring,
153 struct radv_winsys_sem_info *sem_info);
154 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
155 struct radv_amdgpu_cs_request *request,
156 struct radv_winsys_sem_info *sem_info);
157
158 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
159 struct radv_amdgpu_fence *fence,
160 struct radv_amdgpu_cs_request *req)
161 {
162 fence->fence.context = ctx->ctx;
163 fence->fence.ip_type = req->ip_type;
164 fence->fence.ip_instance = req->ip_instance;
165 fence->fence.ring = req->ring;
166 fence->fence.fence = req->seq_no;
167 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + req->ip_type * MAX_RINGS_PER_TYPE + req->ring);
168 }
169
170 static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
171 {
172 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
173 fence->fence.fence = UINT64_MAX;
174 return (struct radeon_winsys_fence*)fence;
175 }
176
177 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
178 {
179 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
180 free(fence);
181 }
182
183 static void radv_amdgpu_reset_fence(struct radeon_winsys_fence *_fence)
184 {
185 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
186 fence->fence.fence = UINT64_MAX;
187 }
188
189 static void radv_amdgpu_signal_fence(struct radeon_winsys_fence *_fence)
190 {
191 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
192 fence->fence.fence = 0;
193 }
194
195 static bool radv_amdgpu_is_fence_waitable(struct radeon_winsys_fence *_fence)
196 {
197 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
198 return fence->fence.fence < UINT64_MAX;
199 }
200
201 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
202 struct radeon_winsys_fence *_fence,
203 bool absolute,
204 uint64_t timeout)
205 {
206 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
207 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
208 int r;
209 uint32_t expired = 0;
210
211 /* Special casing 0 and UINT64_MAX so that they work without user_ptr/fence.ctx */
212 if (fence->fence.fence == UINT64_MAX)
213 return false;
214
215 if (fence->fence.fence == 0)
216 return true;
217
218 if (fence->user_ptr) {
219 if (*fence->user_ptr >= fence->fence.fence)
220 return true;
221 if (!absolute && !timeout)
222 return false;
223 }
224
225 /* Now use the libdrm query. */
226 r = amdgpu_cs_query_fence_status(&fence->fence,
227 timeout,
228 flags,
229 &expired);
230
231 if (r) {
232 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
233 return false;
234 }
235
236 if (expired)
237 return true;
238
239 return false;
240 }
241
242
243 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
244 struct radeon_winsys_fence *const *_fences,
245 uint32_t fence_count,
246 bool wait_all,
247 uint64_t timeout)
248 {
249 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
250 int r;
251 uint32_t expired = 0, first = 0;
252
253 if (!fences)
254 return false;
255
256 for (uint32_t i = 0; i < fence_count; ++i)
257 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
258
259 /* Now use the libdrm query. */
260 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
261 timeout, &expired, &first);
262
263 free(fences);
264 if (r) {
265 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
266 return false;
267 }
268
269 if (expired)
270 return true;
271
272 return false;
273 }
274
275 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
276 {
277 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
278
279 if (cs->ib_buffer)
280 cs->ws->base.buffer_destroy(cs->ib_buffer);
281 else
282 free(cs->base.buf);
283
284 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
285 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
286
287 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
288 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
289 free(rcs->buf);
290 }
291
292 free(cs->old_cs_buffers);
293 free(cs->old_ib_buffers);
294 free(cs->virtual_buffers);
295 free(cs->virtual_buffer_hash_table);
296 free(cs->handles);
297 free(cs);
298 }
299
300 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
301 enum ring_type ring_type)
302 {
303 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
304 cs->buffer_hash_table[i] = -1;
305
306 cs->hw_ip = ring_to_hw_ip(ring_type);
307 }
308
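/*
 * IB BOs are placed in GTT and mapped write-combined (RADEON_FLAG_GTT_WC),
 * i.e. CPU caching is disabled, which is the point of this change: fetching
 * the IB from uncached system memory is meant to avoid CPU cache snooping
 * and, per the commit subject, reduce IB fetch latency on the GPU side.
 */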
309 static struct radeon_cmdbuf *
310 radv_amdgpu_cs_create(struct radeon_winsys *ws,
311 enum ring_type ring_type)
312 {
313 struct radv_amdgpu_cs *cs;
314 uint32_t ib_size = 20 * 1024 * 4;
315 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
316 if (!cs)
317 return NULL;
318
319 cs->ws = radv_amdgpu_winsys(ws);
320 radv_amdgpu_init_cs(cs, ring_type);
321
322 if (cs->ws->use_ib_bos) {
323 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
324 RADEON_DOMAIN_GTT,
325 RADEON_FLAG_CPU_ACCESS |
326 RADEON_FLAG_NO_INTERPROCESS_SHARING |
327 RADEON_FLAG_READ_ONLY |
328 RADEON_FLAG_GTT_WC,
329 RADV_BO_PRIORITY_CS);
330 if (!cs->ib_buffer) {
331 free(cs);
332 return NULL;
333 }
334
335 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
336 if (!cs->ib_mapped) {
337 ws->buffer_destroy(cs->ib_buffer);
338 free(cs);
339 return NULL;
340 }
341
342 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
343 cs->base.buf = (uint32_t *)cs->ib_mapped;
344 cs->base.max_dw = ib_size / 4 - 4;
345 cs->ib_size_ptr = &cs->ib.size;
346 cs->ib.size = 0;
347
348 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
349 } else {
350 cs->base.buf = malloc(16384);
351 cs->base.max_dw = 4096;
352 if (!cs->base.buf) {
353 free(cs);
354 return NULL;
355 }
356 }
357
358 return &cs->base;
359 }
360
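/*
 * Grow the command stream. Without IB BOs this is a plain realloc capped at
 * limit_dws; once the cap is hit, the current buffer is parked in
 * old_cs_buffers and a fresh CPU buffer is started. With IB BOs the current
 * IB is padded with NOPs so that, including the 4-dword chain packet, its
 * size is a multiple of 8 dwords, a new IB BO is allocated, and a chaining
 * INDIRECT_BUFFER packet pointing at it is emitted; the packet's size field
 * is patched later through ib_size_ptr.
 */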
361 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
362 {
363 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
364
365 if (cs->status != VK_SUCCESS) {
366 cs->base.cdw = 0;
367 return;
368 }
369
370 if (!cs->ws->use_ib_bos) {
371 const uint64_t limit_dws = 0xffff8;
372 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
373 MIN2(cs->base.max_dw * 2, limit_dws));
374
375 /* The total ib size cannot exceed limit_dws dwords. */
376 if (ib_dws > limit_dws)
377 {
378 /* The maximum size in dwords has been reached,
379 * try to allocate a new one.
380 */
381 cs->old_cs_buffers =
382 realloc(cs->old_cs_buffers,
383 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
384 if (!cs->old_cs_buffers) {
385 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
386 cs->base.cdw = 0;
387 return;
388 }
389
390 /* Store the current one for submitting it later. */
391 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
392 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
393 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
394 cs->num_old_cs_buffers++;
395
396 /* Reset the cs, it will be re-allocated below. */
397 cs->base.cdw = 0;
398 cs->base.buf = NULL;
399
400 /* Re-compute the number of dwords to allocate. */
401 ib_dws = MAX2(cs->base.cdw + min_size,
402 MIN2(cs->base.max_dw * 2, limit_dws));
403 if (ib_dws > limit_dws) {
404 fprintf(stderr, "amdgpu: Too high number of "
405 "dwords to allocate\n");
406 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
407 return;
408 }
409 }
410
411 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
412 if (new_buf) {
413 cs->base.buf = new_buf;
414 cs->base.max_dw = ib_dws;
415 } else {
416 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
417 cs->base.cdw = 0;
418 }
419 return;
420 }
421
422 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
423
424 /* max that fits in the chain size field. */
425 ib_size = MIN2(ib_size, 0xfffff);
426
427 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
428 radeon_emit(&cs->base, PKT3_NOP_PAD);
429
430 *cs->ib_size_ptr |= cs->base.cdw + 4;
431
432 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
433 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
434 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
435 cs->max_num_old_ib_buffers * sizeof(void*));
436 }
437
438 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
439
440 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
441 RADEON_DOMAIN_GTT,
442 RADEON_FLAG_CPU_ACCESS |
443 RADEON_FLAG_NO_INTERPROCESS_SHARING |
444 RADEON_FLAG_READ_ONLY |
445 RADEON_FLAG_GTT_WC,
446 RADV_BO_PRIORITY_CS);
447
448 if (!cs->ib_buffer) {
449 cs->base.cdw = 0;
450 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
451 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
452 }
453
454 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
455 if (!cs->ib_mapped) {
456 cs->ws->base.buffer_destroy(cs->ib_buffer);
457 cs->base.cdw = 0;
458
459 /* VK_ERROR_MEMORY_MAP_FAILED is not valid for vkEndCommandBuffer. */
460 cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
461 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
462 }
463
464 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
465
466 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
467 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
468 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
469 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
470
471 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
472
473 cs->base.buf = (uint32_t *)cs->ib_mapped;
474 cs->base.cdw = 0;
475 cs->base.max_dw = ib_size / 4 - 4;
476
477 }
478
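/*
 * Finalize: with IB BOs the stream is padded with NOPs to a multiple of
 * 8 dwords and the final size is written back through ib_size_ptr.
 */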
479 static VkResult radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
480 {
481 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
482
483 if (cs->ws->use_ib_bos) {
484 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
485 radeon_emit(&cs->base, PKT3_NOP_PAD);
486
487 *cs->ib_size_ptr |= cs->base.cdw;
488
489 cs->is_chained = false;
490 }
491
492 return cs->status;
493 }
494
495 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
496 {
497 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
498 cs->base.cdw = 0;
499 cs->status = VK_SUCCESS;
500
501 for (unsigned i = 0; i < cs->num_buffers; ++i) {
502 unsigned hash = cs->handles[i].bo_handle &
503 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
504 cs->buffer_hash_table[hash] = -1;
505 }
506
507 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
508 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
509 cs->virtual_buffer_hash_table[hash] = -1;
510 }
511
512 cs->num_buffers = 0;
513 cs->num_virtual_buffers = 0;
514
515 if (cs->ws->use_ib_bos) {
516 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
517
518 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
519 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
520
521 cs->num_old_ib_buffers = 0;
522 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
523 cs->ib_size_ptr = &cs->ib.size;
524 cs->ib.size = 0;
525 } else {
526 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
527 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
528 free(rcs->buf);
529 }
530
531 free(cs->old_cs_buffers);
532 cs->old_cs_buffers = NULL;
533 cs->num_old_cs_buffers = 0;
534 }
535 }
536
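/*
 * BO tracking uses a small direct-mapped hash table indexed by the low bits
 * of the KMS handle; on a miss the handle array is scanned linearly and the
 * table entry is refreshed.
 */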
537 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
538 uint32_t bo)
539 {
540 unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
541 int index = cs->buffer_hash_table[hash];
542
543 if (index == -1)
544 return -1;
545
546 if (cs->handles[index].bo_handle == bo)
547 return index;
548
549 for (unsigned i = 0; i < cs->num_buffers; ++i) {
550 if (cs->handles[i].bo_handle == bo) {
551 cs->buffer_hash_table[hash] = i;
552 return i;
553 }
554 }
555
556 return -1;
557 }
558
559 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
560 uint32_t bo, uint8_t priority)
561 {
562 unsigned hash;
563 int index = radv_amdgpu_cs_find_buffer(cs, bo);
564
565 if (index != -1 || cs->status != VK_SUCCESS)
566 return;
567
568 if (cs->num_buffers == cs->max_num_buffers) {
569 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
570 struct drm_amdgpu_bo_list_entry *new_entries =
571 realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
572 if (new_entries) {
573 cs->max_num_buffers = new_count;
574 cs->handles = new_entries;
575 } else {
576 cs->status = VK_ERROR_OUT_OF_HOST_MEMORY;
577 return;
578 }
579 }
580
581 cs->handles[cs->num_buffers].bo_handle = bo;
582 cs->handles[cs->num_buffers].bo_priority = priority;
583
584 hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
585 cs->buffer_hash_table[hash] = cs->num_buffers;
586
587 ++cs->num_buffers;
588 }
589
590 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
591 struct radeon_winsys_bo *bo)
592 {
593 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
594 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
595
596
597 if (!cs->virtual_buffer_hash_table) {
598 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
599 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
600 cs->virtual_buffer_hash_table[i] = -1;
601 }
602
603 if (cs->virtual_buffer_hash_table[hash] >= 0) {
604 int idx = cs->virtual_buffer_hash_table[hash];
605 if (cs->virtual_buffers[idx] == bo) {
606 return;
607 }
608 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
609 if (cs->virtual_buffers[i] == bo) {
610 cs->virtual_buffer_hash_table[hash] = i;
611 return;
612 }
613 }
614 }
615
616 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
617 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
618 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * cs->max_num_virtual_buffers);
619 }
620
621 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
622
623 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
624 ++cs->num_virtual_buffers;
625
626 }
627
628 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
629 struct radeon_winsys_bo *_bo)
630 {
631 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
632 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
633
634 if (bo->is_virtual) {
635 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
636 return;
637 }
638
639 if (bo->base.is_local)
640 return;
641
642 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
643 }
644
645 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
646 struct radeon_cmdbuf *_child)
647 {
648 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
649 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
650
651 for (unsigned i = 0; i < child->num_buffers; ++i) {
652 radv_amdgpu_cs_add_buffer_internal(parent,
653 child->handles[i].bo_handle,
654 child->handles[i].bo_priority);
655 }
656
657 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
658 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
659 }
660
661 if (parent->ws->use_ib_bos) {
662 if (parent->base.cdw + 4 > parent->base.max_dw)
663 radv_amdgpu_cs_grow(&parent->base, 4);
664
665 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
666 radeon_emit(&parent->base, child->ib.ib_mc_address);
667 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
668 radeon_emit(&parent->base, child->ib.size);
669 } else {
670 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
671 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
672
673 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
674 parent->base.cdw += child->base.cdw;
675 }
676 }
677
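/*
 * Build the deduplicated BO list for a submission. Fast paths: with
 * debug_all_bos the global winsys list is used wholesale, and a single CS
 * without extras just copies its own handle array; otherwise the handles of
 * all CSes, virtual buffers (expanded to their backing BOs), the extra CS
 * and the per-submit radv_bo_list are merged with duplicate elimination.
 */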
678 static VkResult
679 radv_amdgpu_get_bo_list(struct radv_amdgpu_winsys *ws,
680 struct radeon_cmdbuf **cs_array,
681 unsigned count,
682 struct radv_amdgpu_winsys_bo **extra_bo_array,
683 unsigned num_extra_bo,
684 struct radeon_cmdbuf *extra_cs,
685 const struct radv_winsys_bo_list *radv_bo_list,
686 unsigned *rnum_handles,
687 struct drm_amdgpu_bo_list_entry **rhandles)
688 {
689 struct drm_amdgpu_bo_list_entry *handles = NULL;
690 unsigned num_handles = 0;
691
692 if (ws->debug_all_bos) {
693 struct radv_amdgpu_winsys_bo *bo;
694
695 pthread_mutex_lock(&ws->global_bo_list_lock);
696
697 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
698 if (!handles) {
699 pthread_mutex_unlock(&ws->global_bo_list_lock);
700 return VK_ERROR_OUT_OF_HOST_MEMORY;
701 }
702
703 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
704 assert(num_handles < ws->num_buffers);
705 handles[num_handles].bo_handle = bo->bo_handle;
706 handles[num_handles].bo_priority = bo->priority;
707 num_handles++;
708 }
709
710 pthread_mutex_unlock(&ws->global_bo_list_lock);
711 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
712 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
713 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
714 if (cs->num_buffers == 0)
715 return VK_SUCCESS;
716
717 handles = malloc(sizeof(handles[0]) * cs->num_buffers);
718 if (!handles)
719 return VK_ERROR_OUT_OF_HOST_MEMORY;
720
721 memcpy(handles, cs->handles,
722 sizeof(handles[0]) * cs->num_buffers);
723 num_handles = cs->num_buffers;
724 } else {
725 unsigned total_buffer_count = num_extra_bo;
726 num_handles = num_extra_bo;
727 for (unsigned i = 0; i < count; ++i) {
728 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
729 total_buffer_count += cs->num_buffers;
730 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
731 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
732 }
733
734 if (extra_cs) {
735 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
736 }
737
738 if (radv_bo_list) {
739 total_buffer_count += radv_bo_list->count;
740 }
741
742 if (total_buffer_count == 0)
743 return VK_SUCCESS;
744
745 handles = malloc(sizeof(handles[0]) * total_buffer_count);
746 if (!handles)
747 return VK_ERROR_OUT_OF_HOST_MEMORY;
748
749 for (unsigned i = 0; i < num_extra_bo; i++) {
750 handles[i].bo_handle = extra_bo_array[i]->bo_handle;
751 handles[i].bo_priority = extra_bo_array[i]->priority;
752 }
753
754 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
755 struct radv_amdgpu_cs *cs;
756
757 if (i == count)
758 cs = (struct radv_amdgpu_cs*)extra_cs;
759 else
760 cs = (struct radv_amdgpu_cs*)cs_array[i];
761
762 if (!cs->num_buffers)
763 continue;
764
765 if (num_handles == 0 && !cs->num_virtual_buffers) {
766 memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
767 num_handles = cs->num_buffers;
768 continue;
769 }
770 int unique_bo_so_far = num_handles;
771 for (unsigned j = 0; j < cs->num_buffers; ++j) {
772 bool found = false;
773 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
774 if (handles[k].bo_handle == cs->handles[j].bo_handle) {
775 found = true;
776 break;
777 }
778 }
779 if (!found) {
780 handles[num_handles] = cs->handles[j];
781 ++num_handles;
782 }
783 }
784 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
785 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
786 for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
787 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
788 bool found = false;
789 for (unsigned m = 0; m < num_handles; ++m) {
790 if (handles[m].bo_handle == bo->bo_handle) {
791 found = true;
792 break;
793 }
794 }
795 if (!found) {
796 handles[num_handles].bo_handle = bo->bo_handle;
797 handles[num_handles].bo_priority = bo->priority;
798 ++num_handles;
799 }
800 }
801 }
802 }
803
804 if (radv_bo_list) {
805 unsigned unique_bo_so_far = num_handles;
806 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
807 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
808 bool found = false;
809 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
810 if (bo->bo_handle == handles[j].bo_handle) {
811 found = true;
812 break;
813 }
814 }
815 if (!found) {
816 handles[num_handles].bo_handle = bo->bo_handle;
817 handles[num_handles].bo_priority = bo->priority;
818 ++num_handles;
819 }
820 }
821 }
822 }
823
824 *rhandles = handles;
825 *rnum_handles = num_handles;
826
827 return VK_SUCCESS;
828 }
829
830 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
831 {
832 struct amdgpu_cs_fence_info ret = {0};
833 if (ctx->fence_map) {
834 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
835 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
836 }
837 return ret;
838 }
839
840 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
841 struct radv_amdgpu_cs_request *request)
842 {
843 radv_amdgpu_request_to_fence(ctx,
844 &ctx->last_submission[request->ip_type][request->ring],
845 request);
846 }
847
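/*
 * Chained submission: any previously applied chain packets are backed out,
 * then each CS except the last gets a trailing INDIRECT_BUFFER packet
 * pointing at the next CS, so the kernel only sees the first IB (plus an
 * optional preamble IB).
 */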
848 static VkResult
849 radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
850 int queue_idx,
851 struct radv_winsys_sem_info *sem_info,
852 const struct radv_winsys_bo_list *radv_bo_list,
853 struct radeon_cmdbuf **cs_array,
854 unsigned cs_count,
855 struct radeon_cmdbuf *initial_preamble_cs,
856 struct radeon_cmdbuf *continue_preamble_cs,
857 struct radeon_winsys_fence *_fence)
858 {
859 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
860 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
861 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
862 struct drm_amdgpu_bo_list_entry *handles = NULL;
863 struct radv_amdgpu_cs_request request = {0};
864 struct amdgpu_cs_ib_info ibs[2];
865 unsigned number_of_ibs = 1;
866 unsigned num_handles = 0;
867 VkResult result;
868
869 for (unsigned i = cs_count; i--;) {
870 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
871
872 if (cs->is_chained) {
873 *cs->ib_size_ptr -= 4;
874 cs->is_chained = false;
875 }
876
877 if (i + 1 < cs_count) {
878 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
879 assert(cs->base.cdw + 4 <= cs->base.max_dw);
880
881 cs->is_chained = true;
882 *cs->ib_size_ptr += 4;
883
884 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
885 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
886 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
887 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
888 }
889 }
890
891 /* Get the BO list. */
892 result = radv_amdgpu_get_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
893 initial_preamble_cs, radv_bo_list,
894 &num_handles, &handles);
895 if (result != VK_SUCCESS)
896 return result;
897
898 /* Configure the CS request. */
899 if (initial_preamble_cs) {
900 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
901 ibs[1] = cs0->ib;
902 number_of_ibs++;
903 } else {
904 ibs[0] = cs0->ib;
905 }
906
907 request.ip_type = cs0->hw_ip;
908 request.ring = queue_idx;
909 request.number_of_ibs = number_of_ibs;
910 request.ibs = ibs;
911 request.handles = handles;
912 request.num_handles = num_handles;
913 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
914
915 /* Submit the CS. */
916 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
917
918 free(request.handles);
919
920 if (result != VK_SUCCESS)
921 return result;
922
923 if (fence)
924 radv_amdgpu_request_to_fence(ctx, fence, &request);
925
926 radv_assign_last_submit(ctx, &request);
927
928 return VK_SUCCESS;
929 }
930
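/*
 * Fallback submission for CSes that cannot be patched/chained: previously
 * applied chain packets are backed out and every CS is passed to the kernel
 * as its own IB in a single request, preceded by the optional preamble IB.
 */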
931 static VkResult
932 radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
933 int queue_idx,
934 struct radv_winsys_sem_info *sem_info,
935 const struct radv_winsys_bo_list *radv_bo_list,
936 struct radeon_cmdbuf **cs_array,
937 unsigned cs_count,
938 struct radeon_cmdbuf *initial_preamble_cs,
939 struct radeon_cmdbuf *continue_preamble_cs,
940 struct radeon_winsys_fence *_fence)
941 {
942 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
943 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
944 struct drm_amdgpu_bo_list_entry *handles = NULL;
945 struct radv_amdgpu_cs_request request = {};
946 struct amdgpu_cs_ib_info *ibs;
947 struct radv_amdgpu_cs *cs0;
948 unsigned num_handles = 0;
949 unsigned number_of_ibs;
950 VkResult result;
951
952 assert(cs_count);
953 cs0 = radv_amdgpu_cs(cs_array[0]);
954
955 /* Compute the number of IBs for this submit. */
956 number_of_ibs = cs_count + !!initial_preamble_cs;
957
958 /* Get the BO list. */
959 result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
960 initial_preamble_cs, radv_bo_list,
961 &num_handles, &handles);
962 if (result != VK_SUCCESS)
963 return result;
964
965 ibs = malloc(number_of_ibs * sizeof(*ibs));
966 if (!ibs) {
967 free(handles); /* the BO list is still only in the local variable here */
968 return VK_ERROR_OUT_OF_HOST_MEMORY;
969 }
970
971 /* Configure the CS request. */
972 if (initial_preamble_cs)
973 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
974
975 for (unsigned i = 0; i < cs_count; i++) {
976 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
977
978 ibs[i + !!initial_preamble_cs] = cs->ib;
979
980 if (cs->is_chained) {
981 *cs->ib_size_ptr -= 4;
982 cs->is_chained = false;
983 }
984 }
985
986 request.ip_type = cs0->hw_ip;
987 request.ring = queue_idx;
988 request.handles = handles;
989 request.num_handles = num_handles;
990 request.number_of_ibs = number_of_ibs;
991 request.ibs = ibs;
992 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
993
994 /* Submit the CS. */
995 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
996
997 free(request.handles);
998 free(ibs);
999
1000 if (result != VK_SUCCESS)
1001 return result;
1002
1003 if (fence)
1004 radv_amdgpu_request_to_fence(ctx, fence, &request);
1005
1006 radv_assign_last_submit(ctx, &request);
1007
1008 return VK_SUCCESS;
1009 }
1010
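/*
 * Submission path for the no-IB-BO case: the CPU-built command buffers are
 * copied into temporary GTT BOs (grouped while they fit under the 0xffff8
 * dword limit), padded with NOPs to a multiple of 8 dwords, submitted, and
 * the temporary BOs are destroyed again. GFX6 uses 0x80000000 (type-2 NOP)
 * as the pad word.
 */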
1011 static VkResult
1012 radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
1013 int queue_idx,
1014 struct radv_winsys_sem_info *sem_info,
1015 const struct radv_winsys_bo_list *radv_bo_list,
1016 struct radeon_cmdbuf **cs_array,
1017 unsigned cs_count,
1018 struct radeon_cmdbuf *initial_preamble_cs,
1019 struct radeon_cmdbuf *continue_preamble_cs,
1020 struct radeon_winsys_fence *_fence)
1021 {
1022 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1023 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
1024 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1025 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
1026 struct radv_amdgpu_cs_request request;
1027 uint32_t pad_word = PKT3_NOP_PAD;
1028 bool emit_signal_sem = sem_info->cs_emit_signal;
1029 VkResult result;
1030
1031 if (radv_amdgpu_winsys(ws)->info.chip_class == GFX6)
1032 pad_word = 0x80000000;
1033
1034 assert(cs_count);
1035
1036 for (unsigned i = 0; i < cs_count;) {
1037 struct amdgpu_cs_ib_info *ibs;
1038 struct radeon_winsys_bo **bos;
1039 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1040 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1041 struct drm_amdgpu_bo_list_entry *handles = NULL;
1042 unsigned num_handles = 0;
1043 unsigned number_of_ibs;
1044 uint32_t *ptr;
1045 unsigned cnt = 0;
1046 unsigned size = 0;
1047 unsigned pad_words = 0;
1048
1049 /* Compute the number of IBs for this submit. */
1050 number_of_ibs = cs->num_old_cs_buffers + 1;
1051
1052 ibs = malloc(number_of_ibs * sizeof(*ibs));
1053 if (!ibs)
1054 return VK_ERROR_OUT_OF_HOST_MEMORY;
1055
1056 bos = malloc(number_of_ibs * sizeof(*bos));
1057 if (!bos) {
1058 free(ibs);
1059 return VK_ERROR_OUT_OF_HOST_MEMORY;
1060 }
1061
1062 if (number_of_ibs > 1) {
1063 /* Special path when the maximum size in dwords has
1064 * been reached because we need to handle more than one
1065 * IB per submit.
1066 */
1067 struct radeon_cmdbuf **new_cs_array;
1068 unsigned idx = 0;
1069
1070 new_cs_array = malloc(cs->num_old_cs_buffers *
1071 sizeof(*new_cs_array));
1072 assert(new_cs_array);
1073
1074 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1075 new_cs_array[idx++] = &cs->old_cs_buffers[j];
1076 new_cs_array[idx++] = cs_array[i];
1077
1078 for (unsigned j = 0; j < number_of_ibs; j++) {
1079 struct radeon_cmdbuf *rcs = new_cs_array[j];
1080 bool needs_preamble = preamble_cs && j == 0;
1081 unsigned size = 0;
1082
1083 if (needs_preamble)
1084 size += preamble_cs->cdw;
1085 size += rcs->cdw;
1086
1087 assert(size < 0xffff8);
1088
1089 while (!size || (size & 7)) {
1090 size++;
1091 pad_words++;
1092 }
1093
1094 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1095 RADEON_DOMAIN_GTT,
1096 RADEON_FLAG_CPU_ACCESS |
1097 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1098 RADEON_FLAG_READ_ONLY,
1099 RADV_BO_PRIORITY_CS);
1100 ptr = ws->buffer_map(bos[j]);
1101
1102 if (needs_preamble) {
1103 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1104 ptr += preamble_cs->cdw;
1105 }
1106
1107 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1108 ptr += rcs->cdw;
1109
1110 for (unsigned k = 0; k < pad_words; ++k)
1111 *ptr++ = pad_word;
1112
1113 ibs[j].size = size;
1114 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1115 ibs[j].flags = 0;
1116 }
1117
1118 cnt++;
1119 free(new_cs_array);
1120 } else {
1121 if (preamble_cs)
1122 size += preamble_cs->cdw;
1123
1124 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1125 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1126 ++cnt;
1127 }
1128
1129 while (!size || (size & 7)) {
1130 size++;
1131 pad_words++;
1132 }
1133 assert(cnt);
1134
1135 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1136 RADEON_DOMAIN_GTT,
1137 RADEON_FLAG_CPU_ACCESS |
1138 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1139 RADEON_FLAG_READ_ONLY,
1140 RADV_BO_PRIORITY_CS);
1141 ptr = ws->buffer_map(bos[0]);
1142
1143 if (preamble_cs) {
1144 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1145 ptr += preamble_cs->cdw;
1146 }
1147
1148 for (unsigned j = 0; j < cnt; ++j) {
1149 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1150 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1151 ptr += cs->base.cdw;
1152
1153 }
1154
1155 for (unsigned j = 0; j < pad_words; ++j)
1156 *ptr++ = pad_word;
1157
1158 ibs[0].size = size;
1159 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1160 ibs[0].flags = 0;
1161 }
1162
1163 result = radv_amdgpu_get_bo_list(cs0->ws, &cs_array[i], cnt,
1164 (struct radv_amdgpu_winsys_bo **)bos,
1165 number_of_ibs, preamble_cs,
1166 radv_bo_list,
1167 &num_handles, &handles);
1168 if (result != VK_SUCCESS) {
1169 free(ibs);
1170 free(bos);
1171 return result;
1172 }
1173
1174 memset(&request, 0, sizeof(request));
1175
1176 request.ip_type = cs0->hw_ip;
1177 request.ring = queue_idx;
1178 request.handles = handles;
1179 request.num_handles = num_handles;
1180 request.number_of_ibs = number_of_ibs;
1181 request.ibs = ibs;
1182 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1183
1184 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1185 result = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1186
1187 free(request.handles);
1188
1189 for (unsigned j = 0; j < number_of_ibs; j++) {
1190 ws->buffer_destroy(bos[j]);
1191 }
1192
1193 free(ibs);
1194 free(bos);
1195
1196 if (result != VK_SUCCESS)
1197 return result;
1198
1199 i += cnt;
1200 }
1201 if (fence)
1202 radv_amdgpu_request_to_fence(ctx, fence, &request);
1203
1204 radv_assign_last_submit(ctx, &request);
1205
1206 return VK_SUCCESS;
1207 }
1208
1209 static VkResult radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1210 int queue_idx,
1211 struct radeon_cmdbuf **cs_array,
1212 unsigned cs_count,
1213 struct radeon_cmdbuf *initial_preamble_cs,
1214 struct radeon_cmdbuf *continue_preamble_cs,
1215 struct radv_winsys_sem_info *sem_info,
1216 const struct radv_winsys_bo_list *bo_list,
1217 bool can_patch,
1218 struct radeon_winsys_fence *_fence)
1219 {
1220 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1221 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1222 VkResult result;
1223
1224 assert(sem_info);
1225 if (!cs->ws->use_ib_bos) {
1226 result = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1227 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1228 } else if (can_patch) {
1229 result = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1230 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1231 } else {
1232 result = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1233 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1234 }
1235
1236 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1237 return result;
1238 }
1239
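/*
 * Translate a GPU VA from an IB back into a CPU pointer for the IB parser,
 * searching the current and old IB BOs and, with debug_all_bos, the global
 * BO list.
 */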
1240 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1241 {
1242 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1243 void *ret = NULL;
1244
1245 if (!cs->ib_buffer)
1246 return NULL;
1247 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1248 struct radv_amdgpu_winsys_bo *bo;
1249
1250 bo = (struct radv_amdgpu_winsys_bo*)
1251 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1252 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1253 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1254 return (char *)ret + (addr - bo->base.va);
1255 }
1256 }
1257 if(cs->ws->debug_all_bos) {
1258 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1259 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1260 &cs->ws->global_bo_list, global_list_item) {
1261 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1262 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1263 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1264 return (char *)ret + (addr - bo->base.va);
1265 }
1266 }
1267 }
1268 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1269 }
1270 return ret;
1271 }
1272
1273 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1274 FILE* file,
1275 const int *trace_ids, int trace_id_count)
1276 {
1277 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1278 void *ib = cs->base.buf;
1279 int num_dw = cs->base.cdw;
1280
1281 if (cs->ws->use_ib_bos) {
1282 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1283 num_dw = cs->ib.size;
1284 }
1285 assert(ib);
1286 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1287 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1288 }
1289
1290 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1291 {
1292 switch (radv_priority) {
1293 case RADEON_CTX_PRIORITY_REALTIME:
1294 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1295 case RADEON_CTX_PRIORITY_HIGH:
1296 return AMDGPU_CTX_PRIORITY_HIGH;
1297 case RADEON_CTX_PRIORITY_MEDIUM:
1298 return AMDGPU_CTX_PRIORITY_NORMAL;
1299 case RADEON_CTX_PRIORITY_LOW:
1300 return AMDGPU_CTX_PRIORITY_LOW;
1301 default:
1302 unreachable("Invalid context priority");
1303 }
1304 }
1305
1306 static VkResult radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1307 enum radeon_ctx_priority priority,
1308 struct radeon_winsys_ctx **rctx)
1309 {
1310 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1311 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1312 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1313 VkResult result;
1314 int r;
1315
1316 if (!ctx)
1317 return VK_ERROR_OUT_OF_HOST_MEMORY;
1318
1319 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1320 if (r && r == -EACCES) {
1321 result = VK_ERROR_NOT_PERMITTED_EXT;
1322 goto error_create;
1323 } else if (r) {
1324 fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1325 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1326 goto error_create;
1327 }
1328 ctx->ws = ws;
1329
1330 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1331 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1332 RADEON_DOMAIN_GTT,
1333 RADEON_FLAG_CPU_ACCESS |
1334 RADEON_FLAG_NO_INTERPROCESS_SHARING,
1335 RADV_BO_PRIORITY_CS);
1336 if (ctx->fence_bo)
1337 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1338 if (ctx->fence_map)
1339 memset(ctx->fence_map, 0, 4096);
1340
1341 *rctx = (struct radeon_winsys_ctx *)ctx;
1342 return VK_SUCCESS;
1343 error_create:
1344 FREE(ctx);
1345 return result;
1346 }
1347
1348 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1349 {
1350 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1351 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1352 amdgpu_cs_ctx_free(ctx->ctx);
1353 FREE(ctx);
1354 }
1355
1356 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1357 enum ring_type ring_type, int ring_index)
1358 {
1359 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1360 int ip_type = ring_to_hw_ip(ring_type);
1361
1362 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1363 uint32_t expired;
1364 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1365 1000000000ull, 0, &expired);
1366
1367 if (ret || !expired)
1368 return false;
1369 }
1370
1371 return true;
1372 }
1373
1374 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1375 {
1376 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1377 if (!sem)
1378 return NULL;
1379
1380 return (struct radeon_winsys_sem *)sem;
1381 }
1382
1383 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1384 {
1385 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1386 FREE(sem);
1387 }
1388
1389 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1390 uint32_t ip_type,
1391 uint32_t ring,
1392 struct radv_winsys_sem_info *sem_info)
1393 {
1394 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1395 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1396
1397 if (sem->context)
1398 return -EINVAL;
1399
1400 *sem = ctx->last_submission[ip_type][ring].fence;
1401 }
1402 return 0;
1403 }
1404
1405 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1406 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1407 {
1408 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1409 if (!syncobj)
1410 return NULL;
1411
1412 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1413 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1414 sem->handle = counts->syncobj[i];
1415 }
1416
1417 chunk->chunk_id = chunk_id;
1418 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1419 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1420 return syncobj;
1421 }
1422
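/*
 * Lower a radv_amdgpu_cs_request onto the raw CS ioctl: one IB chunk per IB,
 * an optional user-fence chunk, syncobj wait/signal chunks and a
 * dependencies chunk as requested by sem_info, and the BO list either as a
 * BO_HANDLES chunk (DRM 3.27+) or via the legacy amdgpu_bo_list_create_raw
 * path.
 */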
1423 static VkResult
1424 radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1425 struct radv_amdgpu_cs_request *request,
1426 struct radv_winsys_sem_info *sem_info)
1427 {
1428 int r;
1429 int num_chunks;
1430 int size;
1431 bool user_fence;
1432 struct drm_amdgpu_cs_chunk *chunks;
1433 struct drm_amdgpu_cs_chunk_data *chunk_data;
1434 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1435 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1436 bool use_bo_list_create = ctx->ws->info.drm_minor < 27;
1437 struct drm_amdgpu_bo_list_in bo_list_in;
1438 int i;
1439 struct amdgpu_cs_fence *sem;
1440 uint32_t bo_list = 0;
1441 VkResult result = VK_SUCCESS;
1442
1443 user_fence = (request->fence_info.handle != NULL);
1444 size = request->number_of_ibs + (user_fence ? 2 : 1) + (!use_bo_list_create ? 1 : 0) + 3;
1445
1446 chunks = malloc(sizeof(chunks[0]) * size);
1447 if (!chunks)
1448 return VK_ERROR_OUT_OF_HOST_MEMORY;
1449
1450 size = request->number_of_ibs + (user_fence ? 1 : 0);
1451
1452 chunk_data = malloc(sizeof(chunk_data[0]) * size);
1453 if (!chunk_data) {
1454 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1455 goto error_out;
1456 }
1457
1458 num_chunks = request->number_of_ibs;
1459 for (i = 0; i < request->number_of_ibs; i++) {
1460 struct amdgpu_cs_ib_info *ib;
1461 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1462 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1463 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1464
1465 ib = &request->ibs[i];
1466
1467 chunk_data[i].ib_data._pad = 0;
1468 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1469 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1470 chunk_data[i].ib_data.ip_type = request->ip_type;
1471 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1472 chunk_data[i].ib_data.ring = request->ring;
1473 chunk_data[i].ib_data.flags = ib->flags;
1474 }
1475
1476 if (user_fence) {
1477 i = num_chunks++;
1478
1479 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1480 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1481 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1482
1483 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1484 &chunk_data[i]);
1485 }
1486
1487 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1488 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1489 &chunks[num_chunks],
1490 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1491 if (!wait_syncobj) {
1492 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1493 goto error_out;
1494 }
1495 num_chunks++;
1496
1497 if (sem_info->wait.sem_count == 0)
1498 sem_info->cs_emit_wait = false;
1499
1500 }
1501
1502 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1503 sem_dependencies = malloc(sizeof(sem_dependencies[0]) * sem_info->wait.sem_count);
1504 if (!sem_dependencies) {
1505 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1506 goto error_out;
1507 }
1508
1509 int sem_count = 0;
1510
1511 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1512 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1513 if (!sem->context)
1514 continue;
1515 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1516
1517 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1518
1519 sem->context = NULL;
1520 }
1521 i = num_chunks++;
1522
1523 /* dependencies chunk */
1524 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1525 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1526 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1527
1528 sem_info->cs_emit_wait = false;
1529 }
1530
1531 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1532 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1533 &chunks[num_chunks],
1534 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1535 if (!signal_syncobj) {
1536 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1537 goto error_out;
1538 }
1539 num_chunks++;
1540 }
1541
1542 if (use_bo_list_create) {
1543 /* Legacy path creating the buffer list handle and passing it
1544 * to the CS ioctl.
1545 */
1546 r = amdgpu_bo_list_create_raw(ctx->ws->dev, request->num_handles,
1547 request->handles, &bo_list);
1548 if (r) {
1549 if (r == -ENOMEM) {
1550 fprintf(stderr, "amdgpu: Not enough memory for buffer list creation.\n");
1551 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1552 } else {
1553 fprintf(stderr, "amdgpu: buffer list creation failed (%d).\n", r);
1554 result = VK_ERROR_UNKNOWN;
1555 }
1556 goto error_out;
1557 }
1558 } else {
1559 /* Standard path passing the buffer list via the CS ioctl. */
1560 bo_list_in.operation = ~0;
1561 bo_list_in.list_handle = ~0;
1562 bo_list_in.bo_number = request->num_handles;
1563 bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
1564 bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)request->handles;
1565
1566 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
1567 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
1568 chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
1569 num_chunks++;
1570 }
1571
1572 r = amdgpu_cs_submit_raw2(ctx->ws->dev,
1573 ctx->ctx,
1574 bo_list,
1575 num_chunks,
1576 chunks,
1577 &request->seq_no);
1578
1579 if (bo_list)
1580 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1581
1582 if (r) {
1583 if (r == -ENOMEM) {
1584 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1585 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1586 } else if (r == -ECANCELED) {
1587 fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1588 result = VK_ERROR_DEVICE_LOST;
1589 } else {
1590 fprintf(stderr, "amdgpu: The CS has been rejected, "
1591 "see dmesg for more information (%i).\n", r);
1592 result = VK_ERROR_UNKNOWN;
1593 }
1594 }
1595
1596 error_out:
1597 free(chunks);
1598 free(chunk_data);
1599 free(sem_dependencies);
1600 free(wait_syncobj);
1601 free(signal_syncobj);
1602 return result;
1603 }
1604
1605 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1606 bool create_signaled,
1607 uint32_t *handle)
1608 {
1609 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1610 uint32_t flags = 0;
1611
1612 if (create_signaled)
1613 flags |= DRM_SYNCOBJ_CREATE_SIGNALED;
1614
1615 return amdgpu_cs_create_syncobj2(ws->dev, flags, handle);
1616 }
1617
1618 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1619 uint32_t handle)
1620 {
1621 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1622 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1623 }
1624
1625 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1626 uint32_t handle)
1627 {
1628 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1629 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1630 }
1631
1632 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1633 uint32_t handle)
1634 {
1635 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1636 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1637 }
1638
1639 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1640 uint32_t handle_count, bool wait_all, uint64_t timeout)
1641 {
1642 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1643 uint32_t tmp;
1644
1645 /* The amdgpu timeouts are signed, while Vulkan timeouts are unsigned. */
1646 timeout = MIN2(timeout, INT64_MAX);
1647
1648 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1649 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1650 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1651 &tmp);
1652 if (ret == 0) {
1653 return true;
1654 } else if (ret == -ETIME) {
1655 return false;
1656 } else {
1657 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1658 return false;
1659 }
1660 }
1661
1662 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1663 uint32_t syncobj,
1664 int *fd)
1665 {
1666 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1667
1668 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1669 }
1670
1671 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1672 int fd,
1673 uint32_t *syncobj)
1674 {
1675 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1676
1677 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1678 }
1679
1680
1681 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1682 uint32_t syncobj,
1683 int *fd)
1684 {
1685 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1686
1687 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1688 }
1689
1690 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1691 uint32_t syncobj,
1692 int fd)
1693 {
1694 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1695
1696 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1697 }
1698
1699 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1700 {
1701 ws->base.ctx_create = radv_amdgpu_ctx_create;
1702 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1703 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1704 ws->base.cs_create = radv_amdgpu_cs_create;
1705 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1706 ws->base.cs_grow = radv_amdgpu_cs_grow;
1707 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1708 ws->base.cs_reset = radv_amdgpu_cs_reset;
1709 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1710 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1711 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1712 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1713 ws->base.create_fence = radv_amdgpu_create_fence;
1714 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1715 ws->base.reset_fence = radv_amdgpu_reset_fence;
1716 ws->base.signal_fence = radv_amdgpu_signal_fence;
1717 ws->base.is_fence_waitable = radv_amdgpu_is_fence_waitable;
1718 ws->base.create_sem = radv_amdgpu_create_sem;
1719 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1720 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1721 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1722 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1723 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1724 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1725 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1726 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1727 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1728 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1729 ws->base.fence_wait = radv_amdgpu_fence_wait;
1730 ws->base.fences_wait = radv_amdgpu_fences_wait;
1731 }