mesa.git @ 5f621ec3fb881b0ae736ad715de604bed6da4df1: src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "ac_debug.h"
33 #include "radv_radeon_winsys.h"
34 #include "radv_amdgpu_cs.h"
35 #include "radv_amdgpu_bo.h"
36 #include "sid.h"
37
38
39 enum {
40 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
41 };
42
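/* Internal command stream representation built on top of libdrm_amdgpu.
 * It wraps the generic radeon_cmdbuf, owns the IB buffer object that the
 * packets are written into, tracks the BO list that will be sent with the
 * submission, and keeps old IB/CS buffers alive until the stream is reset.
 */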
43 struct radv_amdgpu_cs {
44 struct radeon_cmdbuf base;
45 struct radv_amdgpu_winsys *ws;
46
47 struct amdgpu_cs_ib_info ib;
48
49 struct radeon_winsys_bo *ib_buffer;
50 uint8_t *ib_mapped;
51 unsigned max_num_buffers;
52 unsigned num_buffers;
53 amdgpu_bo_handle *handles;
54
55 struct radeon_winsys_bo **old_ib_buffers;
56 unsigned num_old_ib_buffers;
57 unsigned max_num_old_ib_buffers;
58 unsigned *ib_size_ptr;
59 bool failed;
60 bool is_chained;
61
62 int buffer_hash_table[1024];
63 unsigned hw_ip;
64
65 unsigned num_virtual_buffers;
66 unsigned max_num_virtual_buffers;
67 struct radeon_winsys_bo **virtual_buffers;
68 int *virtual_buffer_hash_table;
69
70 /* For chips that don't support chaining. */
71 struct radeon_cmdbuf *old_cs_buffers;
72 unsigned num_old_cs_buffers;
73 };
74
75 static inline struct radv_amdgpu_cs *
76 radv_amdgpu_cs(struct radeon_cmdbuf *base)
77 {
78 return (struct radv_amdgpu_cs*)base;
79 }
80
81 static int ring_to_hw_ip(enum ring_type ring)
82 {
83 switch (ring) {
84 case RING_GFX:
85 return AMDGPU_HW_IP_GFX;
86 case RING_DMA:
87 return AMDGPU_HW_IP_DMA;
88 case RING_COMPUTE:
89 return AMDGPU_HW_IP_COMPUTE;
90 default:
91 unreachable("unsupported ring");
92 }
93 }
94
95 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
96 uint32_t ip_type,
97 uint32_t ring,
98 struct radv_winsys_sem_info *sem_info);
99 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
100 struct amdgpu_cs_request *request,
101 struct radv_winsys_sem_info *sem_info);
102
103 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
104 struct radv_amdgpu_fence *fence,
105 struct amdgpu_cs_request *req)
106 {
107 fence->fence.context = ctx->ctx;
108 fence->fence.ip_type = req->ip_type;
109 fence->fence.ip_instance = req->ip_instance;
110 fence->fence.ring = req->ring;
111 fence->fence.fence = req->seq_no;
112 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
113 }
114
115 static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
116 {
117 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
118 return (struct radeon_winsys_fence*)fence;
119 }
120
121 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
122 {
123 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
124 free(fence);
125 }
126
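/* Wait for a single fence. The user fence value written by the GPU is
 * checked first so that already-signaled fences avoid the kernel round
 * trip; otherwise fall back to amdgpu_cs_query_fence_status().
 */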
127 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
128 struct radeon_winsys_fence *_fence,
129 bool absolute,
130 uint64_t timeout)
131 {
132 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
133 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
134 int r;
135 uint32_t expired = 0;
136
137 if (fence->user_ptr) {
138 if (*fence->user_ptr >= fence->fence.fence)
139 return true;
140 if (!absolute && !timeout)
141 return false;
142 }
143
144 /* Now use the libdrm query. */
145 r = amdgpu_cs_query_fence_status(&fence->fence,
146 timeout,
147 flags,
148 &expired);
149
150 if (r) {
151 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
152 return false;
153 }
154
155 if (expired)
156 return true;
157
158 return false;
159 }
160
161
162 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
163 struct radeon_winsys_fence *const *_fences,
164 uint32_t fence_count,
165 bool wait_all,
166 uint64_t timeout)
167 {
168 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
169 int r;
170 uint32_t expired = 0, first = 0;
171
172 if (!fences)
173 return false;
174
175 for (uint32_t i = 0; i < fence_count; ++i)
176 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
177
178 /* Now use the libdrm query. */
179 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
180 timeout, &expired, &first);
181
182 free(fences);
183 if (r) {
184 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
185 return false;
186 }
187
188 if (expired)
189 return true;
190
191 return false;
192 }
193
194 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
195 {
196 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
197
198 if (cs->ib_buffer)
199 cs->ws->base.buffer_destroy(cs->ib_buffer);
200 else
201 free(cs->base.buf);
202
203 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
204 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
205
206 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
207 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
208 free(rcs->buf);
209 }
210
211 free(cs->old_cs_buffers);
212 free(cs->old_ib_buffers);
213 free(cs->virtual_buffers);
214 free(cs->virtual_buffer_hash_table);
215 free(cs->handles);
216 free(cs);
217 }
218
219 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
220 enum ring_type ring_type)
221 {
222 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
223 cs->buffer_hash_table[i] = -1;
224
225 cs->hw_ip = ring_to_hw_ip(ring_type);
226 }
227
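/* Create a command stream. With IB buffer objects (use_ib_bos) packets are
 * written straight into a CPU-mapped GTT buffer that the kernel executes;
 * without them, packets go into a malloc'ed buffer that the sysmem submit
 * path later copies into a GPU-visible buffer.
 */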
228 static struct radeon_cmdbuf *
229 radv_amdgpu_cs_create(struct radeon_winsys *ws,
230 enum ring_type ring_type)
231 {
232 struct radv_amdgpu_cs *cs;
233 uint32_t ib_size = 20 * 1024 * 4;
234 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
235 if (!cs)
236 return NULL;
237
238 cs->ws = radv_amdgpu_winsys(ws);
239 radv_amdgpu_init_cs(cs, ring_type);
240
241 if (cs->ws->use_ib_bos) {
242 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
243 RADEON_DOMAIN_GTT,
244 RADEON_FLAG_CPU_ACCESS |
245 RADEON_FLAG_NO_INTERPROCESS_SHARING |
246 RADEON_FLAG_READ_ONLY);
247 if (!cs->ib_buffer) {
248 free(cs);
249 return NULL;
250 }
251
252 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
253 if (!cs->ib_mapped) {
254 ws->buffer_destroy(cs->ib_buffer);
255 free(cs);
256 return NULL;
257 }
258
259 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
260 cs->base.buf = (uint32_t *)cs->ib_mapped;
261 cs->base.max_dw = ib_size / 4 - 4;
262 cs->ib_size_ptr = &cs->ib.size;
263 cs->ib.size = 0;
264
265 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
266 } else {
267 cs->base.buf = malloc(16384);
268 cs->base.max_dw = 4096;
269 if (!cs->base.buf) {
270 free(cs);
271 return NULL;
272 }
273 }
274
275 return &cs->base;
276 }
277
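/* Make room for at least min_size more dwords. Without IB BOs the CPU
 * buffer is reallocated, spilling into additional CS buffers once the
 * per-IB dword limit is reached. With IB BOs a new buffer is allocated and
 * the current one is chained to it with an INDIRECT_BUFFER packet.
 */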
278 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
279 {
280 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
281
282 if (cs->failed) {
283 cs->base.cdw = 0;
284 return;
285 }
286
287 if (!cs->ws->use_ib_bos) {
288 const uint64_t limit_dws = 0xffff8;
289 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
290 MIN2(cs->base.max_dw * 2, limit_dws));
291
292 /* The total ib size cannot exceed limit_dws dwords. */
293 if (ib_dws > limit_dws)
294 {
295 /* The maximum size in dwords has been reached,
296 * try to allocate a new one.
297 */
298 if (cs->num_old_cs_buffers + 1 >= AMDGPU_CS_MAX_IBS_PER_SUBMIT) {
299 /* TODO: Allow to submit more than 4 IBs. */
300 fprintf(stderr, "amdgpu: Maximum number of IBs "
301 "per submit reached.\n");
302 cs->failed = true;
303 cs->base.cdw = 0;
304 return;
305 }
306
307 struct radeon_cmdbuf *new_buffers = realloc(cs->old_cs_buffers,
308 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
309 if (!new_buffers) {
310 cs->failed = true;
311 cs->base.cdw = 0;
312 return;
313 }
314 cs->old_cs_buffers = new_buffers;
315
316 /* Store the current one for submitting it later. */
317 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
318 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
319 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
320 cs->num_old_cs_buffers++;
321
322 /* Reset the cs, it will be re-allocated below. */
323 cs->base.cdw = 0;
324 cs->base.buf = NULL;
325
326 /* Re-compute the number of dwords to allocate. */
327 ib_dws = MAX2(cs->base.cdw + min_size,
328 MIN2(cs->base.max_dw * 2, limit_dws));
329 if (ib_dws > limit_dws) {
330 fprintf(stderr, "amdgpu: Too high number of "
331 "dwords to allocate\n");
332 cs->failed = true;
333 return;
334 }
335 }
336
337 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
338 if (new_buf) {
339 cs->base.buf = new_buf;
340 cs->base.max_dw = ib_dws;
341 } else {
342 cs->failed = true;
343 cs->base.cdw = 0;
344 }
345 return;
346 }
347
348 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
349
350 /* max that fits in the chain size field. */
351 ib_size = MIN2(ib_size, 0xfffff);
352
353 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
354 radeon_emit(&cs->base, 0xffff1000);
355
356 *cs->ib_size_ptr |= cs->base.cdw + 4;
357
358 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
359 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
360 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
361 cs->max_num_old_ib_buffers * sizeof(void*));
362 }
363
364 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
365
366 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
367 RADEON_DOMAIN_GTT,
368 RADEON_FLAG_CPU_ACCESS |
369 RADEON_FLAG_NO_INTERPROCESS_SHARING |
370 RADEON_FLAG_READ_ONLY);
371
372 if (!cs->ib_buffer) {
373 cs->base.cdw = 0;
374 cs->failed = true;
375 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
376 }
377
378 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
379 if (!cs->ib_mapped) {
380 cs->ws->base.buffer_destroy(cs->ib_buffer);
381 cs->base.cdw = 0;
382 cs->failed = true;
383 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
384 }
385
386 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
387
388 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
389 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
390 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
391 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
392
393 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
394
395 cs->base.buf = (uint32_t *)cs->ib_mapped;
396 cs->base.cdw = 0;
397 cs->base.max_dw = ib_size / 4 - 4;
398
399 }
400
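/* Pad the IB with NOPs to the required 8-dword alignment and patch the
 * final size into the IB info (or the last chain packet). Returns false if
 * the CS was marked as failed.
 */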
401 static bool radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
402 {
403 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
404
405 if (cs->ws->use_ib_bos) {
406 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
407 radeon_emit(&cs->base, 0xffff1000);
408
409 *cs->ib_size_ptr |= cs->base.cdw;
410
411 cs->is_chained = false;
412 }
413
414 return !cs->failed;
415 }
416
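/* Reset the CS for reuse: clear the buffer lists and hash tables, release
 * buffers kept from earlier grows and start writing at the beginning of the
 * current IB again.
 */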
417 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
418 {
419 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
420 cs->base.cdw = 0;
421 cs->failed = false;
422
423 for (unsigned i = 0; i < cs->num_buffers; ++i) {
424 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
425 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
426 cs->buffer_hash_table[hash] = -1;
427 }
428
429 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
430 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
431 cs->virtual_buffer_hash_table[hash] = -1;
432 }
433
434 cs->num_buffers = 0;
435 cs->num_virtual_buffers = 0;
436
437 if (cs->ws->use_ib_bos) {
438 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
439
440 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
441 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
442
443 cs->num_old_ib_buffers = 0;
444 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
445 cs->ib_size_ptr = &cs->ib.size;
446 cs->ib.size = 0;
447 } else {
448 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
449 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
450 free(rcs->buf);
451 }
452
453 free(cs->old_cs_buffers);
454 cs->old_cs_buffers = NULL;
455 cs->num_old_cs_buffers = 0;
456 }
457 }
458
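/* BO list helpers. Lookups go through a small hash table keyed on the BO
 * handle so that cs_add_buffer stays cheap when a buffer is added many
 * times; collisions fall back to a linear search.
 */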
459 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
460 amdgpu_bo_handle bo)
461 {
462 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
463 int index = cs->buffer_hash_table[hash];
464
465 if (index == -1)
466 return -1;
467
468 if (cs->handles[index] == bo)
469 return index;
470
471 for (unsigned i = 0; i < cs->num_buffers; ++i) {
472 if (cs->handles[i] == bo) {
473 cs->buffer_hash_table[hash] = i;
474 return i;
475 }
476 }
477
478 return -1;
479 }
480
481 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
482 amdgpu_bo_handle bo)
483 {
484 unsigned hash;
485 int index = radv_amdgpu_cs_find_buffer(cs, bo);
486
487 if (index != -1)
488 return;
489
490 if (cs->num_buffers == cs->max_num_buffers) {
491 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
492 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
493 cs->max_num_buffers = new_count;
494 }
495
496 cs->handles[cs->num_buffers] = bo;
497
498 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
499 cs->buffer_hash_table[hash] = cs->num_buffers;
500
501 ++cs->num_buffers;
502 }
503
504 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
505 struct radeon_winsys_bo *bo)
506 {
507 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
508 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
509
510
511 if (!cs->virtual_buffer_hash_table) {
512 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
513 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
514 cs->virtual_buffer_hash_table[i] = -1;
515 }
516
517 if (cs->virtual_buffer_hash_table[hash] >= 0) {
518 int idx = cs->virtual_buffer_hash_table[hash];
519 if (cs->virtual_buffers[idx] == bo) {
520 return;
521 }
522 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
523 if (cs->virtual_buffers[i] == bo) {
524 cs->virtual_buffer_hash_table[hash] = i;
525 return;
526 }
527 }
528 }
529
530 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
531 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
532 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * cs->max_num_virtual_buffers);
533 }
534
535 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
536
537 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
538 ++cs->num_virtual_buffers;
539
540 }
541
542 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
543 struct radeon_winsys_bo *_bo)
544 {
545 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
546 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
547
548 if (bo->is_virtual) {
549 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
550 return;
551 }
552
553 if (bo->base.is_local)
554 return;
555
556 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo);
557 }
558
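/* Execute a secondary command buffer from a primary one: merge the child's
 * buffer lists into the parent, then either call into the child IB with an
 * INDIRECT_BUFFER packet (IB BO path) or copy its packets inline (sysmem
 * path).
 */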
559 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
560 struct radeon_cmdbuf *_child)
561 {
562 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
563 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
564
565 for (unsigned i = 0; i < child->num_buffers; ++i) {
566 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i]);
567 }
568
569 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
570 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
571 }
572
573 if (parent->ws->use_ib_bos) {
574 if (parent->base.cdw + 4 > parent->base.max_dw)
575 radv_amdgpu_cs_grow(&parent->base, 4);
576
577 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
578 radeon_emit(&parent->base, child->ib.ib_mc_address);
579 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
580 radeon_emit(&parent->base, child->ib.size);
581 } else {
582 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
583 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
584
585 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
586 parent->base.cdw += child->base.cdw;
587 }
588 }
589
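/* Build the amdgpu BO list for a submission. With debug_all_bos every BO
 * known to the winsys is included; a single CS with no extras reuses its
 * handle array directly; the general case merges the CS arrays, virtual
 * buffers and the external BO list while skipping duplicates.
 */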
590 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
591 struct radeon_cmdbuf **cs_array,
592 unsigned count,
593 struct radv_amdgpu_winsys_bo **extra_bo_array,
594 unsigned num_extra_bo,
595 struct radeon_cmdbuf *extra_cs,
596 const struct radv_winsys_bo_list *radv_bo_list,
597 amdgpu_bo_list_handle *bo_list)
598 {
599 int r = 0;
600
601 if (ws->debug_all_bos) {
602 struct radv_amdgpu_winsys_bo *bo;
603 amdgpu_bo_handle *handles;
604 unsigned num = 0;
605
606 pthread_mutex_lock(&ws->global_bo_list_lock);
607
608 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
609 if (!handles) {
610 pthread_mutex_unlock(&ws->global_bo_list_lock);
611 return -ENOMEM;
612 }
613
614 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
615 assert(num < ws->num_buffers);
616 handles[num++] = bo->bo;
617 }
618
619 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
620 handles, NULL,
621 bo_list);
622 free(handles);
623 pthread_mutex_unlock(&ws->global_bo_list_lock);
624 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
625 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
626 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
627 if (cs->num_buffers == 0) {
628 *bo_list = 0;
629 return 0;
630 }
631 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
632 NULL, bo_list);
633 } else {
634 unsigned total_buffer_count = num_extra_bo;
635 unsigned unique_bo_count = num_extra_bo;
636 for (unsigned i = 0; i < count; ++i) {
637 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
638 total_buffer_count += cs->num_buffers;
639 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
640 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
641 }
642
643 if (extra_cs) {
644 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
645 }
646
647 if (radv_bo_list) {
648 total_buffer_count += radv_bo_list->count;
649 }
650
651 if (total_buffer_count == 0) {
652 *bo_list = 0;
653 return 0;
654 }
655 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
656 if (!handles) {
658 return -ENOMEM;
659 }
660
661 for (unsigned i = 0; i < num_extra_bo; i++) {
662 handles[i] = extra_bo_array[i]->bo;
663 }
664
665 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
666 struct radv_amdgpu_cs *cs;
667
668 if (i == count)
669 cs = (struct radv_amdgpu_cs*)extra_cs;
670 else
671 cs = (struct radv_amdgpu_cs*)cs_array[i];
672
673 if (!cs->num_buffers)
674 continue;
675
676 if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
677 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
678 unique_bo_count = cs->num_buffers;
679 continue;
680 }
681 int unique_bo_so_far = unique_bo_count;
682 for (unsigned j = 0; j < cs->num_buffers; ++j) {
683 bool found = false;
684 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
685 if (handles[k] == cs->handles[j]) {
686 found = true;
687 break;
688 }
689 }
690 if (!found) {
691 handles[unique_bo_count] = cs->handles[j];
692 ++unique_bo_count;
693 }
694 }
695 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
696 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
697 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
698 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
699 bool found = false;
700 for (unsigned m = 0; m < unique_bo_count; ++m) {
701 if (handles[m] == bo->bo) {
702 found = true;
703 break;
704 }
705 }
706 if (!found) {
707 handles[unique_bo_count] = bo->bo;
708 ++unique_bo_count;
709 }
710 }
711 }
712 }
713
714 if (radv_bo_list) {
715 unsigned unique_bo_so_far = unique_bo_count;
716 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
717 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
718 bool found = false;
719 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
720 if (bo->bo == handles[j]) {
721 found = true;
722 break;
723 }
724 }
725 if (!found) {
726 handles[unique_bo_count] = bo->bo;
727 ++unique_bo_count;
728 }
729 }
730 }
731
732 if (unique_bo_count > 0) {
733 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
734 NULL, bo_list);
735 } else {
736 *bo_list = 0;
737 }
738
739 free(handles);
740 }
741
742 return r;
743 }
744
745 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
746 {
747 struct amdgpu_cs_fence_info ret = {0};
748 if (ctx->fence_map) {
749 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
750 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
751 }
752 return ret;
753 }
754
755 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
756 struct amdgpu_cs_request *request)
757 {
758 radv_amdgpu_request_to_fence(ctx,
759 &ctx->last_submission[request->ip_type][request->ring],
760 request);
761 }
762
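/* Chained submission: patch every CS to jump to the next one with an
 * INDIRECT_BUFFER chain packet so the whole batch (plus an optional
 * preamble IB) is sent to the kernel as a single request.
 */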
763 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
764 int queue_idx,
765 struct radv_winsys_sem_info *sem_info,
766 const struct radv_winsys_bo_list *radv_bo_list,
767 struct radeon_cmdbuf **cs_array,
768 unsigned cs_count,
769 struct radeon_cmdbuf *initial_preamble_cs,
770 struct radeon_cmdbuf *continue_preamble_cs,
771 struct radeon_winsys_fence *_fence)
772 {
773 int r;
774 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
775 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
776 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
777 amdgpu_bo_list_handle bo_list;
778 struct amdgpu_cs_request request = {0};
779 struct amdgpu_cs_ib_info ibs[2];
780 unsigned number_of_ibs = 1;
781
782 for (unsigned i = cs_count; i--;) {
783 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
784
785 if (cs->is_chained) {
786 *cs->ib_size_ptr -= 4;
787 cs->is_chained = false;
788 }
789
790 if (i + 1 < cs_count) {
791 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
792 assert(cs->base.cdw + 4 <= cs->base.max_dw);
793
794 cs->is_chained = true;
795 *cs->ib_size_ptr += 4;
796
797 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
798 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
799 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
800 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
801 }
802 }
803
804 /* Create a buffer object list. */
805 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
806 initial_preamble_cs, radv_bo_list,
807 &bo_list);
808 if (r) {
809 fprintf(stderr, "amdgpu: buffer list creation failed for the "
810 "chained submission(%d)\n", r);
811 return r;
812 }
813
814 /* Configure the CS request. */
815 if (initial_preamble_cs) {
816 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
817 ibs[1] = cs0->ib;
818 number_of_ibs++;
819 } else {
820 ibs[0] = cs0->ib;
821 }
822
823 request.ip_type = cs0->hw_ip;
824 request.ring = queue_idx;
825 request.number_of_ibs = number_of_ibs;
826 request.ibs = ibs;
827 request.resources = bo_list;
828 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
829
830 /* Submit the CS. */
831 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
832 if (r) {
833 if (r == -ENOMEM)
834 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
835 else
836 fprintf(stderr, "amdgpu: The CS has been rejected, "
837 "see dmesg for more information.\n");
838 }
839
840 if (bo_list)
841 amdgpu_bo_list_destroy(bo_list);
842
843 if (fence)
844 radv_amdgpu_request_to_fence(ctx, fence, &request);
845
846 radv_assign_last_submit(ctx, &request);
847
848 return r;
849 }
850
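/* Fallback submission for CSes that cannot be chained: submit them in
 * groups of at most AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs, with the preamble
 * inserted as the first IB of each group.
 */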
851 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
852 int queue_idx,
853 struct radv_winsys_sem_info *sem_info,
854 const struct radv_winsys_bo_list *radv_bo_list,
855 struct radeon_cmdbuf **cs_array,
856 unsigned cs_count,
857 struct radeon_cmdbuf *initial_preamble_cs,
858 struct radeon_cmdbuf *continue_preamble_cs,
859 struct radeon_winsys_fence *_fence)
860 {
861 int r;
862 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
863 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
864 amdgpu_bo_list_handle bo_list;
865 struct amdgpu_cs_request request;
866 bool emit_signal_sem = sem_info->cs_emit_signal;
867 assert(cs_count);
868
869 for (unsigned i = 0; i < cs_count;) {
870 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
871 struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
872 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
873 unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
874 cs_count - i);
875
876 memset(&request, 0, sizeof(request));
877
878 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL, 0,
879 preamble_cs, radv_bo_list, &bo_list);
880 if (r) {
881 fprintf(stderr, "amdgpu: buffer list creation failed "
882 "for the fallback submission (%d)\n", r);
883 return r;
884 }
885
886 request.ip_type = cs0->hw_ip;
887 request.ring = queue_idx;
888 request.resources = bo_list;
889 request.number_of_ibs = cnt + !!preamble_cs;
890 request.ibs = ibs;
891 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
892
893 if (preamble_cs) {
894 ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
895 }
896
897 for (unsigned j = 0; j < cnt; ++j) {
898 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
899 ibs[j + !!preamble_cs] = cs->ib;
900
901 if (cs->is_chained) {
902 *cs->ib_size_ptr -= 4;
903 cs->is_chained = false;
904 }
905 }
906
907 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
908 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
909 if (r) {
910 if (r == -ENOMEM)
911 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
912 else
913 fprintf(stderr, "amdgpu: The CS has been rejected, "
914 "see dmesg for more information.\n");
915 }
916
917 if (bo_list)
918 amdgpu_bo_list_destroy(bo_list);
919
920 if (r)
921 return r;
922
923 i += cnt;
924 }
925 if (fence)
926 radv_amdgpu_request_to_fence(ctx, fence, &request);
927
928 radv_assign_last_submit(ctx, &request);
929
930 return 0;
931 }
932
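/* Submission path for CPU-side command buffers: copy the packets (and the
 * preamble) into freshly allocated GTT buffers, pad them with NOPs to the
 * required alignment and submit those buffers as IBs.
 */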
933 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
934 int queue_idx,
935 struct radv_winsys_sem_info *sem_info,
936 const struct radv_winsys_bo_list *radv_bo_list,
937 struct radeon_cmdbuf **cs_array,
938 unsigned cs_count,
939 struct radeon_cmdbuf *initial_preamble_cs,
940 struct radeon_cmdbuf *continue_preamble_cs,
941 struct radeon_winsys_fence *_fence)
942 {
943 int r;
944 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
945 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
946 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
947 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
948 amdgpu_bo_list_handle bo_list;
949 struct amdgpu_cs_request request;
950 uint32_t pad_word = 0xffff1000U;
951 bool emit_signal_sem = sem_info->cs_emit_signal;
952
953 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
954 pad_word = 0x80000000;
955
956 assert(cs_count);
957
958 for (unsigned i = 0; i < cs_count;) {
959 struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT] = {0};
960 unsigned number_of_ibs = 1;
961 struct radeon_winsys_bo *bos[AMDGPU_CS_MAX_IBS_PER_SUBMIT] = {0};
962 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
963 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
964 uint32_t *ptr;
965 unsigned cnt = 0;
966 unsigned size = 0;
967 unsigned pad_words = 0;
968
969 if (cs->num_old_cs_buffers > 0) {
970 /* Special path when the maximum size in dwords has
971 * been reached because we need to handle more than one
972 * IB per submit.
973 */
974 unsigned new_cs_count = cs->num_old_cs_buffers + 1;
975 struct radeon_cmdbuf *new_cs_array[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
976 unsigned idx = 0;
977
978 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
979 new_cs_array[idx++] = &cs->old_cs_buffers[j];
980 new_cs_array[idx++] = cs_array[i];
981
982 for (unsigned j = 0; j < new_cs_count; j++) {
983 struct radeon_cmdbuf *rcs = new_cs_array[j];
984 bool needs_preamble = preamble_cs && j == 0;
985 unsigned size = 0;
986
987 if (needs_preamble)
988 size += preamble_cs->cdw;
989 size += rcs->cdw;
990
991 assert(size < 0xffff8);
992
993 while (!size || (size & 7)) {
994 size++;
995 pad_words++;
996 }
997
998 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
999 RADEON_DOMAIN_GTT,
1000 RADEON_FLAG_CPU_ACCESS |
1001 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1002 RADEON_FLAG_READ_ONLY);
1003 ptr = ws->buffer_map(bos[j]);
1004
1005 if (needs_preamble) {
1006 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1007 ptr += preamble_cs->cdw;
1008 }
1009
1010 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1011 ptr += rcs->cdw;
1012
1013 for (unsigned k = 0; k < pad_words; ++k)
1014 *ptr++ = pad_word;
1015
1016 ibs[j].size = size;
1017 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1018 }
1019
1020 number_of_ibs = new_cs_count;
1021 cnt++;
1022 } else {
1023 if (preamble_cs)
1024 size += preamble_cs->cdw;
1025
1026 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1027 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1028 ++cnt;
1029 }
1030
1031 while (!size || (size & 7)) {
1032 size++;
1033 pad_words++;
1034 }
1035 assert(cnt);
1036
1037 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1038 RADEON_DOMAIN_GTT,
1039 RADEON_FLAG_CPU_ACCESS |
1040 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1041 RADEON_FLAG_READ_ONLY);
1042 ptr = ws->buffer_map(bos[0]);
1043
1044 if (preamble_cs) {
1045 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1046 ptr += preamble_cs->cdw;
1047 }
1048
1049 for (unsigned j = 0; j < cnt; ++j) {
1050 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1051 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1052 ptr += cs->base.cdw;
1053
1054 }
1055
1056 for (unsigned j = 0; j < pad_words; ++j)
1057 *ptr++ = pad_word;
1058
1059 ibs[0].size = size;
1060 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1061 }
1062
1063 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
1064 (struct radv_amdgpu_winsys_bo **)bos,
1065 number_of_ibs, preamble_cs,
1066 radv_bo_list, &bo_list);
1067 if (r) {
1068 fprintf(stderr, "amdgpu: buffer list creation failed "
1069 "for the sysmem submission (%d)\n", r);
1070 return r;
1071 }
1072
1073 memset(&request, 0, sizeof(request));
1074
1075 request.ip_type = cs0->hw_ip;
1076 request.ring = queue_idx;
1077 request.resources = bo_list;
1078 request.number_of_ibs = number_of_ibs;
1079 request.ibs = ibs;
1080 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1081
1082 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1083 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1084 if (r) {
1085 if (r == -ENOMEM)
1086 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1087 else
1088 fprintf(stderr, "amdgpu: The CS has been rejected, "
1089 "see dmesg for more information.\n");
1090 }
1091
1092 if (bo_list)
1093 amdgpu_bo_list_destroy(bo_list);
1094
1095 for (unsigned j = 0; j < number_of_ibs; j++)
1096 ws->buffer_destroy(bos[j]);
1097
1098 if (r)
1099 return r;
1100
1101 i += cnt;
1102 }
1103 if (fence)
1104 radv_amdgpu_request_to_fence(ctx, fence, &request);
1105
1106 radv_assign_last_submit(ctx, &request);
1107
1108 return 0;
1109 }
1110
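/* Top-level submit entry point: choose the sysmem, chained or fallback
 * path based on winsys capabilities and the number of command buffers,
 * then signal the requested semaphores.
 */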
1111 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1112 int queue_idx,
1113 struct radeon_cmdbuf **cs_array,
1114 unsigned cs_count,
1115 struct radeon_cmdbuf *initial_preamble_cs,
1116 struct radeon_cmdbuf *continue_preamble_cs,
1117 struct radv_winsys_sem_info *sem_info,
1118 const struct radv_winsys_bo_list *bo_list,
1119 bool can_patch,
1120 struct radeon_winsys_fence *_fence)
1121 {
1122 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1123 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1124 int ret;
1125
1126 assert(sem_info);
1127 if (!cs->ws->use_ib_bos) {
1128 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1129 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1130 } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
1131 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1132 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1133 } else {
1134 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1135 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1136 }
1137
1138 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1139 return ret;
1140 }
1141
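/* Translate a GPU VA into a CPU pointer for IB dumping by searching the
 * current and old IB buffers and, with debug_all_bos, the global BO list.
 */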
1142 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1143 {
1144 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1145 void *ret = NULL;
1146
1147 if (!cs->ib_buffer)
1148 return NULL;
1149 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1150 struct radv_amdgpu_winsys_bo *bo;
1151
1152 bo = (struct radv_amdgpu_winsys_bo*)
1153 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1154 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1155 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1156 return (char *)ret + (addr - bo->base.va);
1157 }
1158 }
1159 if (cs->ws->debug_all_bos) {
1160 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1161 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1162 &cs->ws->global_bo_list, global_list_item) {
1163 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1164 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1165 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1166 return (char *)ret + (addr - bo->base.va);
1167 }
1168 }
1169 }
1170 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1171 }
1172 return ret;
1173 }
1174
1175 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1176 FILE* file,
1177 const int *trace_ids, int trace_id_count)
1178 {
1179 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1180 void *ib = cs->base.buf;
1181 int num_dw = cs->base.cdw;
1182
1183 if (cs->ws->use_ib_bos) {
1184 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1185 num_dw = cs->ib.size;
1186 }
1187 assert(ib);
1188 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1189 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1190 }
1191
1192 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1193 {
1194 switch (radv_priority) {
1195 case RADEON_CTX_PRIORITY_REALTIME:
1196 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1197 case RADEON_CTX_PRIORITY_HIGH:
1198 return AMDGPU_CTX_PRIORITY_HIGH;
1199 case RADEON_CTX_PRIORITY_MEDIUM:
1200 return AMDGPU_CTX_PRIORITY_NORMAL;
1201 case RADEON_CTX_PRIORITY_LOW:
1202 return AMDGPU_CTX_PRIORITY_LOW;
1203 default:
1204 unreachable("Invalid context priority");
1205 }
1206 }
1207
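/* Create a hardware context with the requested priority plus a small GTT
 * buffer used to record the last completed fence value for every ring.
 */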
1208 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1209 enum radeon_ctx_priority priority)
1210 {
1211 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1212 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1213 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1214 int r;
1215
1216 if (!ctx)
1217 return NULL;
1218
1219 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1220 if (r) {
1221 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1222 goto error_create;
1223 }
1224 ctx->ws = ws;
1225
1226 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1227 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1228 RADEON_DOMAIN_GTT,
1229 RADEON_FLAG_CPU_ACCESS|
1230 RADEON_FLAG_NO_INTERPROCESS_SHARING);
1231 if (ctx->fence_bo)
1232 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1233 if (ctx->fence_map)
1234 memset(ctx->fence_map, 0, 4096);
1235 return (struct radeon_winsys_ctx *)ctx;
1236 error_create:
1237 FREE(ctx);
1238 return NULL;
1239 }
1240
1241 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1242 {
1243 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1244 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1245 amdgpu_cs_ctx_free(ctx->ctx);
1246 FREE(ctx);
1247 }
1248
1249 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1250 enum ring_type ring_type, int ring_index)
1251 {
1252 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1253 int ip_type = ring_to_hw_ip(ring_type);
1254
1255 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1256 uint32_t expired;
1257 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1258 1000000000ull, 0, &expired);
1259
1260 if (ret || !expired)
1261 return false;
1262 }
1263
1264 return true;
1265 }
1266
1267 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1268 {
1269 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1270 if (!sem)
1271 return NULL;
1272
1273 return (struct radeon_winsys_sem *)sem;
1274 }
1275
1276 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1277 {
1278 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1279 FREE(sem);
1280 }
1281
1282 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1283 uint32_t ip_type,
1284 uint32_t ring,
1285 struct radv_winsys_sem_info *sem_info)
1286 {
1287 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1288 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1289
1290 if (sem->context)
1291 return -EINVAL;
1292
1293 *sem = ctx->last_submission[ip_type][ring].fence;
1294 }
1295 return 0;
1296 }
1297
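/* Allocate and fill a syncobj chunk (wait or signal) for the raw CS ioctl. */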
1298 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1299 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1300 {
1301 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1302 if (!syncobj)
1303 return NULL;
1304
1305 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1306 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1307 sem->handle = counts->syncobj[i];
1308 }
1309
1310 chunk->chunk_id = chunk_id;
1311 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1312 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1313 return syncobj;
1314 }
1315
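/* Low-level submission through amdgpu_cs_submit_raw(): build the IB, user
 * fence, dependency and syncobj chunks from the request and semaphore info.
 */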
1316 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1317 struct amdgpu_cs_request *request,
1318 struct radv_winsys_sem_info *sem_info)
1319 {
1320 int r;
1321 int num_chunks;
1322 int size;
1323 bool user_fence;
1324 struct drm_amdgpu_cs_chunk *chunks;
1325 struct drm_amdgpu_cs_chunk_data *chunk_data;
1326 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1327 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1328 int i;
1329 struct amdgpu_cs_fence *sem;
1330
1331 user_fence = (request->fence_info.handle != NULL);
1332 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1333
1334 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1335
1336 size = request->number_of_ibs + (user_fence ? 1 : 0);
1337
1338 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1339
1340 num_chunks = request->number_of_ibs;
1341 for (i = 0; i < request->number_of_ibs; i++) {
1342 struct amdgpu_cs_ib_info *ib;
1343 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1344 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1345 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1346
1347 ib = &request->ibs[i];
1348
1349 chunk_data[i].ib_data._pad = 0;
1350 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1351 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1352 chunk_data[i].ib_data.ip_type = request->ip_type;
1353 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1354 chunk_data[i].ib_data.ring = request->ring;
1355 chunk_data[i].ib_data.flags = ib->flags;
1356 }
1357
1358 if (user_fence) {
1359 i = num_chunks++;
1360
1361 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1362 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1363 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1364
1365 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1366 &chunk_data[i]);
1367 }
1368
1369 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1370 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1371 &chunks[num_chunks],
1372 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1373 if (!wait_syncobj) {
1374 r = -ENOMEM;
1375 goto error_out;
1376 }
1377 num_chunks++;
1378
1379 if (sem_info->wait.sem_count == 0)
1380 sem_info->cs_emit_wait = false;
1381
1382 }
1383
1384 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1385 sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1386 int sem_count = 0;
1387
1388 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1389 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1390 if (!sem->context)
1391 continue;
1392 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1393
1394 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1395
1396 sem->context = NULL;
1397 }
1398 i = num_chunks++;
1399
1400 /* dependencies chunk */
1401 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1402 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1403 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1404
1405 sem_info->cs_emit_wait = false;
1406 }
1407
1408 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1409 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1410 &chunks[num_chunks],
1411 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1412 if (!signal_syncobj) {
1413 r = -ENOMEM;
1414 goto error_out;
1415 }
1416 num_chunks++;
1417 }
1418
1419 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1420 ctx->ctx,
1421 request->resources,
1422 num_chunks,
1423 chunks,
1424 &request->seq_no);
1425 error_out:
1426 free(wait_syncobj);
1427 free(signal_syncobj);
1428 return r;
1429 }
1430
1431 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1432 uint32_t *handle)
1433 {
1434 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1435 return amdgpu_cs_create_syncobj(ws->dev, handle);
1436 }
1437
1438 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1439 uint32_t handle)
1440 {
1441 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1442 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1443 }
1444
1445 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1446 uint32_t handle)
1447 {
1448 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1449 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1450 }
1451
1452 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1453 uint32_t handle)
1454 {
1455 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1456 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1457 }
1458
1459 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1460 uint32_t handle_count, bool wait_all, uint64_t timeout)
1461 {
1462 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1463 uint32_t tmp;
1464
1465 /* The timeouts are signed, while Vulkan timeouts are unsigned. */
1466 timeout = MIN2(timeout, INT64_MAX);
1467
1468 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1469 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1470 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1471 &tmp);
1472 if (ret == 0) {
1473 return true;
1474 } else if (ret == -1 && errno == ETIME) {
1475 return false;
1476 } else {
1477 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1478 return false;
1479 }
1480 }
1481
1482 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1483 uint32_t syncobj,
1484 int *fd)
1485 {
1486 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1487
1488 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1489 }
1490
1491 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1492 int fd,
1493 uint32_t *syncobj)
1494 {
1495 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1496
1497 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1498 }
1499
1500
1501 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1502 uint32_t syncobj,
1503 int *fd)
1504 {
1505 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1506
1507 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1508 }
1509
1510 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1511 uint32_t syncobj,
1512 int fd)
1513 {
1514 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1515
1516 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1517 }
1518
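/* Hook the command stream entry points into the winsys function table.
 *
 * Illustrative sketch of how a caller is expected to use these hooks
 * (hypothetical variable names, assuming a winsys, context, semaphore info
 * and BO list already exist):
 *
 *    struct radeon_cmdbuf *cs = ws->base.cs_create(&ws->base, RING_GFX);
 *    ... emit packets, cs_add_buffer() for every referenced BO ...
 *    if (ws->base.cs_finalize(cs))
 *            ws->base.cs_submit(ctx, 0, &cs, 1, NULL, NULL,
 *                               sem_info, bo_list, false, fence);
 *    ws->base.cs_destroy(cs);
 */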
1519 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1520 {
1521 ws->base.ctx_create = radv_amdgpu_ctx_create;
1522 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1523 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1524 ws->base.cs_create = radv_amdgpu_cs_create;
1525 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1526 ws->base.cs_grow = radv_amdgpu_cs_grow;
1527 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1528 ws->base.cs_reset = radv_amdgpu_cs_reset;
1529 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1530 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1531 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1532 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1533 ws->base.create_fence = radv_amdgpu_create_fence;
1534 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1535 ws->base.create_sem = radv_amdgpu_create_sem;
1536 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1537 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1538 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1539 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1540 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1541 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1542 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1543 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1544 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1545 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1546 ws->base.fence_wait = radv_amdgpu_fence_wait;
1547 ws->base.fences_wait = radv_amdgpu_fences_wait;
1548 }