radv/winsys: Set winsys bo priority on creation.
[mesa.git] src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "ac_debug.h"
33 #include "radv_radeon_winsys.h"
34 #include "radv_amdgpu_cs.h"
35 #include "radv_amdgpu_bo.h"
36 #include "sid.h"
37
38
39 enum {
40 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
41 };
42
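/* Winsys command stream: wraps a radeon_cmdbuf and tracks the IB BO(s)
 * backing it, the set of buffers referenced by the stream (with a small
 * hash table for fast lookups), and fallback CPU-side storage for chips
 * that cannot chain IBs. */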
43 struct radv_amdgpu_cs {
44 struct radeon_cmdbuf base;
45 struct radv_amdgpu_winsys *ws;
46
47 struct amdgpu_cs_ib_info ib;
48
49 struct radeon_winsys_bo *ib_buffer;
50 uint8_t *ib_mapped;
51 unsigned max_num_buffers;
52 unsigned num_buffers;
53 amdgpu_bo_handle *handles;
54
55 struct radeon_winsys_bo **old_ib_buffers;
56 unsigned num_old_ib_buffers;
57 unsigned max_num_old_ib_buffers;
58 unsigned *ib_size_ptr;
59 bool failed;
60 bool is_chained;
61
62 int buffer_hash_table[1024];
63 unsigned hw_ip;
64
65 unsigned num_virtual_buffers;
66 unsigned max_num_virtual_buffers;
67 struct radeon_winsys_bo **virtual_buffers;
68 int *virtual_buffer_hash_table;
69
70 /* For chips that don't support chaining. */
71 struct radeon_cmdbuf *old_cs_buffers;
72 unsigned num_old_cs_buffers;
73 };
74
75 static inline struct radv_amdgpu_cs *
76 radv_amdgpu_cs(struct radeon_cmdbuf *base)
77 {
78 return (struct radv_amdgpu_cs*)base;
79 }
80
81 static int ring_to_hw_ip(enum ring_type ring)
82 {
83 switch (ring) {
84 case RING_GFX:
85 return AMDGPU_HW_IP_GFX;
86 case RING_DMA:
87 return AMDGPU_HW_IP_DMA;
88 case RING_COMPUTE:
89 return AMDGPU_HW_IP_COMPUTE;
90 default:
91 unreachable("unsupported ring");
92 }
93 }
94
95 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
96 uint32_t ip_type,
97 uint32_t ring,
98 struct radv_winsys_sem_info *sem_info);
99 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
100 struct amdgpu_cs_request *request,
101 struct radv_winsys_sem_info *sem_info);
102
103 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
104 struct radv_amdgpu_fence *fence,
105 struct amdgpu_cs_request *req)
106 {
107 fence->fence.context = ctx->ctx;
108 fence->fence.ip_type = req->ip_type;
109 fence->fence.ip_instance = req->ip_instance;
110 fence->fence.ring = req->ring;
111 fence->fence.fence = req->seq_no;
112 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
113 }
114
115 static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
116 {
117 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
118 return (struct radeon_winsys_fence*)fence;
119 }
120
121 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
122 {
123 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
124 free(fence);
125 }
126
127 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
128 struct radeon_winsys_fence *_fence,
129 bool absolute,
130 uint64_t timeout)
131 {
132 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
133 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
134 int r;
135 uint32_t expired = 0;
136
137 if (fence->user_ptr) {
138 if (*fence->user_ptr >= fence->fence.fence)
139 return true;
140 if (!absolute && !timeout)
141 return false;
142 }
143
144 /* Now use the libdrm query. */
145 r = amdgpu_cs_query_fence_status(&fence->fence,
146 timeout,
147 flags,
148 &expired);
149
150 if (r) {
151 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
152 return false;
153 }
154
155 if (expired)
156 return true;
157
158 return false;
159 }
160
161
162 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
163 struct radeon_winsys_fence *const *_fences,
164 uint32_t fence_count,
165 bool wait_all,
166 uint64_t timeout)
167 {
168 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
169 int r;
170 uint32_t expired = 0, first = 0;
171
172 if (!fences)
173 return false;
174
175 for (uint32_t i = 0; i < fence_count; ++i)
176 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
177
178 /* Now use the libdrm query. */
179 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
180 timeout, &expired, &first);
181
182 free(fences);
183 if (r) {
184 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
185 return false;
186 }
187
188 if (expired)
189 return true;
190
191 return false;
192 }
193
194 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
195 {
196 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
197
198 if (cs->ib_buffer)
199 cs->ws->base.buffer_destroy(cs->ib_buffer);
200 else
201 free(cs->base.buf);
202
203 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
204 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
205
206 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
207 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
208 free(rcs->buf);
209 }
210
211 free(cs->old_cs_buffers);
212 free(cs->old_ib_buffers);
213 free(cs->virtual_buffers);
214 free(cs->virtual_buffer_hash_table);
215 free(cs->handles);
216 free(cs);
217 }
218
219 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
220 enum ring_type ring_type)
221 {
222 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
223 cs->buffer_hash_table[i] = -1;
224
225 cs->hw_ip = ring_to_hw_ip(ring_type);
226 }
227
228 static struct radeon_cmdbuf *
229 radv_amdgpu_cs_create(struct radeon_winsys *ws,
230 enum ring_type ring_type)
231 {
232 struct radv_amdgpu_cs *cs;
233 uint32_t ib_size = 20 * 1024 * 4;
234 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
235 if (!cs)
236 return NULL;
237
238 cs->ws = radv_amdgpu_winsys(ws);
239 radv_amdgpu_init_cs(cs, ring_type);
240
241 if (cs->ws->use_ib_bos) {
242 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
243 RADEON_DOMAIN_GTT,
244 RADEON_FLAG_CPU_ACCESS |
245 RADEON_FLAG_NO_INTERPROCESS_SHARING |
246 RADEON_FLAG_READ_ONLY,
247 RADV_BO_PRIORITY_CS);
248 if (!cs->ib_buffer) {
249 free(cs);
250 return NULL;
251 }
252
253 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
254 if (!cs->ib_mapped) {
255 ws->buffer_destroy(cs->ib_buffer);
256 free(cs);
257 return NULL;
258 }
259
260 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
261 cs->base.buf = (uint32_t *)cs->ib_mapped;
262 cs->base.max_dw = ib_size / 4 - 4;
263 cs->ib_size_ptr = &cs->ib.size;
264 cs->ib.size = 0;
265
266 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
267 } else {
268 cs->base.buf = malloc(16384);
269 cs->base.max_dw = 4096;
270 if (!cs->base.buf) {
271 free(cs);
272 return NULL;
273 }
274 }
275
276 return &cs->base;
277 }
278
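/* Ensure at least min_size dwords of space remain. Without IB BOs the
 * CPU buffer is reallocated, spilling into old_cs_buffers once the
 * per-IB dword limit is hit; with IB BOs the current IB is padded to
 * the required alignment and chained to a freshly allocated IB via an
 * INDIRECT_BUFFER packet. */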
279 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
280 {
281 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
282
283 if (cs->failed) {
284 cs->base.cdw = 0;
285 return;
286 }
287
288 if (!cs->ws->use_ib_bos) {
289 const uint64_t limit_dws = 0xffff8;
290 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
291 MIN2(cs->base.max_dw * 2, limit_dws));
292
293 /* The total ib size cannot exceed limit_dws dwords. */
294 if (ib_dws > limit_dws)
295 {
296 /* The maximum size in dwords has been reached,
297 * try to allocate a new one.
298 */
299 cs->old_cs_buffers =
300 realloc(cs->old_cs_buffers,
301 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
302 if (!cs->old_cs_buffers) {
303 cs->failed = true;
304 cs->base.cdw = 0;
305 return;
306 }
307
308 /* Store the current one for submitting it later. */
309 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
310 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
311 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
312 cs->num_old_cs_buffers++;
313
314 /* Reset the cs, it will be re-allocated below. */
315 cs->base.cdw = 0;
316 cs->base.buf = NULL;
317
318 /* Re-compute the number of dwords to allocate. */
319 ib_dws = MAX2(cs->base.cdw + min_size,
320 MIN2(cs->base.max_dw * 2, limit_dws));
321 if (ib_dws > limit_dws) {
322 fprintf(stderr, "amdgpu: Too many dwords "
323 "to allocate\n");
324 cs->failed = true;
325 return;
326 }
327 }
328
329 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
330 if (new_buf) {
331 cs->base.buf = new_buf;
332 cs->base.max_dw = ib_dws;
333 } else {
334 cs->failed = true;
335 cs->base.cdw = 0;
336 }
337 return;
338 }
339
340 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
341
342 /* max that fits in the chain size field. */
343 ib_size = MIN2(ib_size, 0xfffff);
344
345 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
346 radeon_emit(&cs->base, 0xffff1000);
347
348 *cs->ib_size_ptr |= cs->base.cdw + 4;
349
350 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
351 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
352 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
353 cs->max_num_old_ib_buffers * sizeof(void*));
354 }
355
356 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
357
358 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
359 RADEON_DOMAIN_GTT,
360 RADEON_FLAG_CPU_ACCESS |
361 RADEON_FLAG_NO_INTERPROCESS_SHARING |
362 RADEON_FLAG_READ_ONLY,
363 RADV_BO_PRIORITY_CS);
364
365 if (!cs->ib_buffer) {
366 cs->base.cdw = 0;
367 cs->failed = true;
368 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
369 }
370
371 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
372 if (!cs->ib_mapped) {
373 cs->ws->base.buffer_destroy(cs->ib_buffer);
374 cs->base.cdw = 0;
375 cs->failed = true;
376 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
377 }
378
379 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
380
381 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
382 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
383 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
384 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
385
386 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
387
388 cs->base.buf = (uint32_t *)cs->ib_mapped;
389 cs->base.cdw = 0;
390 cs->base.max_dw = ib_size / 4 - 4;
391
392 }
393
394 static bool radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
395 {
396 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
397
398 if (cs->ws->use_ib_bos) {
399 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
400 radeon_emit(&cs->base, 0xffff1000);
401
402 *cs->ib_size_ptr |= cs->base.cdw;
403
404 cs->is_chained = false;
405 }
406
407 return !cs->failed;
408 }
409
410 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
411 {
412 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
413 cs->base.cdw = 0;
414 cs->failed = false;
415
416 for (unsigned i = 0; i < cs->num_buffers; ++i) {
417 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
418 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
419 cs->buffer_hash_table[hash] = -1;
420 }
421
422 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
423 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
424 cs->virtual_buffer_hash_table[hash] = -1;
425 }
426
427 cs->num_buffers = 0;
428 cs->num_virtual_buffers = 0;
429
430 if (cs->ws->use_ib_bos) {
431 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
432
433 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
434 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
435
436 cs->num_old_ib_buffers = 0;
437 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
438 cs->ib_size_ptr = &cs->ib.size;
439 cs->ib.size = 0;
440 } else {
441 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
442 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
443 free(rcs->buf);
444 }
445
446 free(cs->old_cs_buffers);
447 cs->old_cs_buffers = NULL;
448 cs->num_old_cs_buffers = 0;
449 }
450 }
451
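/* Look up a BO handle in the CS buffer list, using the hash table as a
 * cache and falling back to a linear search. Returns the index or -1. */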
452 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
453 amdgpu_bo_handle bo)
454 {
455 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
456 int index = cs->buffer_hash_table[hash];
457
458 if (index == -1)
459 return -1;
460
461 if (cs->handles[index] == bo)
462 return index;
463
464 for (unsigned i = 0; i < cs->num_buffers; ++i) {
465 if (cs->handles[i] == bo) {
466 cs->buffer_hash_table[hash] = i;
467 return i;
468 }
469 }
470
471 return -1;
472 }
473
474 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
475 amdgpu_bo_handle bo)
476 {
477 unsigned hash;
478 int index = radv_amdgpu_cs_find_buffer(cs, bo);
479
480 if (index != -1)
481 return;
482
483 if (cs->num_buffers == cs->max_num_buffers) {
484 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
485 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
486 cs->max_num_buffers = new_count;
487 }
488
489 cs->handles[cs->num_buffers] = bo;
490
491 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
492 cs->buffer_hash_table[hash] = cs->num_buffers;
493
494 ++cs->num_buffers;
495 }
496
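/* Track a virtual (sparse) buffer on the CS. Virtual buffers are kept in
 * a separate list because their backing BOs are only resolved when the
 * BO list for a submission is built. */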
497 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
498 struct radeon_winsys_bo *bo)
499 {
500 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
501 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
502
503
504 if (!cs->virtual_buffer_hash_table) {
505 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
506 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
507 cs->virtual_buffer_hash_table[i] = -1;
508 }
509
510 if (cs->virtual_buffer_hash_table[hash] >= 0) {
511 int idx = cs->virtual_buffer_hash_table[hash];
512 if (cs->virtual_buffers[idx] == bo) {
513 return;
514 }
515 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
516 if (cs->virtual_buffers[i] == bo) {
517 cs->virtual_buffer_hash_table[hash] = i;
518 return;
519 }
520 }
521 }
522
523 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
524 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
525 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
526 }
527
528 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
529
530 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
531 ++cs->num_virtual_buffers;
532
533 }
534
535 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
536 struct radeon_winsys_bo *_bo)
537 {
538 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
539 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
540
541 if (bo->is_virtual) {
542 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
543 return;
544 }
545
546 if (bo->base.is_local)
547 return;
548
549 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo);
550 }
551
552 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
553 struct radeon_cmdbuf *_child)
554 {
555 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
556 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
557
558 for (unsigned i = 0; i < child->num_buffers; ++i) {
559 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i]);
560 }
561
562 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
563 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
564 }
565
566 if (parent->ws->use_ib_bos) {
567 if (parent->base.cdw + 4 > parent->base.max_dw)
568 radv_amdgpu_cs_grow(&parent->base, 4);
569
570 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
571 radeon_emit(&parent->base, child->ib.ib_mc_address);
572 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
573 radeon_emit(&parent->base, child->ib.size);
574 } else {
575 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
576 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
577
578 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
579 parent->base.cdw += child->base.cdw;
580 }
581 }
582
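/* Build the amdgpu BO list for a submission. With debug_all_bos every BO
 * known to the winsys is included; the common single-CS case reuses the
 * CS handle array directly; otherwise the handles from all CSes, extra
 * BOs, virtual buffer backings and the per-submit BO list are merged
 * with duplicates removed. */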
583 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
584 struct radeon_cmdbuf **cs_array,
585 unsigned count,
586 struct radv_amdgpu_winsys_bo **extra_bo_array,
587 unsigned num_extra_bo,
588 struct radeon_cmdbuf *extra_cs,
589 const struct radv_winsys_bo_list *radv_bo_list,
590 amdgpu_bo_list_handle *bo_list)
591 {
592 int r = 0;
593
594 if (ws->debug_all_bos) {
595 struct radv_amdgpu_winsys_bo *bo;
596 amdgpu_bo_handle *handles;
597 unsigned num = 0;
598
599 pthread_mutex_lock(&ws->global_bo_list_lock);
600
601 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
602 if (!handles) {
603 pthread_mutex_unlock(&ws->global_bo_list_lock);
604 return -ENOMEM;
605 }
606
607 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
608 assert(num < ws->num_buffers);
609 handles[num++] = bo->bo;
610 }
611
612 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
613 handles, NULL,
614 bo_list);
615 free(handles);
616 pthread_mutex_unlock(&ws->global_bo_list_lock);
617 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
618 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
619 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
620 if (cs->num_buffers == 0) {
621 *bo_list = 0;
622 return 0;
623 }
624 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
625 NULL, bo_list);
626 } else {
627 unsigned total_buffer_count = num_extra_bo;
628 unsigned unique_bo_count = num_extra_bo;
629 for (unsigned i = 0; i < count; ++i) {
630 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
631 total_buffer_count += cs->num_buffers;
632 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
633 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
634 }
635
636 if (extra_cs) {
637 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
638 }
639
640 if (radv_bo_list) {
641 total_buffer_count += radv_bo_list->count;
642 }
643
644 if (total_buffer_count == 0) {
645 *bo_list = 0;
646 return 0;
647 }
648 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
649 if (!handles)
651 return -ENOMEM;
653
654 for (unsigned i = 0; i < num_extra_bo; i++) {
655 handles[i] = extra_bo_array[i]->bo;
656 }
657
658 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
659 struct radv_amdgpu_cs *cs;
660
661 if (i == count)
662 cs = (struct radv_amdgpu_cs*)extra_cs;
663 else
664 cs = (struct radv_amdgpu_cs*)cs_array[i];
665
666 if (!cs->num_buffers)
667 continue;
668
669 if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
670 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
671 unique_bo_count = cs->num_buffers;
672 continue;
673 }
674 int unique_bo_so_far = unique_bo_count;
675 for (unsigned j = 0; j < cs->num_buffers; ++j) {
676 bool found = false;
677 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
678 if (handles[k] == cs->handles[j]) {
679 found = true;
680 break;
681 }
682 }
683 if (!found) {
684 handles[unique_bo_count] = cs->handles[j];
685 ++unique_bo_count;
686 }
687 }
688 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
689 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
690 for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
691 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
692 bool found = false;
693 for (unsigned m = 0; m < unique_bo_count; ++m) {
694 if (handles[m] == bo->bo) {
695 found = true;
696 break;
697 }
698 }
699 if (!found) {
700 handles[unique_bo_count] = bo->bo;
701 ++unique_bo_count;
702 }
703 }
704 }
705 }
706
707 if (radv_bo_list) {
708 unsigned unique_bo_so_far = unique_bo_count;
709 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
710 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
711 bool found = false;
712 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
713 if (bo->bo == handles[j]) {
714 found = true;
715 break;
716 }
717 }
718 if (!found) {
719 handles[unique_bo_count] = bo->bo;
720 ++unique_bo_count;
721 }
722 }
723 }
724
725 if (unique_bo_count > 0) {
726 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
727 NULL, bo_list);
728 } else {
729 *bo_list = 0;
730 }
731
732 free(handles);
733 }
734
735 return r;
736 }
737
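/* Describe where the kernel should write the user fence value for this
 * queue: an offset into the per-context fence BO, if one was allocated. */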
738 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
739 {
740 struct amdgpu_cs_fence_info ret = {0};
741 if (ctx->fence_map) {
742 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
743 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
744 }
745 return ret;
746 }
747
748 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
749 struct amdgpu_cs_request *request)
750 {
751 radv_amdgpu_request_to_fence(ctx,
752 &ctx->last_submission[request->ip_type][request->ring],
753 request);
754 }
755
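/* Submit a batch of command streams by chaining them together: all but
 * the last CS get an INDIRECT_BUFFER packet appended that points at the
 * next one, so the whole batch is submitted as a single IB (plus an
 * optional preamble IB). */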
756 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
757 int queue_idx,
758 struct radv_winsys_sem_info *sem_info,
759 const struct radv_winsys_bo_list *radv_bo_list,
760 struct radeon_cmdbuf **cs_array,
761 unsigned cs_count,
762 struct radeon_cmdbuf *initial_preamble_cs,
763 struct radeon_cmdbuf *continue_preamble_cs,
764 struct radeon_winsys_fence *_fence)
765 {
766 int r;
767 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
768 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
769 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
770 amdgpu_bo_list_handle bo_list;
771 struct amdgpu_cs_request request = {0};
772 struct amdgpu_cs_ib_info ibs[2];
773 unsigned number_of_ibs = 1;
774
775 for (unsigned i = cs_count; i--;) {
776 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
777
778 if (cs->is_chained) {
779 *cs->ib_size_ptr -= 4;
780 cs->is_chained = false;
781 }
782
783 if (i + 1 < cs_count) {
784 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
785 assert(cs->base.cdw + 4 <= cs->base.max_dw);
786
787 cs->is_chained = true;
788 *cs->ib_size_ptr += 4;
789
790 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
791 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
792 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
793 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
794 }
795 }
796
797 /* Create a buffer object list. */
798 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
799 initial_preamble_cs, radv_bo_list,
800 &bo_list);
801 if (r) {
802 fprintf(stderr, "amdgpu: buffer list creation failed for the "
803 "chained submission (%d)\n", r);
804 return r;
805 }
806
807 /* Configure the CS request. */
808 if (initial_preamble_cs) {
809 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
810 ibs[1] = cs0->ib;
811 number_of_ibs++;
812 } else {
813 ibs[0] = cs0->ib;
814 }
815
816 request.ip_type = cs0->hw_ip;
817 request.ring = queue_idx;
818 request.number_of_ibs = number_of_ibs;
819 request.ibs = ibs;
820 request.resources = bo_list;
821 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
822
823 /* Submit the CS. */
824 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
825 if (r) {
826 if (r == -ENOMEM)
827 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
828 else
829 fprintf(stderr, "amdgpu: The CS has been rejected, "
830 "see dmesg for more information.\n");
831 }
832
833 if (bo_list)
834 amdgpu_bo_list_destroy(bo_list);
835
836 if (r)
837 return r;
838
839 if (fence)
840 radv_amdgpu_request_to_fence(ctx, fence, &request);
841
842 radv_assign_last_submit(ctx, &request);
843
844 return 0;
845 }
846
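/* Submit a batch without chaining: every CS (plus the optional preamble)
 * becomes its own IB in a single amdgpu_cs_request. */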
847 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
848 int queue_idx,
849 struct radv_winsys_sem_info *sem_info,
850 const struct radv_winsys_bo_list *radv_bo_list,
851 struct radeon_cmdbuf **cs_array,
852 unsigned cs_count,
853 struct radeon_cmdbuf *initial_preamble_cs,
854 struct radeon_cmdbuf *continue_preamble_cs,
855 struct radeon_winsys_fence *_fence)
856 {
857 int r;
858 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
859 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
860 amdgpu_bo_list_handle bo_list;
861 struct amdgpu_cs_request request = {};
862 struct amdgpu_cs_ib_info *ibs;
863 struct radv_amdgpu_cs *cs0;
864 unsigned number_of_ibs;
865
866 assert(cs_count);
867 cs0 = radv_amdgpu_cs(cs_array[0]);
868
869 /* Compute the number of IBs for this submit. */
870 number_of_ibs = cs_count + !!initial_preamble_cs;
871
872 /* Create a buffer object list. */
873 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
874 initial_preamble_cs, radv_bo_list,
875 &bo_list);
876 if (r) {
877 fprintf(stderr, "amdgpu: buffer list creation failed "
878 "for the fallback submission (%d)\n", r);
879 return r;
880 }
881
882 ibs = malloc(number_of_ibs * sizeof(*ibs));
883 if (!ibs) {
884 if (bo_list)
885 amdgpu_bo_list_destroy(bo_list);
886 return -ENOMEM;
887 }
888
889 /* Configure the CS request. */
890 if (initial_preamble_cs)
891 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
892
893 for (unsigned i = 0; i < cs_count; i++) {
894 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
895
896 ibs[i + !!initial_preamble_cs] = cs->ib;
897
898 if (cs->is_chained) {
899 *cs->ib_size_ptr -= 4;
900 cs->is_chained = false;
901 }
902 }
903
904 request.ip_type = cs0->hw_ip;
905 request.ring = queue_idx;
906 request.resources = bo_list;
907 request.number_of_ibs = number_of_ibs;
908 request.ibs = ibs;
909 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
910
911 /* Submit the CS. */
912 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
913 if (r) {
914 if (r == -ENOMEM)
915 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
916 else
917 fprintf(stderr, "amdgpu: The CS has been rejected, "
918 "see dmesg for more information.\n");
919 }
920
921 if (bo_list)
922 amdgpu_bo_list_destroy(bo_list);
923 free(ibs);
924
925 if (r)
926 return r;
927
928 if (fence)
929 radv_amdgpu_request_to_fence(ctx, fence, &request);
930
931 radv_assign_last_submit(ctx, &request);
932
933 return 0;
934 }
935
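/* Submission path for winsyses without IB BOs: the CPU-side command
 * streams are copied into freshly allocated GTT buffers, padded to the
 * required alignment, and submitted in groups that fit the per-IB
 * dword limit. */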
936 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
937 int queue_idx,
938 struct radv_winsys_sem_info *sem_info,
939 const struct radv_winsys_bo_list *radv_bo_list,
940 struct radeon_cmdbuf **cs_array,
941 unsigned cs_count,
942 struct radeon_cmdbuf *initial_preamble_cs,
943 struct radeon_cmdbuf *continue_preamble_cs,
944 struct radeon_winsys_fence *_fence)
945 {
946 int r;
947 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
948 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
949 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
950 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
951 amdgpu_bo_list_handle bo_list;
952 struct amdgpu_cs_request request;
953 uint32_t pad_word = 0xffff1000U;
954 bool emit_signal_sem = sem_info->cs_emit_signal;
955
956 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
957 pad_word = 0x80000000;
958
959 assert(cs_count);
960
961 for (unsigned i = 0; i < cs_count;) {
962 struct amdgpu_cs_ib_info *ibs;
963 struct radeon_winsys_bo **bos;
964 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
965 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
966 unsigned number_of_ibs;
967 uint32_t *ptr;
968 unsigned cnt = 0;
969 unsigned size = 0;
970 unsigned pad_words = 0;
971
972 /* Compute the number of IBs for this submit. */
973 number_of_ibs = cs->num_old_cs_buffers + 1;
974
975 ibs = malloc(number_of_ibs * sizeof(*ibs));
976 if (!ibs)
977 return -ENOMEM;
978
979 bos = malloc(number_of_ibs * sizeof(*bos));
980 if (!bos) {
981 free(ibs);
982 return -ENOMEM;
983 }
984
985 if (number_of_ibs > 1) {
986 /* Special path when the maximum size in dwords has
987 * been reached because we need to handle more than one
988 * IB per submit.
989 */
990 struct radeon_cmdbuf **new_cs_array;
991 unsigned idx = 0;
992
/* Allocate one extra slot because the current CS is appended to the
 * array after the old CS buffers. */
993 new_cs_array = malloc((cs->num_old_cs_buffers + 1) *
994 sizeof(*new_cs_array));
995 assert(new_cs_array);
996
997 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
998 new_cs_array[idx++] = &cs->old_cs_buffers[j];
999 new_cs_array[idx++] = cs_array[i];
1000
1001 for (unsigned j = 0; j < number_of_ibs; j++) {
1002 struct radeon_cmdbuf *rcs = new_cs_array[j];
1003 bool needs_preamble = preamble_cs && j == 0;
1004 unsigned size = 0;
1005
1006 if (needs_preamble)
1007 size += preamble_cs->cdw;
1008 size += rcs->cdw;
1009
1010 assert(size < 0xffff8);
1011
/* Pad each IB separately; carrying the count over from the previous
 * IB would make the padding copy below overrun this IB's buffer. */
pad_words = 0;
1012 while (!size || (size & 7)) {
1013 size++;
1014 pad_words++;
1015 }
1016
1017 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1018 RADEON_DOMAIN_GTT,
1019 RADEON_FLAG_CPU_ACCESS |
1020 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1021 RADEON_FLAG_READ_ONLY,
1022 RADV_BO_PRIORITY_CS);
1023 ptr = ws->buffer_map(bos[j]);
1024
1025 if (needs_preamble) {
1026 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1027 ptr += preamble_cs->cdw;
1028 }
1029
1030 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1031 ptr += rcs->cdw;
1032
1033 for (unsigned k = 0; k < pad_words; ++k)
1034 *ptr++ = pad_word;
1035
1036 ibs[j].size = size;
1037 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1038 }
1039
1040 cnt++;
1041 free(new_cs_array);
1042 } else {
1043 if (preamble_cs)
1044 size += preamble_cs->cdw;
1045
1046 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1047 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1048 ++cnt;
1049 }
1050
1051 while (!size || (size & 7)) {
1052 size++;
1053 pad_words++;
1054 }
1055 assert(cnt);
1056
1057 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1058 RADEON_DOMAIN_GTT,
1059 RADEON_FLAG_CPU_ACCESS |
1060 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1061 RADEON_FLAG_READ_ONLY,
1062 RADV_BO_PRIORITY_CS);
1063 ptr = ws->buffer_map(bos[0]);
1064
1065 if (preamble_cs) {
1066 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1067 ptr += preamble_cs->cdw;
1068 }
1069
1070 for (unsigned j = 0; j < cnt; ++j) {
1071 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1072 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1073 ptr += cs->base.cdw;
1074
1075 }
1076
1077 for (unsigned j = 0; j < pad_words; ++j)
1078 *ptr++ = pad_word;
1079
1080 ibs[0].size = size;
1081 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1082 }
1083
1084 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
1085 (struct radv_amdgpu_winsys_bo **)bos,
1086 number_of_ibs, preamble_cs,
1087 radv_bo_list, &bo_list);
1088 if (r) {
1089 fprintf(stderr, "amdgpu: buffer list creation failed "
1090 "for the sysmem submission (%d)\n", r);
1091 free(ibs);
1092 free(bos);
1093 return r;
1094 }
1095
1096 memset(&request, 0, sizeof(request));
1097
1098 request.ip_type = cs0->hw_ip;
1099 request.ring = queue_idx;
1100 request.resources = bo_list;
1101 request.number_of_ibs = number_of_ibs;
1102 request.ibs = ibs;
1103 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1104
1105 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1106 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1107 if (r) {
1108 if (r == -ENOMEM)
1109 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1110 else
1111 fprintf(stderr, "amdgpu: The CS has been rejected, "
1112 "see dmesg for more information.\n");
1113 }
1114
1115 if (bo_list)
1116 amdgpu_bo_list_destroy(bo_list);
1117
1118 for (unsigned j = 0; j < number_of_ibs; j++) {
1119 ws->buffer_destroy(bos[j]);
1120 }
1121
1122 free(ibs);
1123 free(bos);
1124
1125 if (r)
1126 return r;
1127
1128 i += cnt;
1129 }
1130 if (fence)
1131 radv_amdgpu_request_to_fence(ctx, fence, &request);
1132
1133 radv_assign_last_submit(ctx, &request);
1134
1135 return 0;
1136 }
1137
1138 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1139 int queue_idx,
1140 struct radeon_cmdbuf **cs_array,
1141 unsigned cs_count,
1142 struct radeon_cmdbuf *initial_preamble_cs,
1143 struct radeon_cmdbuf *continue_preamble_cs,
1144 struct radv_winsys_sem_info *sem_info,
1145 const struct radv_winsys_bo_list *bo_list,
1146 bool can_patch,
1147 struct radeon_winsys_fence *_fence)
1148 {
1149 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1150 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1151 int ret;
1152
1153 assert(sem_info);
1154 if (!cs->ws->use_ib_bos) {
1155 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1156 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1157 } else if (can_patch && cs->ws->batchchain) {
1158 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1159 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1160 } else {
1161 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1162 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1163 }
1164
1165 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1166 return ret;
1167 }
1168
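/* Translate a GPU VA into a CPU pointer for IB parsing, by searching the
 * current and old IB buffers and, with debug_all_bos, the global BO list. */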
1169 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1170 {
1171 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1172 void *ret = NULL;
1173
1174 if (!cs->ib_buffer)
1175 return NULL;
1176 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1177 struct radv_amdgpu_winsys_bo *bo;
1178
1179 bo = (struct radv_amdgpu_winsys_bo*)
1180 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1181 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1182 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1183 return (char *)ret + (addr - bo->base.va);
1184 }
1185 }
1186 if(cs->ws->debug_all_bos) {
1187 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1188 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1189 &cs->ws->global_bo_list, global_list_item) {
1190 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1191 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1192 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1193 return (char *)ret + (addr - bo->base.va);
1194 }
1195 }
1196 }
1197 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1198 }
1199 return ret;
1200 }
1201
1202 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1203 FILE* file,
1204 const int *trace_ids, int trace_id_count)
1205 {
1206 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1207 void *ib = cs->base.buf;
1208 int num_dw = cs->base.cdw;
1209
1210 if (cs->ws->use_ib_bos) {
1211 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1212 num_dw = cs->ib.size;
1213 }
1214 assert(ib);
1215 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1216 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1217 }
1218
1219 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1220 {
1221 switch (radv_priority) {
1222 case RADEON_CTX_PRIORITY_REALTIME:
1223 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1224 case RADEON_CTX_PRIORITY_HIGH:
1225 return AMDGPU_CTX_PRIORITY_HIGH;
1226 case RADEON_CTX_PRIORITY_MEDIUM:
1227 return AMDGPU_CTX_PRIORITY_NORMAL;
1228 case RADEON_CTX_PRIORITY_LOW:
1229 return AMDGPU_CTX_PRIORITY_LOW;
1230 default:
1231 unreachable("Invalid context priority");
1232 }
1233 }
1234
1235 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1236 enum radeon_ctx_priority priority)
1237 {
1238 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1239 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1240 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1241 int r;
1242
1243 if (!ctx)
1244 return NULL;
1245
1246 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1247 if (r) {
1248 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1249 goto error_create;
1250 }
1251 ctx->ws = ws;
1252
1253 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1254 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1255 RADEON_DOMAIN_GTT,
1256 RADEON_FLAG_CPU_ACCESS |
1257 RADEON_FLAG_NO_INTERPROCESS_SHARING,
1258 RADV_BO_PRIORITY_CS);
1259 if (ctx->fence_bo)
1260 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1261 if (ctx->fence_map)
1262 memset(ctx->fence_map, 0, 4096);
1263 return (struct radeon_winsys_ctx *)ctx;
1264 error_create:
1265 FREE(ctx);
1266 return NULL;
1267 }
1268
1269 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1270 {
1271 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1272 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1273 amdgpu_cs_ctx_free(ctx->ctx);
1274 FREE(ctx);
1275 }
1276
1277 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1278 enum ring_type ring_type, int ring_index)
1279 {
1280 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1281 int ip_type = ring_to_hw_ip(ring_type);
1282
1283 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1284 uint32_t expired;
1285 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1286 1000000000ull, 0, &expired);
1287
1288 if (ret || !expired)
1289 return false;
1290 }
1291
1292 return true;
1293 }
1294
1295 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1296 {
1297 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1298 if (!sem)
1299 return NULL;
1300
1301 return (struct radeon_winsys_sem *)sem;
1302 }
1303
1304 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1305 {
1306 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1307 FREE(sem);
1308 }
1309
1310 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1311 uint32_t ip_type,
1312 uint32_t ring,
1313 struct radv_winsys_sem_info *sem_info)
1314 {
1315 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1316 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1317
1318 if (sem->context)
1319 return -EINVAL;
1320
1321 *sem = ctx->last_submission[ip_type][ring].fence;
1322 }
1323 return 0;
1324 }
1325
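/* Build a syncobj wait/signal chunk for amdgpu_cs_submit_raw from the
 * winsys semaphore counts. The returned array must be freed by the
 * caller. */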
1326 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1327 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1328 {
1329 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1330 if (!syncobj)
1331 return NULL;
1332
1333 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1334 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1335 sem->handle = counts->syncobj[i];
1336 }
1337
1338 chunk->chunk_id = chunk_id;
1339 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1340 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1341 return syncobj;
1342 }
1343
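/* Low-level submission: translate the request and semaphore information
 * into raw CS chunks (IBs, optional user fence, syncobj and semaphore
 * dependencies) and hand them to amdgpu_cs_submit_raw. */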
1344 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1345 struct amdgpu_cs_request *request,
1346 struct radv_winsys_sem_info *sem_info)
1347 {
1348 int r;
1349 int num_chunks;
1350 int size;
1351 bool user_fence;
1352 struct drm_amdgpu_cs_chunk *chunks;
1353 struct drm_amdgpu_cs_chunk_data *chunk_data;
1354 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1355 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1356 int i;
1357 struct amdgpu_cs_fence *sem;
1358
1359 user_fence = (request->fence_info.handle != NULL);
1360 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1361
1362 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1363
1364 size = request->number_of_ibs + (user_fence ? 1 : 0);
1365
1366 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1367
1368 num_chunks = request->number_of_ibs;
1369 for (i = 0; i < request->number_of_ibs; i++) {
1370 struct amdgpu_cs_ib_info *ib;
1371 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1372 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1373 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1374
1375 ib = &request->ibs[i];
1376
1377 chunk_data[i].ib_data._pad = 0;
1378 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1379 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1380 chunk_data[i].ib_data.ip_type = request->ip_type;
1381 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1382 chunk_data[i].ib_data.ring = request->ring;
1383 chunk_data[i].ib_data.flags = ib->flags;
1384 }
1385
1386 if (user_fence) {
1387 i = num_chunks++;
1388
1389 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1390 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1391 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1392
1393 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1394 &chunk_data[i]);
1395 }
1396
1397 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1398 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1399 &chunks[num_chunks],
1400 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1401 if (!wait_syncobj) {
1402 r = -ENOMEM;
1403 goto error_out;
1404 }
1405 num_chunks++;
1406
1407 if (sem_info->wait.sem_count == 0)
1408 sem_info->cs_emit_wait = false;
1409
1410 }
1411
1412 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1413 sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1414 int sem_count = 0;
1415
1416 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1417 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1418 if (!sem->context)
1419 continue;
1420 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1421
1422 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1423
1424 sem->context = NULL;
1425 }
1426 i = num_chunks++;
1427
1428 /* dependencies chunk */
1429 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1430 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1431 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1432
1433 sem_info->cs_emit_wait = false;
1434 }
1435
1436 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1437 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1438 &chunks[num_chunks],
1439 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1440 if (!signal_syncobj) {
1441 r = -ENOMEM;
1442 goto error_out;
1443 }
1444 num_chunks++;
1445 }
1446
1447 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1448 ctx->ctx,
1449 request->resources,
1450 num_chunks,
1451 chunks,
1452 &request->seq_no);
1453 error_out:
1454 free(wait_syncobj);
1455 free(signal_syncobj);
1456 return r;
1457 }
1458
1459 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1460 uint32_t *handle)
1461 {
1462 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1463 return amdgpu_cs_create_syncobj(ws->dev, handle);
1464 }
1465
1466 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1467 uint32_t handle)
1468 {
1469 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1470 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1471 }
1472
1473 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1474 uint32_t handle)
1475 {
1476 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1477 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1478 }
1479
1480 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1481 uint32_t handle)
1482 {
1483 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1484 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1485 }
1486
1487 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1488 uint32_t handle_count, bool wait_all, uint64_t timeout)
1489 {
1490 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1491 uint32_t tmp;
1492
1493 /* The syncobj timeout is a signed 64-bit value, while Vulkan timeouts are unsigned, so clamp. */
1494 timeout = MIN2(timeout, INT64_MAX);
1495
1496 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1497 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1498 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1499 &tmp);
1500 if (ret == 0) {
1501 return true;
1502 } else if (ret == -1 && errno == ETIME) {
1503 return false;
1504 } else {
1505 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1506 return false;
1507 }
1508 }
1509
1510 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1511 uint32_t syncobj,
1512 int *fd)
1513 {
1514 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1515
1516 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1517 }
1518
1519 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1520 int fd,
1521 uint32_t *syncobj)
1522 {
1523 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1524
1525 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1526 }
1527
1528
1529 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1530 uint32_t syncobj,
1531 int *fd)
1532 {
1533 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1534
1535 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1536 }
1537
1538 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1539 uint32_t syncobj,
1540 int fd)
1541 {
1542 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1543
1544 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1545 }
1546
1547 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1548 {
1549 ws->base.ctx_create = radv_amdgpu_ctx_create;
1550 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1551 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1552 ws->base.cs_create = radv_amdgpu_cs_create;
1553 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1554 ws->base.cs_grow = radv_amdgpu_cs_grow;
1555 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1556 ws->base.cs_reset = radv_amdgpu_cs_reset;
1557 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1558 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1559 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1560 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1561 ws->base.create_fence = radv_amdgpu_create_fence;
1562 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1563 ws->base.create_sem = radv_amdgpu_create_sem;
1564 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1565 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1566 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1567 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1568 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1569 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1570 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1571 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1572 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1573 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1574 ws->base.fence_wait = radv_amdgpu_fence_wait;
1575 ws->base.fences_wait = radv_amdgpu_fences_wait;
1576 }