radv/winsys: remove the max IBs per submit limit for the sysmem path
[mesa.git] src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "ac_debug.h"
33 #include "radv_radeon_winsys.h"
34 #include "radv_amdgpu_cs.h"
35 #include "radv_amdgpu_bo.h"
36 #include "sid.h"
37
38
39 enum {
40 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
41 };
42
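/* Per-queue command stream state. When IB buffer objects are supported
 * (use_ib_bos), commands are written directly into a GPU-visible IB and
 * replaced IBs are kept in old_ib_buffers; otherwise commands live in CPU
 * memory (base.buf) and filled-up buffers are stashed in old_cs_buffers
 * until submission (the "sysmem" path).
 */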
43 struct radv_amdgpu_cs {
44 struct radeon_cmdbuf base;
45 struct radv_amdgpu_winsys *ws;
46
47 struct amdgpu_cs_ib_info ib;
48
49 struct radeon_winsys_bo *ib_buffer;
50 uint8_t *ib_mapped;
51 unsigned max_num_buffers;
52 unsigned num_buffers;
53 amdgpu_bo_handle *handles;
54
55 struct radeon_winsys_bo **old_ib_buffers;
56 unsigned num_old_ib_buffers;
57 unsigned max_num_old_ib_buffers;
58 unsigned *ib_size_ptr;
59 bool failed;
60 bool is_chained;
61
62 int buffer_hash_table[1024];
63 unsigned hw_ip;
64
65 unsigned num_virtual_buffers;
66 unsigned max_num_virtual_buffers;
67 struct radeon_winsys_bo **virtual_buffers;
68 int *virtual_buffer_hash_table;
69
70 /* For chips that don't support chaining. */
71 struct radeon_cmdbuf *old_cs_buffers;
72 unsigned num_old_cs_buffers;
73 };
74
75 static inline struct radv_amdgpu_cs *
76 radv_amdgpu_cs(struct radeon_cmdbuf *base)
77 {
78 return (struct radv_amdgpu_cs*)base;
79 }
80
81 static int ring_to_hw_ip(enum ring_type ring)
82 {
83 switch (ring) {
84 case RING_GFX:
85 return AMDGPU_HW_IP_GFX;
86 case RING_DMA:
87 return AMDGPU_HW_IP_DMA;
88 case RING_COMPUTE:
89 return AMDGPU_HW_IP_COMPUTE;
90 default:
91 unreachable("unsupported ring");
92 }
93 }
94
95 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
96 uint32_t ip_type,
97 uint32_t ring,
98 struct radv_winsys_sem_info *sem_info);
99 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
100 struct amdgpu_cs_request *request,
101 struct radv_winsys_sem_info *sem_info);
102
103 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
104 struct radv_amdgpu_fence *fence,
105 struct amdgpu_cs_request *req)
106 {
107 fence->fence.context = ctx->ctx;
108 fence->fence.ip_type = req->ip_type;
109 fence->fence.ip_instance = req->ip_instance;
110 fence->fence.ring = req->ring;
111 fence->fence.fence = req->seq_no;
112 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring)); /* fence_map is indexed in uint64_t slots */
113 }
114
115 static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
116 {
117 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
118 return (struct radeon_winsys_fence*)fence;
119 }
120
121 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
122 {
123 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
124 free(fence);
125 }
126
127 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
128 struct radeon_winsys_fence *_fence,
129 bool absolute,
130 uint64_t timeout)
131 {
132 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
133 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
134 int r;
135 uint32_t expired = 0;
136
137 if (fence->user_ptr) {
138 if (*fence->user_ptr >= fence->fence.fence)
139 return true;
140 if (!absolute && !timeout)
141 return false;
142 }
143
144 /* Now use the libdrm query. */
145 r = amdgpu_cs_query_fence_status(&fence->fence,
146 timeout,
147 flags,
148 &expired);
149
150 if (r) {
151 fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
152 return false;
153 }
154
155 if (expired)
156 return true;
157
158 return false;
159 }
160
161
162 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
163 struct radeon_winsys_fence *const *_fences,
164 uint32_t fence_count,
165 bool wait_all,
166 uint64_t timeout)
167 {
168 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
169 int r;
170 uint32_t expired = 0, first = 0;
171
172 if (!fences)
173 return false;
174
175 for (uint32_t i = 0; i < fence_count; ++i)
176 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
177
178 /* Now use the libdrm query. */
179 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
180 timeout, &expired, &first);
181
182 free(fences);
183 if (r) {
184 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
185 return false;
186 }
187
188 if (expired)
189 return true;
190
191 return false;
192 }
193
194 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
195 {
196 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
197
198 if (cs->ib_buffer)
199 cs->ws->base.buffer_destroy(cs->ib_buffer);
200 else
201 free(cs->base.buf);
202
203 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
204 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
205
206 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
207 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
208 free(rcs->buf);
209 }
210
211 free(cs->old_cs_buffers);
212 free(cs->old_ib_buffers);
213 free(cs->virtual_buffers);
214 free(cs->virtual_buffer_hash_table);
215 free(cs->handles);
216 free(cs);
217 }
218
219 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
220 enum ring_type ring_type)
221 {
222 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
223 cs->buffer_hash_table[i] = -1;
224
225 cs->hw_ip = ring_to_hw_ip(ring_type);
226 }
227
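/* Create a command stream. With use_ib_bos the CS is backed by a GTT
 * buffer object that the GPU can fetch directly; otherwise it is a plain
 * malloc'ed dword array that gets copied into GPU memory at submit time.
 */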
228 static struct radeon_cmdbuf *
229 radv_amdgpu_cs_create(struct radeon_winsys *ws,
230 enum ring_type ring_type)
231 {
232 struct radv_amdgpu_cs *cs;
233 uint32_t ib_size = 20 * 1024 * 4;
234 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
235 if (!cs)
236 return NULL;
237
238 cs->ws = radv_amdgpu_winsys(ws);
239 radv_amdgpu_init_cs(cs, ring_type);
240
241 if (cs->ws->use_ib_bos) {
242 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
243 RADEON_DOMAIN_GTT,
244 RADEON_FLAG_CPU_ACCESS |
245 RADEON_FLAG_NO_INTERPROCESS_SHARING |
246 RADEON_FLAG_READ_ONLY);
247 if (!cs->ib_buffer) {
248 free(cs);
249 return NULL;
250 }
251
252 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
253 if (!cs->ib_mapped) {
254 ws->buffer_destroy(cs->ib_buffer);
255 free(cs);
256 return NULL;
257 }
258
259 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
260 cs->base.buf = (uint32_t *)cs->ib_mapped;
261 cs->base.max_dw = ib_size / 4 - 4;
262 cs->ib_size_ptr = &cs->ib.size;
263 cs->ib.size = 0;
264
265 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
266 } else {
267 cs->base.buf = malloc(16384);
268 cs->base.max_dw = 4096;
269 if (!cs->base.buf) {
270 free(cs);
271 return NULL;
272 }
273 }
274
275 return &cs->base;
276 }
277
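/* Grow the command stream. On the sysmem path the buffer is reallocated
 * up to a hard per-IB limit (limit_dws); once that limit is hit the
 * current buffer is stashed in old_cs_buffers and a fresh one is started,
 * so a single CS can span several IBs. On the IB path a new buffer object
 * is allocated and the old one is chained to it with an INDIRECT_BUFFER
 * packet.
 */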
278 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
279 {
280 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
281
282 if (cs->failed) {
283 cs->base.cdw = 0;
284 return;
285 }
286
287 if (!cs->ws->use_ib_bos) {
288 const uint64_t limit_dws = 0xffff8;
289 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
290 MIN2(cs->base.max_dw * 2, limit_dws));
291
292 /* The total ib size cannot exceed limit_dws dwords. */
293 if (ib_dws > limit_dws)
294 {
295 /* The maximum size in dwords has been reached,
296 * try to allocate a new one.
297 */
298 cs->old_cs_buffers =
299 realloc(cs->old_cs_buffers,
300 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
301 if (!cs->old_cs_buffers) {
302 cs->failed = true;
303 cs->base.cdw = 0;
304 return;
305 }
306
307 /* Store the current one for submitting it later. */
308 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
309 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
310 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
311 cs->num_old_cs_buffers++;
312
313 /* Reset the cs, it will be re-allocated below. */
314 cs->base.cdw = 0;
315 cs->base.buf = NULL;
316
317 /* Re-compute the number of dwords to allocate. */
318 ib_dws = MAX2(cs->base.cdw + min_size,
319 MIN2(cs->base.max_dw * 2, limit_dws));
320 if (ib_dws > limit_dws) {
321 fprintf(stderr, "amdgpu: Too high number of "
322 "dwords to allocate\n");
323 cs->failed = true;
324 return;
325 }
326 }
327
328 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
329 if (new_buf) {
330 cs->base.buf = new_buf;
331 cs->base.max_dw = ib_dws;
332 } else {
333 cs->failed = true;
334 cs->base.cdw = 0;
335 }
336 return;
337 }
338
339 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
340
341 /* max that fits in the chain size field. */
342 ib_size = MIN2(ib_size, 0xfffff);
343
344 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
345 radeon_emit(&cs->base, 0xffff1000);
346
347 *cs->ib_size_ptr |= cs->base.cdw + 4;
348
349 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
350 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
351 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
352 cs->max_num_old_ib_buffers * sizeof(void*));
353 }
354
355 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
356
357 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
358 RADEON_DOMAIN_GTT,
359 RADEON_FLAG_CPU_ACCESS |
360 RADEON_FLAG_NO_INTERPROCESS_SHARING |
361 RADEON_FLAG_READ_ONLY);
362
363 if (!cs->ib_buffer) {
364 cs->base.cdw = 0;
365 cs->failed = true;
366 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
367 }
368
369 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
370 if (!cs->ib_mapped) {
371 cs->ws->base.buffer_destroy(cs->ib_buffer);
372 cs->base.cdw = 0;
373 cs->failed = true;
374 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
375 }
376
377 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
378
379 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
380 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
381 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
382 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
383
384 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
385
386 cs->base.buf = (uint32_t *)cs->ib_mapped;
387 cs->base.cdw = 0;
388 cs->base.max_dw = ib_size / 4 - 4;
389
390 }
391
392 static bool radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
393 {
394 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
395
396 if (cs->ws->use_ib_bos) {
397 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
398 radeon_emit(&cs->base, 0xffff1000);
399
400 *cs->ib_size_ptr |= cs->base.cdw;
401
402 cs->is_chained = false;
403 }
404
405 return !cs->failed;
406 }
407
408 static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
409 {
410 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
411 cs->base.cdw = 0;
412 cs->failed = false;
413
414 for (unsigned i = 0; i < cs->num_buffers; ++i) {
415 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
416 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
417 cs->buffer_hash_table[hash] = -1;
418 }
419
420 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
421 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
422 cs->virtual_buffer_hash_table[hash] = -1;
423 }
424
425 cs->num_buffers = 0;
426 cs->num_virtual_buffers = 0;
427
428 if (cs->ws->use_ib_bos) {
429 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
430
431 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
432 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
433
434 cs->num_old_ib_buffers = 0;
435 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
436 cs->ib_size_ptr = &cs->ib.size;
437 cs->ib.size = 0;
438 } else {
439 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
440 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
441 free(rcs->buf);
442 }
443
444 free(cs->old_cs_buffers);
445 cs->old_cs_buffers = NULL;
446 cs->num_old_cs_buffers = 0;
447 }
448 }
449
450 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
451 amdgpu_bo_handle bo)
452 {
453 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
454 int index = cs->buffer_hash_table[hash];
455
456 if (index == -1)
457 return -1;
458
459 if (cs->handles[index] == bo)
460 return index;
461
462 for (unsigned i = 0; i < cs->num_buffers; ++i) {
463 if (cs->handles[i] == bo) {
464 cs->buffer_hash_table[hash] = i;
465 return i;
466 }
467 }
468
469 return -1;
470 }
471
472 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
473 amdgpu_bo_handle bo)
474 {
475 unsigned hash;
476 int index = radv_amdgpu_cs_find_buffer(cs, bo);
477
478 if (index != -1)
479 return;
480
481 if (cs->num_buffers == cs->max_num_buffers) {
482 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
483 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
484 cs->max_num_buffers = new_count;
485 }
486
487 cs->handles[cs->num_buffers] = bo;
488
489 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
490 cs->buffer_hash_table[hash] = cs->num_buffers;
491
492 ++cs->num_buffers;
493 }
494
495 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
496 struct radeon_winsys_bo *bo)
497 {
498 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
499 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
500
501
502 if (!cs->virtual_buffer_hash_table) {
503 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
504 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
505 cs->virtual_buffer_hash_table[i] = -1;
506 }
507
508 if (cs->virtual_buffer_hash_table[hash] >= 0) {
509 int idx = cs->virtual_buffer_hash_table[hash];
510 if (cs->virtual_buffers[idx] == bo) {
511 return;
512 }
513 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
514 if (cs->virtual_buffers[i] == bo) {
515 cs->virtual_buffer_hash_table[hash] = i;
516 return;
517 }
518 }
519 }
520
521 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
522 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
523 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * cs->max_num_virtual_buffers);
524 }
525
526 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
527
528 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
529 ++cs->num_virtual_buffers;
530
531 }
532
533 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
534 struct radeon_winsys_bo *_bo)
535 {
536 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
537 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
538
539 if (bo->is_virtual) {
540 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
541 return;
542 }
543
544 if (bo->base.is_local)
545 return;
546
547 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo);
548 }
549
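/* Execute a secondary command stream from a primary one: the child's
 * buffers are added to the parent, then the child is either referenced
 * with an INDIRECT_BUFFER packet (IB path) or copied inline (sysmem path).
 */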
550 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
551 struct radeon_cmdbuf *_child)
552 {
553 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
554 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
555
556 for (unsigned i = 0; i < child->num_buffers; ++i) {
557 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i]);
558 }
559
560 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
561 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
562 }
563
564 if (parent->ws->use_ib_bos) {
565 if (parent->base.cdw + 4 > parent->base.max_dw)
566 radv_amdgpu_cs_grow(&parent->base, 4);
567
568 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
569 radeon_emit(&parent->base, child->ib.ib_mc_address);
570 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
571 radeon_emit(&parent->base, child->ib.size);
572 } else {
573 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
574 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
575
576 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
577 parent->base.cdw += child->base.cdw;
578 }
579 }
580
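/* Build the amdgpu buffer list for a submission. With debug_all_bos every
 * buffer known to the winsys is included; a single CS without extras uses
 * its handle array directly; otherwise the handles from all CSes, virtual
 * buffers, extra BOs and the external radv_bo_list are merged with
 * duplicates removed.
 */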
581 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
582 struct radeon_cmdbuf **cs_array,
583 unsigned count,
584 struct radv_amdgpu_winsys_bo **extra_bo_array,
585 unsigned num_extra_bo,
586 struct radeon_cmdbuf *extra_cs,
587 const struct radv_winsys_bo_list *radv_bo_list,
588 amdgpu_bo_list_handle *bo_list)
589 {
590 int r = 0;
591
592 if (ws->debug_all_bos) {
593 struct radv_amdgpu_winsys_bo *bo;
594 amdgpu_bo_handle *handles;
595 unsigned num = 0;
596
597 pthread_mutex_lock(&ws->global_bo_list_lock);
598
599 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
600 if (!handles) {
601 pthread_mutex_unlock(&ws->global_bo_list_lock);
602 return -ENOMEM;
603 }
604
605 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
606 assert(num < ws->num_buffers);
607 handles[num++] = bo->bo;
608 }
609
610 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
611 handles, NULL,
612 bo_list);
613 free(handles);
614 pthread_mutex_unlock(&ws->global_bo_list_lock);
615 } else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
616 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
617 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
618 if (cs->num_buffers == 0) {
619 *bo_list = 0;
620 return 0;
621 }
622 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
623 NULL, bo_list);
624 } else {
625 unsigned total_buffer_count = num_extra_bo;
626 unsigned unique_bo_count = num_extra_bo;
627 for (unsigned i = 0; i < count; ++i) {
628 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
629 total_buffer_count += cs->num_buffers;
630 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
631 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
632 }
633
634 if (extra_cs) {
635 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
636 }
637
638 if (radv_bo_list) {
639 total_buffer_count += radv_bo_list->count;
640 }
641
642 if (total_buffer_count == 0) {
643 *bo_list = 0;
644 return 0;
645 }
646 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
647 if (!handles) {
648 free(handles);
649 return -ENOMEM;
650 }
651
652 for (unsigned i = 0; i < num_extra_bo; i++) {
653 handles[i] = extra_bo_array[i]->bo;
654 }
655
656 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
657 struct radv_amdgpu_cs *cs;
658
659 if (i == count)
660 cs = (struct radv_amdgpu_cs*)extra_cs;
661 else
662 cs = (struct radv_amdgpu_cs*)cs_array[i];
663
664 if (!cs->num_buffers)
665 continue;
666
667 if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
668 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
669 unique_bo_count = cs->num_buffers;
670 continue;
671 }
672 int unique_bo_so_far = unique_bo_count;
673 for (unsigned j = 0; j < cs->num_buffers; ++j) {
674 bool found = false;
675 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
676 if (handles[k] == cs->handles[j]) {
677 found = true;
678 break;
679 }
680 }
681 if (!found) {
682 handles[unique_bo_count] = cs->handles[j];
683 ++unique_bo_count;
684 }
685 }
686 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
687 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
688 for(unsigned k = 0; k < virtual_bo->bo_count; ++k) {
689 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
690 bool found = false;
691 for (unsigned m = 0; m < unique_bo_count; ++m) {
692 if (handles[m] == bo->bo) {
693 found = true;
694 break;
695 }
696 }
697 if (!found) {
698 handles[unique_bo_count] = bo->bo;
699 ++unique_bo_count;
700 }
701 }
702 }
703 }
704
705 if (radv_bo_list) {
706 unsigned unique_bo_so_far = unique_bo_count;
707 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
708 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
709 bool found = false;
710 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
711 if (bo->bo == handles[j]) {
712 found = true;
713 break;
714 }
715 }
716 if (!found) {
717 handles[unique_bo_count] = bo->bo;
718 ++unique_bo_count;
719 }
720 }
721 }
722
723 if (unique_bo_count > 0) {
724 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
725 NULL, bo_list);
726 } else {
727 *bo_list = 0;
728 }
729
730 free(handles);
731 }
732
733 return r;
734 }
735
736 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
737 {
738 struct amdgpu_cs_fence_info ret = {0};
739 if (ctx->fence_map) {
740 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
741 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
742 }
743 return ret;
744 }
745
746 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
747 struct amdgpu_cs_request *request)
748 {
749 radv_amdgpu_request_to_fence(ctx,
750 &ctx->last_submission[request->ip_type][request->ring],
751 request);
752 }
753
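/* Chained submission: each CS is patched to jump to the next one with an
 * INDIRECT_BUFFER packet, so the whole batch goes to the kernel as a
 * single request with at most two IBs (optional preamble + first CS).
 */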
754 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
755 int queue_idx,
756 struct radv_winsys_sem_info *sem_info,
757 const struct radv_winsys_bo_list *radv_bo_list,
758 struct radeon_cmdbuf **cs_array,
759 unsigned cs_count,
760 struct radeon_cmdbuf *initial_preamble_cs,
761 struct radeon_cmdbuf *continue_preamble_cs,
762 struct radeon_winsys_fence *_fence)
763 {
764 int r;
765 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
766 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
767 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
768 amdgpu_bo_list_handle bo_list;
769 struct amdgpu_cs_request request = {0};
770 struct amdgpu_cs_ib_info ibs[2];
771 unsigned number_of_ibs = 1;
772
773 for (unsigned i = cs_count; i--;) {
774 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
775
776 if (cs->is_chained) {
777 *cs->ib_size_ptr -= 4;
778 cs->is_chained = false;
779 }
780
781 if (i + 1 < cs_count) {
782 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
783 assert(cs->base.cdw + 4 <= cs->base.max_dw);
784
785 cs->is_chained = true;
786 *cs->ib_size_ptr += 4;
787
788 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
789 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
790 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
791 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
792 }
793 }
794
795 /* Create a buffer object list. */
796 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
797 initial_preamble_cs, radv_bo_list,
798 &bo_list);
799 if (r) {
800 fprintf(stderr, "amdgpu: buffer list creation failed for the "
801 "chained submission(%d)\n", r);
802 return r;
803 }
804
805 /* Configure the CS request. */
806 if (initial_preamble_cs) {
807 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
808 ibs[1] = cs0->ib;
809 number_of_ibs++;
810 } else {
811 ibs[0] = cs0->ib;
812 }
813
814 request.ip_type = cs0->hw_ip;
815 request.ring = queue_idx;
816 request.number_of_ibs = number_of_ibs;
817 request.ibs = ibs;
818 request.resources = bo_list;
819 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
820
821 /* Submit the CS. */
822 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
823 if (r) {
824 if (r == -ENOMEM)
825 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
826 else
827 fprintf(stderr, "amdgpu: The CS has been rejected, "
828 "see dmesg for more information.\n");
829 }
830
831 if (bo_list)
832 amdgpu_bo_list_destroy(bo_list);
833
834 if (r)
835 return r;
836
837 if (fence)
838 radv_amdgpu_request_to_fence(ctx, fence, &request);
839
840 radv_assign_last_submit(ctx, &request);
841
842 return 0;
843 }
844
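/* Fallback submission for CSes that cannot be chained: every CS becomes
 * its own IB entry in a single request, optionally preceded by the
 * preamble IB.
 */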
845 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
846 int queue_idx,
847 struct radv_winsys_sem_info *sem_info,
848 const struct radv_winsys_bo_list *radv_bo_list,
849 struct radeon_cmdbuf **cs_array,
850 unsigned cs_count,
851 struct radeon_cmdbuf *initial_preamble_cs,
852 struct radeon_cmdbuf *continue_preamble_cs,
853 struct radeon_winsys_fence *_fence)
854 {
855 int r;
856 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
857 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
858 amdgpu_bo_list_handle bo_list;
859 struct amdgpu_cs_request request = {};
860 struct amdgpu_cs_ib_info *ibs;
861 struct radv_amdgpu_cs *cs0;
862 unsigned number_of_ibs;
863
864 assert(cs_count);
865 cs0 = radv_amdgpu_cs(cs_array[0]);
866
867 /* Compute the number of IBs for this submit. */
868 number_of_ibs = cs_count + !!initial_preamble_cs;
869
870 /* Create a buffer object list. */
871 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
872 initial_preamble_cs, radv_bo_list,
873 &bo_list);
874 if (r) {
875 fprintf(stderr, "amdgpu: buffer list creation failed "
876 "for the fallback submission (%d)\n", r);
877 return r;
878 }
879
880 ibs = malloc(number_of_ibs * sizeof(*ibs));
881 if (!ibs) {
882 if (bo_list)
883 amdgpu_bo_list_destroy(bo_list);
884 return -ENOMEM;
885 }
886
887 /* Configure the CS request. */
888 if (initial_preamble_cs)
889 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
890
891 for (unsigned i = 0; i < cs_count; i++) {
892 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
893
894 ibs[i + !!initial_preamble_cs] = cs->ib;
895
896 if (cs->is_chained) {
897 *cs->ib_size_ptr -= 4;
898 cs->is_chained = false;
899 }
900 }
901
902 request.ip_type = cs0->hw_ip;
903 request.ring = queue_idx;
904 request.resources = bo_list;
905 request.number_of_ibs = number_of_ibs;
906 request.ibs = ibs;
907 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
908
909 /* Submit the CS. */
910 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
911 if (r) {
912 if (r == -ENOMEM)
913 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
914 else
915 fprintf(stderr, "amdgpu: The CS has been rejected, "
916 "see dmesg for more information.\n");
917 }
918
919 if (bo_list)
920 amdgpu_bo_list_destroy(bo_list);
921 free(ibs);
922
923 if (r)
924 return r;
925
926 if (fence)
927 radv_amdgpu_request_to_fence(ctx, fence, &request);
928
929 radv_assign_last_submit(ctx, &request);
930
931 return 0;
932 }
933
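/* Sysmem submission (no IB buffer objects): command data is copied into
 * freshly allocated GTT buffers and padded to an 8-dword boundary. When a
 * CS overflowed the per-IB dword limit, each stashed buffer plus the
 * current one becomes its own IB, so there is no fixed cap on the number
 * of IBs per submit; otherwise as many CSes as fit are packed into one IB.
 */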
934 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
935 int queue_idx,
936 struct radv_winsys_sem_info *sem_info,
937 const struct radv_winsys_bo_list *radv_bo_list,
938 struct radeon_cmdbuf **cs_array,
939 unsigned cs_count,
940 struct radeon_cmdbuf *initial_preamble_cs,
941 struct radeon_cmdbuf *continue_preamble_cs,
942 struct radeon_winsys_fence *_fence)
943 {
944 int r;
945 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
946 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
947 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
948 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
949 amdgpu_bo_list_handle bo_list;
950 struct amdgpu_cs_request request;
951 uint32_t pad_word = 0xffff1000U;
952 bool emit_signal_sem = sem_info->cs_emit_signal;
953
954 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
955 pad_word = 0x80000000;
956
957 assert(cs_count);
958
959 for (unsigned i = 0; i < cs_count;) {
960 struct amdgpu_cs_ib_info *ibs;
961 struct radeon_winsys_bo **bos;
962 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
963 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
964 unsigned number_of_ibs;
965 uint32_t *ptr;
966 unsigned cnt = 0;
967 unsigned size = 0;
968 unsigned pad_words = 0;
969
970 /* Compute the number of IBs for this submit. */
971 number_of_ibs = cs->num_old_cs_buffers + 1;
972
973 ibs = malloc(number_of_ibs * sizeof(*ibs));
974 if (!ibs)
975 return -ENOMEM;
976
977 bos = malloc(number_of_ibs * sizeof(*bos));
978 if (!bos) {
979 free(ibs);
980 return -ENOMEM;
981 }
982
983 if (number_of_ibs > 1) {
984 /* Special path when the maximum size in dwords has
985 * been reached because we need to handle more than one
986 * IB per submit.
987 */
988 struct radeon_cmdbuf **new_cs_array;
989 unsigned idx = 0;
990
991 new_cs_array = malloc(number_of_ibs *
992 sizeof(*new_cs_array));
993 assert(new_cs_array);
994
995 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
996 new_cs_array[idx++] = &cs->old_cs_buffers[j];
997 new_cs_array[idx++] = cs_array[i];
998
999 for (unsigned j = 0; j < number_of_ibs; j++) {
1000 struct radeon_cmdbuf *rcs = new_cs_array[j];
1001 bool needs_preamble = preamble_cs && j == 0;
1002 unsigned size = 0; pad_words = 0; /* count padding for this IB only */
1003
1004 if (needs_preamble)
1005 size += preamble_cs->cdw;
1006 size += rcs->cdw;
1007
1008 assert(size < 0xffff8);
1009
1010 while (!size || (size & 7)) {
1011 size++;
1012 pad_words++;
1013 }
1014
1015 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1016 RADEON_DOMAIN_GTT,
1017 RADEON_FLAG_CPU_ACCESS |
1018 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1019 RADEON_FLAG_READ_ONLY);
1020 ptr = ws->buffer_map(bos[j]);
1021
1022 if (needs_preamble) {
1023 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1024 ptr += preamble_cs->cdw;
1025 }
1026
1027 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1028 ptr += rcs->cdw;
1029
1030 for (unsigned k = 0; k < pad_words; ++k)
1031 *ptr++ = pad_word;
1032
1033 ibs[j].size = size;
1034 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1035 }
1036
1037 cnt++;
1038 free(new_cs_array);
1039 } else {
1040 if (preamble_cs)
1041 size += preamble_cs->cdw;
1042
1043 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1044 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1045 ++cnt;
1046 }
1047
1048 while (!size || (size & 7)) {
1049 size++;
1050 pad_words++;
1051 }
1052 assert(cnt);
1053
1054 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1055 RADEON_DOMAIN_GTT,
1056 RADEON_FLAG_CPU_ACCESS |
1057 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1058 RADEON_FLAG_READ_ONLY);
1059 ptr = ws->buffer_map(bos[0]);
1060
1061 if (preamble_cs) {
1062 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1063 ptr += preamble_cs->cdw;
1064 }
1065
1066 for (unsigned j = 0; j < cnt; ++j) {
1067 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1068 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1069 ptr += cs->base.cdw;
1070
1071 }
1072
1073 for (unsigned j = 0; j < pad_words; ++j)
1074 *ptr++ = pad_word;
1075
1076 ibs[0].size = size;
1077 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1078 }
1079
1080 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
1081 (struct radv_amdgpu_winsys_bo **)bos,
1082 number_of_ibs, preamble_cs,
1083 radv_bo_list, &bo_list);
1084 if (r) {
1085 fprintf(stderr, "amdgpu: buffer list creation failed "
1086 "for the sysmem submission (%d)\n", r);
1087 free(ibs);
1088 free(bos);
1089 return r;
1090 }
1091
1092 memset(&request, 0, sizeof(request));
1093
1094 request.ip_type = cs0->hw_ip;
1095 request.ring = queue_idx;
1096 request.resources = bo_list;
1097 request.number_of_ibs = number_of_ibs;
1098 request.ibs = ibs;
1099 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1100
1101 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1102 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1103 if (r) {
1104 if (r == -ENOMEM)
1105 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1106 else
1107 fprintf(stderr, "amdgpu: The CS has been rejected, "
1108 "see dmesg for more information.\n");
1109 }
1110
1111 if (bo_list)
1112 amdgpu_bo_list_destroy(bo_list);
1113
1114 for (unsigned j = 0; j < number_of_ibs; j++) {
1115 ws->buffer_destroy(bos[j]);
1116 }
1117
1118 free(ibs);
1119 free(bos);
1120
1121 if (r)
1122 return r;
1123
1124 i += cnt;
1125 }
1126 if (fence)
1127 radv_amdgpu_request_to_fence(ctx, fence, &request);
1128
1129 radv_assign_last_submit(ctx, &request);
1130
1131 return 0;
1132 }
1133
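/* Top-level submit entry point: picks the sysmem, chained or fallback path
 * depending on IB support, patchability and the batchchain option, then
 * records the last-submission fence into the semaphores to signal.
 */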
1134 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1135 int queue_idx,
1136 struct radeon_cmdbuf **cs_array,
1137 unsigned cs_count,
1138 struct radeon_cmdbuf *initial_preamble_cs,
1139 struct radeon_cmdbuf *continue_preamble_cs,
1140 struct radv_winsys_sem_info *sem_info,
1141 const struct radv_winsys_bo_list *bo_list,
1142 bool can_patch,
1143 struct radeon_winsys_fence *_fence)
1144 {
1145 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1146 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1147 int ret;
1148
1149 assert(sem_info);
1150 if (!cs->ws->use_ib_bos) {
1151 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1152 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1153 } else if (can_patch && cs->ws->batchchain) {
1154 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1155 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1156 } else {
1157 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1158 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1159 }
1160
1161 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1162 return ret;
1163 }
1164
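/* Map a GPU virtual address back to a CPU pointer by searching the current
 * and old IB buffers (and, with debug_all_bos, the global BO list). Used by
 * the IB dump below.
 */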
1165 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1166 {
1167 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1168 void *ret = NULL;
1169
1170 if (!cs->ib_buffer)
1171 return NULL;
1172 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1173 struct radv_amdgpu_winsys_bo *bo;
1174
1175 bo = (struct radv_amdgpu_winsys_bo*)
1176 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1177 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1178 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1179 return (char *)ret + (addr - bo->base.va);
1180 }
1181 }
1182 if(cs->ws->debug_all_bos) {
1183 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1184 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1185 &cs->ws->global_bo_list, global_list_item) {
1186 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1187 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1188 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1189 return (char *)ret + (addr - bo->base.va);
1190 }
1191 }
1192 }
1193 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1194 }
1195 return ret;
1196 }
1197
1198 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1199 FILE* file,
1200 const int *trace_ids, int trace_id_count)
1201 {
1202 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1203 void *ib = cs->base.buf;
1204 int num_dw = cs->base.cdw;
1205
1206 if (cs->ws->use_ib_bos) {
1207 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1208 num_dw = cs->ib.size;
1209 }
1210 assert(ib);
1211 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1212 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1213 }
1214
1215 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1216 {
1217 switch (radv_priority) {
1218 case RADEON_CTX_PRIORITY_REALTIME:
1219 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1220 case RADEON_CTX_PRIORITY_HIGH:
1221 return AMDGPU_CTX_PRIORITY_HIGH;
1222 case RADEON_CTX_PRIORITY_MEDIUM:
1223 return AMDGPU_CTX_PRIORITY_NORMAL;
1224 case RADEON_CTX_PRIORITY_LOW:
1225 return AMDGPU_CTX_PRIORITY_LOW;
1226 default:
1227 unreachable("Invalid context priority");
1228 }
1229 }
1230
1231 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1232 enum radeon_ctx_priority priority)
1233 {
1234 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1235 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1236 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1237 int r;
1238
1239 if (!ctx)
1240 return NULL;
1241
1242 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1243 if (r) {
1244 fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1245 goto error_create;
1246 }
1247 ctx->ws = ws;
1248
1249 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1250 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1251 RADEON_DOMAIN_GTT,
1252 RADEON_FLAG_CPU_ACCESS|
1253 RADEON_FLAG_NO_INTERPROCESS_SHARING);
1254 if (ctx->fence_bo)
1255 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1256 if (ctx->fence_map)
1257 memset(ctx->fence_map, 0, 4096);
1258 return (struct radeon_winsys_ctx *)ctx;
1259 error_create:
1260 FREE(ctx);
1261 return NULL;
1262 }
1263
1264 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1265 {
1266 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1267 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1268 amdgpu_cs_ctx_free(ctx->ctx);
1269 FREE(ctx);
1270 }
1271
1272 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1273 enum ring_type ring_type, int ring_index)
1274 {
1275 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1276 int ip_type = ring_to_hw_ip(ring_type);
1277
1278 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1279 uint32_t expired;
1280 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1281 1000000000ull, 0, &expired);
1282
1283 if (ret || !expired)
1284 return false;
1285 }
1286
1287 return true;
1288 }
1289
1290 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1291 {
1292 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1293 if (!sem)
1294 return NULL;
1295
1296 return (struct radeon_winsys_sem *)sem;
1297 }
1298
1299 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1300 {
1301 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1302 FREE(sem);
1303 }
1304
1305 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1306 uint32_t ip_type,
1307 uint32_t ring,
1308 struct radv_winsys_sem_info *sem_info)
1309 {
1310 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1311 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1312
1313 if (sem->context)
1314 return -EINVAL;
1315
1316 *sem = ctx->last_submission[ip_type][ring].fence;
1317 }
1318 return 0;
1319 }
1320
1321 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1322 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1323 {
1324 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1325 if (!syncobj)
1326 return NULL;
1327
1328 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1329 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1330 sem->handle = counts->syncobj[i];
1331 }
1332
1333 chunk->chunk_id = chunk_id;
1334 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1335 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1336 return syncobj;
1337 }
1338
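/* Translate an amdgpu_cs_request plus semaphore info into raw CS chunks:
 * one IB chunk per IB, an optional user-fence chunk, syncobj wait/signal
 * chunks and a dependency chunk, then submit with amdgpu_cs_submit_raw().
 */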
1339 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1340 struct amdgpu_cs_request *request,
1341 struct radv_winsys_sem_info *sem_info)
1342 {
1343 int r;
1344 int num_chunks;
1345 int size;
1346 bool user_fence;
1347 struct drm_amdgpu_cs_chunk *chunks;
1348 struct drm_amdgpu_cs_chunk_data *chunk_data;
1349 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1350 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1351 int i;
1352 struct amdgpu_cs_fence *sem;
1353
1354 user_fence = (request->fence_info.handle != NULL);
1355 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1356
1357 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1358
1359 size = request->number_of_ibs + (user_fence ? 1 : 0);
1360
1361 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1362
1363 num_chunks = request->number_of_ibs;
1364 for (i = 0; i < request->number_of_ibs; i++) {
1365 struct amdgpu_cs_ib_info *ib;
1366 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1367 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1368 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1369
1370 ib = &request->ibs[i];
1371
1372 chunk_data[i].ib_data._pad = 0;
1373 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1374 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1375 chunk_data[i].ib_data.ip_type = request->ip_type;
1376 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1377 chunk_data[i].ib_data.ring = request->ring;
1378 chunk_data[i].ib_data.flags = ib->flags;
1379 }
1380
1381 if (user_fence) {
1382 i = num_chunks++;
1383
1384 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1385 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1386 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1387
1388 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1389 &chunk_data[i]);
1390 }
1391
1392 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1393 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1394 &chunks[num_chunks],
1395 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1396 if (!wait_syncobj) {
1397 r = -ENOMEM;
1398 goto error_out;
1399 }
1400 num_chunks++;
1401
1402 if (sem_info->wait.sem_count == 0)
1403 sem_info->cs_emit_wait = false;
1404
1405 }
1406
1407 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1408 sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1409 int sem_count = 0;
1410
1411 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1412 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1413 if (!sem->context)
1414 continue;
1415 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1416
1417 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1418
1419 sem->context = NULL;
1420 }
1421 i = num_chunks++;
1422
1423 /* dependencies chunk */
1424 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1425 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1426 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1427
1428 sem_info->cs_emit_wait = false;
1429 }
1430
1431 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1432 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1433 &chunks[num_chunks],
1434 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1435 if (!signal_syncobj) {
1436 r = -ENOMEM;
1437 goto error_out;
1438 }
1439 num_chunks++;
1440 }
1441
1442 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1443 ctx->ctx,
1444 request->resources,
1445 num_chunks,
1446 chunks,
1447 &request->seq_no);
1448 error_out:
1449 free(wait_syncobj);
1450 free(signal_syncobj);
1451 return r;
1452 }
1453
1454 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1455 uint32_t *handle)
1456 {
1457 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1458 return amdgpu_cs_create_syncobj(ws->dev, handle);
1459 }
1460
1461 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1462 uint32_t handle)
1463 {
1464 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1465 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1466 }
1467
1468 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1469 uint32_t handle)
1470 {
1471 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1472 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1473 }
1474
1475 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1476 uint32_t handle)
1477 {
1478 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1479 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1480 }
1481
1482 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1483 uint32_t handle_count, bool wait_all, uint64_t timeout)
1484 {
1485 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1486 uint32_t tmp;
1487
1488 /* The timeouts are signed, while vulkan timeouts are unsigned. */
1489 timeout = MIN2(timeout, INT64_MAX);
1490
1491 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1492 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1493 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1494 &tmp);
1495 if (ret == 0) {
1496 return true;
1497 } else if (ret == -1 && errno == ETIME) {
1498 return false;
1499 } else {
1500 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1501 return false;
1502 }
1503 }
1504
1505 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1506 uint32_t syncobj,
1507 int *fd)
1508 {
1509 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1510
1511 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1512 }
1513
1514 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1515 int fd,
1516 uint32_t *syncobj)
1517 {
1518 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1519
1520 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1521 }
1522
1523
1524 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1525 uint32_t syncobj,
1526 int *fd)
1527 {
1528 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1529
1530 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1531 }
1532
1533 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1534 uint32_t syncobj,
1535 int fd)
1536 {
1537 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1538
1539 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1540 }
1541
1542 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1543 {
1544 ws->base.ctx_create = radv_amdgpu_ctx_create;
1545 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1546 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1547 ws->base.cs_create = radv_amdgpu_cs_create;
1548 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1549 ws->base.cs_grow = radv_amdgpu_cs_grow;
1550 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1551 ws->base.cs_reset = radv_amdgpu_cs_reset;
1552 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1553 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1554 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1555 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1556 ws->base.create_fence = radv_amdgpu_create_fence;
1557 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1558 ws->base.create_sem = radv_amdgpu_create_sem;
1559 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1560 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1561 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1562 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1563 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1564 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1565 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1566 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1567 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1568 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1569 ws->base.fence_wait = radv_amdgpu_fence_wait;
1570 ws->base.fences_wait = radv_amdgpu_fences_wait;
1571 }