radv/winsys: Add priority handling during submit.
[mesa.git] src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "ac_debug.h"
33 #include "radv_radeon_winsys.h"
34 #include "radv_amdgpu_cs.h"
35 #include "radv_amdgpu_bo.h"
36 #include "sid.h"
37
38
enum {
	VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};

struct radv_amdgpu_cs {
	struct radeon_cmdbuf base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;

	/* Raw BO list entries (kernel handle + priority) referenced by this CS. */
	unsigned max_num_buffers;
	unsigned num_buffers;
	struct drm_amdgpu_bo_list_entry *handles;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	/* Dword that receives the size of the IB currently being recorded:
	 * either &ib.size, or the size field of the INDIRECT_BUFFER packet in
	 * the previous IB when chaining. */
	unsigned *ib_size_ptr;
	bool failed;
	bool is_chained;

	int buffer_hash_table[1024];
	unsigned hw_ip;

	unsigned num_virtual_buffers;
	unsigned max_num_virtual_buffers;
	struct radeon_winsys_bo **virtual_buffers;
	int *virtual_buffer_hash_table;

	/* For chips that don't support chaining. */
	struct radeon_cmdbuf *old_cs_buffers;
	unsigned num_old_cs_buffers;
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_cmdbuf *base)
{
	return (struct radv_amdgpu_cs*)base;
}

static int ring_to_hw_ip(enum ring_type ring)
{
	switch (ring) {
	case RING_GFX:
		return AMDGPU_HW_IP_GFX;
	case RING_DMA:
		return AMDGPU_HW_IP_DMA;
	case RING_COMPUTE:
		return AMDGPU_HW_IP_COMPUTE;
	default:
		unreachable("unsupported ring");
	}
}
94
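/* Internal description of one kernel command submission: which HW IP/ring it
 * targets, the raw BO list handle with the resources it uses, the IBs to
 * execute and the fence information returned by the kernel. */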
95 struct radv_amdgpu_cs_request {
96 /** Specify flags with additional information */
97 uint64_t flags;
98
99 /** Specify HW IP block type to which to send the IB. */
100 unsigned ip_type;
101
102 /** IP instance index if there are several IPs of the same type. */
103 unsigned ip_instance;
104
105 /**
106 * Specify ring index of the IP. We could have several rings
107 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
108 */
109 uint32_t ring;
110
111 /**
112 * List handle with resources used by this request. This is a raw
113 * bo list handle used by the kernel.
114 */
115 uint32_t resources;
116
117 /**
118 * Number of dependencies this Command submission needs to
119 * wait for before starting execution.
120 */
121 uint32_t number_of_dependencies;
122
123 /**
124 * Array of dependencies which need to be met before
125 * execution can start.
126 */
127 struct amdgpu_cs_fence *dependencies;
128
129 /** Number of IBs to submit in the field ibs. */
130 uint32_t number_of_ibs;
131
132 /**
133 * IBs to submit. Those IBs will be submit together as single entity
134 */
135 struct amdgpu_cs_ib_info *ibs;
136
137 /**
138 * The returned sequence number for the command submission
139 */
140 uint64_t seq_no;
141
142 /**
143 * The fence information
144 */
145 struct amdgpu_cs_fence_info fence_info;
146 };
147
148
149 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
150 uint32_t ip_type,
151 uint32_t ring,
152 struct radv_winsys_sem_info *sem_info);
153 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
154 struct radv_amdgpu_cs_request *request,
155 struct radv_winsys_sem_info *sem_info);
156
157 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
158 struct radv_amdgpu_fence *fence,
159 struct radv_amdgpu_cs_request *req)
160 {
161 fence->fence.context = ctx->ctx;
162 fence->fence.ip_type = req->ip_type;
163 fence->fence.ip_instance = req->ip_instance;
164 fence->fence.ring = req->ring;
165 fence->fence.fence = req->seq_no;
166 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
167 }
168
169 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
170 {
171 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
172 return (struct radeon_winsys_fence*)fence;
173 }
174
175 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
176 {
177 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
178 free(fence);
179 }
180
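/* Wait for a single fence: check the user fence mapping first to avoid an
 * ioctl, then fall back to amdgpu_cs_query_fence_status(). */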
181 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
182 struct radeon_winsys_fence *_fence,
183 bool absolute,
184 uint64_t timeout)
185 {
186 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
187 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
188 int r;
189 uint32_t expired = 0;
190
191 if (fence->user_ptr) {
192 if (*fence->user_ptr >= fence->fence.fence)
193 return true;
194 if (!absolute && !timeout)
195 return false;
196 }
197
198 /* Now use the libdrm query. */
199 r = amdgpu_cs_query_fence_status(&fence->fence,
200 timeout,
201 flags,
202 &expired);
203
204 if (r) {
205 fprintf(stderr, "amdgpu: radv_amdgpu_cs_query_fence_status failed.\n");
206 return false;
207 }
208
209 if (expired)
210 return true;
211
212 return false;
213 }
214
215
216 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
217 struct radeon_winsys_fence *const *_fences,
218 uint32_t fence_count,
219 bool wait_all,
220 uint64_t timeout)
221 {
222 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
223 int r;
224 uint32_t expired = 0, first = 0;
225
226 if (!fences)
227 return false;
228
229 for (uint32_t i = 0; i < fence_count; ++i)
230 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
231
232 /* Now use the libdrm query. */
233 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
234 timeout, &expired, &first);
235
236 free(fences);
237 if (r) {
238 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
239 return false;
240 }
241
242 if (expired)
243 return true;
244
245 return false;
246 }
247
248 static void radv_amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
249 {
250 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
251
252 if (cs->ib_buffer)
253 cs->ws->base.buffer_destroy(cs->ib_buffer);
254 else
255 free(cs->base.buf);
256
257 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
258 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
259
260 for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
261 struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
262 free(rcs->buf);
263 }
264
265 free(cs->old_cs_buffers);
266 free(cs->old_ib_buffers);
267 free(cs->virtual_buffers);
268 free(cs->virtual_buffer_hash_table);
269 free(cs->handles);
270 free(cs);
271 }
272
273 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
274 enum ring_type ring_type)
275 {
276 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
277 cs->buffer_hash_table[i] = -1;
278
279 cs->hw_ip = ring_to_hw_ip(ring_type);
280 }
281
282 static struct radeon_cmdbuf *
283 radv_amdgpu_cs_create(struct radeon_winsys *ws,
284 enum ring_type ring_type)
285 {
286 struct radv_amdgpu_cs *cs;
287 uint32_t ib_size = 20 * 1024 * 4;
288 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
289 if (!cs)
290 return NULL;
291
292 cs->ws = radv_amdgpu_winsys(ws);
293 radv_amdgpu_init_cs(cs, ring_type);
294
295 if (cs->ws->use_ib_bos) {
296 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
297 RADEON_DOMAIN_GTT,
298 RADEON_FLAG_CPU_ACCESS |
299 RADEON_FLAG_NO_INTERPROCESS_SHARING |
300 RADEON_FLAG_READ_ONLY,
301 RADV_BO_PRIORITY_CS);
302 if (!cs->ib_buffer) {
303 free(cs);
304 return NULL;
305 }
306
307 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
308 if (!cs->ib_mapped) {
309 ws->buffer_destroy(cs->ib_buffer);
310 free(cs);
311 return NULL;
312 }
313
314 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
315 cs->base.buf = (uint32_t *)cs->ib_mapped;
316 cs->base.max_dw = ib_size / 4 - 4;
317 cs->ib_size_ptr = &cs->ib.size;
318 cs->ib.size = 0;
319
320 ws->cs_add_buffer(&cs->base, cs->ib_buffer);
321 } else {
322 cs->base.buf = malloc(16384);
323 cs->base.max_dw = 4096;
324 if (!cs->base.buf) {
325 free(cs);
326 return NULL;
327 }
328 }
329
330 return &cs->base;
331 }
332
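/* Grow the command buffer when it runs out of space. With IB BOs this
 * allocates a fresh GTT buffer and chains into it with an INDIRECT_BUFFER
 * packet; without IB BO support the current buffer is either reallocated or
 * pushed onto old_cs_buffers and a new one is started. */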
333 static void radv_amdgpu_cs_grow(struct radeon_cmdbuf *_cs, size_t min_size)
334 {
335 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
336
337 if (cs->failed) {
338 cs->base.cdw = 0;
339 return;
340 }
341
342 if (!cs->ws->use_ib_bos) {
343 const uint64_t limit_dws = 0xffff8;
344 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
345 MIN2(cs->base.max_dw * 2, limit_dws));
346
347 /* The total ib size cannot exceed limit_dws dwords. */
348 if (ib_dws > limit_dws)
349 {
350 /* The maximum size in dwords has been reached,
351 * try to allocate a new one.
352 */
353 cs->old_cs_buffers =
354 realloc(cs->old_cs_buffers,
355 (cs->num_old_cs_buffers + 1) * sizeof(*cs->old_cs_buffers));
356 if (!cs->old_cs_buffers) {
357 cs->failed = true;
358 cs->base.cdw = 0;
359 return;
360 }
361
362 /* Store the current one for submitting it later. */
363 cs->old_cs_buffers[cs->num_old_cs_buffers].cdw = cs->base.cdw;
364 cs->old_cs_buffers[cs->num_old_cs_buffers].max_dw = cs->base.max_dw;
365 cs->old_cs_buffers[cs->num_old_cs_buffers].buf = cs->base.buf;
366 cs->num_old_cs_buffers++;
367
368 /* Reset the cs, it will be re-allocated below. */
369 cs->base.cdw = 0;
370 cs->base.buf = NULL;
371
372 /* Re-compute the number of dwords to allocate. */
373 ib_dws = MAX2(cs->base.cdw + min_size,
374 MIN2(cs->base.max_dw * 2, limit_dws));
375 if (ib_dws > limit_dws) {
376 fprintf(stderr, "amdgpu: Too high number of "
377 "dwords to allocate\n");
378 cs->failed = true;
379 return;
380 }
381 }
382
383 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
384 if (new_buf) {
385 cs->base.buf = new_buf;
386 cs->base.max_dw = ib_dws;
387 } else {
388 cs->failed = true;
389 cs->base.cdw = 0;
390 }
391 return;
392 }
393
394 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
395
396 /* max that fits in the chain size field. */
397 ib_size = MIN2(ib_size, 0xfffff);
398
399 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
400 radeon_emit(&cs->base, 0xffff1000);
401
402 *cs->ib_size_ptr |= cs->base.cdw + 4;
403
404 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
405 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
406 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
407 cs->max_num_old_ib_buffers * sizeof(void*));
408 }
409
410 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
411
412 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
413 RADEON_DOMAIN_GTT,
414 RADEON_FLAG_CPU_ACCESS |
415 RADEON_FLAG_NO_INTERPROCESS_SHARING |
416 RADEON_FLAG_READ_ONLY,
417 RADV_BO_PRIORITY_CS);
418
419 if (!cs->ib_buffer) {
420 cs->base.cdw = 0;
421 cs->failed = true;
422 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
423 }
424
425 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
426 if (!cs->ib_mapped) {
427 cs->ws->base.buffer_destroy(cs->ib_buffer);
428 cs->base.cdw = 0;
429 cs->failed = true;
430 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
431 }
432
433 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);
434
435 radeon_emit(&cs->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
436 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va);
437 radeon_emit(&cs->base, radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32);
438 radeon_emit(&cs->base, S_3F2_CHAIN(1) | S_3F2_VALID(1));
439
440 cs->ib_size_ptr = cs->base.buf + cs->base.cdw - 1;
441
442 cs->base.buf = (uint32_t *)cs->ib_mapped;
443 cs->base.cdw = 0;
444 cs->base.max_dw = ib_size / 4 - 4;
445
446 }
447
static bool radv_amdgpu_cs_finalize(struct radeon_cmdbuf *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			radeon_emit(&cs->base, 0xffff1000);

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return !cs->failed;
}

static void radv_amdgpu_cs_reset(struct radeon_cmdbuf *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->failed = false;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = cs->handles[i].bo_handle &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
		cs->virtual_buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;
	cs->num_virtual_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	} else {
		for (unsigned i = 0; i < cs->num_old_cs_buffers; ++i) {
			struct radeon_cmdbuf *rcs = &cs->old_cs_buffers[i];
			free(rcs->buf);
		}

		free(cs->old_cs_buffers);
		cs->old_cs_buffers = NULL;
		cs->num_old_cs_buffers = 0;
	}
}
505
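/* The per-CS BO list is de-duplicated with a small direct-mapped hash cache
 * keyed on the kernel BO handle; on a miss the handle array is scanned
 * linearly and the cache entry is refreshed. */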
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      uint32_t bo)
{
	unsigned hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index].bo_handle == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i].bo_handle == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               uint32_t bo, uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1)
		return;

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(struct drm_amdgpu_bo_list_entry));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers].bo_handle = bo;
	cs->handles[cs->num_buffers].bo_priority = priority;

	/* Hash the raw KMS handle directly so that lookup, insertion and reset
	 * all use the same bucket. */
	hash = bo & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}
551
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_cmdbuf *_cs,
                                              struct radeon_winsys_bo *bo)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

	if (!cs->virtual_buffer_hash_table) {
		cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
		for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
			cs->virtual_buffer_hash_table[i] = -1;
	}

	if (cs->virtual_buffer_hash_table[hash] >= 0) {
		int idx = cs->virtual_buffer_hash_table[hash];
		if (cs->virtual_buffers[idx] == bo)
			return;
		for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
			if (cs->virtual_buffers[i] == bo) {
				cs->virtual_buffer_hash_table[hash] = i;
				return;
			}
		}
	}

	if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
		cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
		cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
	}

	cs->virtual_buffers[cs->num_virtual_buffers] = bo;

	cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
	++cs->num_virtual_buffers;
}
589
590 static void radv_amdgpu_cs_add_buffer(struct radeon_cmdbuf *_cs,
591 struct radeon_winsys_bo *_bo)
592 {
593 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
594 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
595
596 if (bo->is_virtual) {
597 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo);
598 return;
599 }
600
601 if (bo->base.is_local)
602 return;
603
604 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo_handle, bo->priority);
605 }
606
607 static void radv_amdgpu_cs_execute_secondary(struct radeon_cmdbuf *_parent,
608 struct radeon_cmdbuf *_child)
609 {
610 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
611 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
612
613 for (unsigned i = 0; i < child->num_buffers; ++i) {
614 radv_amdgpu_cs_add_buffer_internal(parent,
615 child->handles[i].bo_handle,
616 child->handles[i].bo_priority);
617 }
618
619 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
620 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i]);
621 }
622
623 if (parent->ws->use_ib_bos) {
624 if (parent->base.cdw + 4 > parent->base.max_dw)
625 radv_amdgpu_cs_grow(&parent->base, 4);
626
627 radeon_emit(&parent->base, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
628 radeon_emit(&parent->base, child->ib.ib_mc_address);
629 radeon_emit(&parent->base, child->ib.ib_mc_address >> 32);
630 radeon_emit(&parent->base, child->ib.size);
631 } else {
632 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
633 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
634
635 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
636 parent->base.cdw += child->base.cdw;
637 }
638 }
639
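/* Build the raw BO list for a submission. Three paths: with debug_all_bos
 * every BO known to the winsys is added, a single CS without extras reuses
 * its handle array directly, and otherwise the per-CS lists (plus virtual
 * buffers, the preamble CS and the external radv_bo_list) are merged with
 * duplicates removed. Each entry carries the per-BO priority. */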
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
                                      struct radeon_cmdbuf **cs_array,
                                      unsigned count,
                                      struct radv_amdgpu_winsys_bo **extra_bo_array,
                                      unsigned num_extra_bo,
                                      struct radeon_cmdbuf *extra_cs,
                                      const struct radv_winsys_bo_list *radv_bo_list,
                                      uint32_t *bo_list)
{
	int r = 0;

	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		struct drm_amdgpu_bo_list_entry *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num].bo_handle = bo->bo_handle;
			handles[num].bo_priority = bo->priority;
			num++;
		}

		r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers,
		                              handles, bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !num_extra_bo && !extra_cs && !radv_bo_list &&
	           !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
		if (cs->num_buffers == 0) {
			*bo_list = 0;
			return 0;
		}
		r = amdgpu_bo_list_create_raw(ws->dev, cs->num_buffers, cs->handles,
		                              bo_list);
	} else {
		unsigned total_buffer_count = num_extra_bo;
		unsigned unique_bo_count = num_extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			total_buffer_count += cs->num_buffers;
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
				total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
		}

		if (extra_cs) {
			total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
		}

		if (radv_bo_list) {
			total_buffer_count += radv_bo_list->count;
		}

		if (total_buffer_count == 0) {
			*bo_list = 0;
			return 0;
		}

		struct drm_amdgpu_bo_list_entry *handles = malloc(sizeof(struct drm_amdgpu_bo_list_entry) * total_buffer_count);
		if (!handles)
			return -ENOMEM;

		for (unsigned i = 0; i < num_extra_bo; i++) {
			handles[i].bo_handle = extra_bo_array[i]->bo_handle;
			handles[i].bo_priority = extra_bo_array[i]->priority;
		}

		for (unsigned i = 0; i < count + !!extra_cs; ++i) {
			struct radv_amdgpu_cs *cs;

			if (i == count)
				cs = (struct radv_amdgpu_cs*)extra_cs;
			else
				cs = (struct radv_amdgpu_cs*)cs_array[i];

			if (!cs->num_buffers)
				continue;

			if (unique_bo_count == 0 && !cs->num_virtual_buffers) {
				memcpy(handles, cs->handles, cs->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
				unique_bo_count = cs->num_buffers;
				continue;
			}
			int unique_bo_so_far = unique_bo_count;
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_so_far; ++k) {
					if (handles[k].bo_handle == cs->handles[j].bo_handle) {
						found = true;
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					++unique_bo_count;
				}
			}
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
				struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
				for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
					struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
					bool found = false;
					for (unsigned m = 0; m < unique_bo_count; ++m) {
						if (handles[m].bo_handle == bo->bo_handle) {
							found = true;
							break;
						}
					}
					if (!found) {
						handles[unique_bo_count].bo_handle = bo->bo_handle;
						handles[unique_bo_count].bo_priority = bo->priority;
						++unique_bo_count;
					}
				}
			}
		}

		if (radv_bo_list) {
			unsigned unique_bo_so_far = unique_bo_count;
			for (unsigned i = 0; i < radv_bo_list->count; ++i) {
				struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
				bool found = false;
				for (unsigned j = 0; j < unique_bo_so_far; ++j) {
					if (bo->bo_handle == handles[j].bo_handle) {
						found = true;
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count].bo_handle = bo->bo_handle;
					handles[unique_bo_count].bo_priority = bo->priority;
					++unique_bo_count;
				}
			}
		}

		if (unique_bo_count > 0) {
			r = amdgpu_bo_list_create_raw(ws->dev, unique_bo_count, handles,
			                              bo_list);
		} else {
			*bo_list = 0;
		}

		free(handles);
	}

	return r;
}
797
798 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
799 {
800 struct amdgpu_cs_fence_info ret = {0};
801 if (ctx->fence_map) {
802 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
803 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
804 }
805 return ret;
806 }
807
808 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
809 struct radv_amdgpu_cs_request *request)
810 {
811 radv_amdgpu_request_to_fence(ctx,
812 &ctx->last_submission[request->ip_type][request->ring],
813 request);
814 }
815
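/* Submit a chain of command buffers as a single request: each CS gets an
 * INDIRECT_BUFFER packet patched into its tail that jumps to the next one,
 * so the kernel only sees one or two IBs (optional preamble + first CS). */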
816 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
817 int queue_idx,
818 struct radv_winsys_sem_info *sem_info,
819 const struct radv_winsys_bo_list *radv_bo_list,
820 struct radeon_cmdbuf **cs_array,
821 unsigned cs_count,
822 struct radeon_cmdbuf *initial_preamble_cs,
823 struct radeon_cmdbuf *continue_preamble_cs,
824 struct radeon_winsys_fence *_fence)
825 {
826 int r;
827 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
828 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
829 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
830 uint32_t bo_list;
831 struct radv_amdgpu_cs_request request = {0};
832 struct amdgpu_cs_ib_info ibs[2];
833 unsigned number_of_ibs = 1;
834
835 for (unsigned i = cs_count; i--;) {
836 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
837
838 if (cs->is_chained) {
839 *cs->ib_size_ptr -= 4;
840 cs->is_chained = false;
841 }
842
843 if (i + 1 < cs_count) {
844 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
845 assert(cs->base.cdw + 4 <= cs->base.max_dw);
846
847 cs->is_chained = true;
848 *cs->ib_size_ptr += 4;
849
850 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
851 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
852 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
853 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
854 }
855 }
856
857 /* Create a buffer object list. */
858 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, 0,
859 initial_preamble_cs, radv_bo_list,
860 &bo_list);
861 if (r) {
862 fprintf(stderr, "amdgpu: buffer list creation failed for the "
863 "chained submission(%d)\n", r);
864 return r;
865 }
866
867 /* Configure the CS request. */
868 if (initial_preamble_cs) {
869 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
870 ibs[1] = cs0->ib;
871 number_of_ibs++;
872 } else {
873 ibs[0] = cs0->ib;
874 }
875
876 request.ip_type = cs0->hw_ip;
877 request.ring = queue_idx;
878 request.number_of_ibs = number_of_ibs;
879 request.ibs = ibs;
880 request.resources = bo_list;
881 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
882
883 /* Submit the CS. */
884 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
885 if (r) {
886 if (r == -ENOMEM)
887 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
888 else
889 fprintf(stderr, "amdgpu: The CS has been rejected, "
890 "see dmesg for more information.\n");
891 }
892
893 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
894
895 if (r)
896 return r;
897
898 if (fence)
899 radv_amdgpu_request_to_fence(ctx, fence, &request);
900
901 radv_assign_last_submit(ctx, &request);
902
903 return 0;
904 }
905
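/* Submit without chaining: every command buffer (plus the optional preamble)
 * becomes its own IB in a single kernel submission. */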
906 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
907 int queue_idx,
908 struct radv_winsys_sem_info *sem_info,
909 const struct radv_winsys_bo_list *radv_bo_list,
910 struct radeon_cmdbuf **cs_array,
911 unsigned cs_count,
912 struct radeon_cmdbuf *initial_preamble_cs,
913 struct radeon_cmdbuf *continue_preamble_cs,
914 struct radeon_winsys_fence *_fence)
915 {
916 int r;
917 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
918 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
919 uint32_t bo_list;
920 struct radv_amdgpu_cs_request request = {};
921 struct amdgpu_cs_ib_info *ibs;
922 struct radv_amdgpu_cs *cs0;
923 unsigned number_of_ibs;
924
925 assert(cs_count);
926 cs0 = radv_amdgpu_cs(cs_array[0]);
927
928 /* Compute the number of IBs for this submit. */
929 number_of_ibs = cs_count + !!initial_preamble_cs;
930
931 /* Create a buffer object list. */
932 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[0], cs_count, NULL, 0,
933 initial_preamble_cs, radv_bo_list,
934 &bo_list);
935 if (r) {
936 fprintf(stderr, "amdgpu: buffer list creation failed "
937 "for the fallback submission (%d)\n", r);
938 return r;
939 }
940
941 ibs = malloc(number_of_ibs * sizeof(*ibs));
942 if (!ibs) {
943 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
944 return -ENOMEM;
945 }
946
947 /* Configure the CS request. */
948 if (initial_preamble_cs)
949 ibs[0] = radv_amdgpu_cs(initial_preamble_cs)->ib;
950
951 for (unsigned i = 0; i < cs_count; i++) {
952 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
953
954 ibs[i + !!initial_preamble_cs] = cs->ib;
955
956 if (cs->is_chained) {
957 *cs->ib_size_ptr -= 4;
958 cs->is_chained = false;
959 }
960 }
961
962 request.ip_type = cs0->hw_ip;
963 request.ring = queue_idx;
964 request.resources = bo_list;
965 request.number_of_ibs = number_of_ibs;
966 request.ibs = ibs;
967 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
968
969 /* Submit the CS. */
970 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
971 if (r) {
972 if (r == -ENOMEM)
973 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
974 else
975 fprintf(stderr, "amdgpu: The CS has been rejected, "
976 "see dmesg for more information.\n");
977 }
978
979 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
980 free(ibs);
981
982 if (r)
983 return r;
984
985 if (fence)
986 radv_amdgpu_request_to_fence(ctx, fence, &request);
987
988 radv_assign_last_submit(ctx, &request);
989
990 return 0;
991 }
992
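/* Submission path for chips without IB BO support: the CPU-side command
 * streams are copied into freshly allocated GTT buffers, padded with NOP
 * packets to an 8-dword boundary, and submitted in batches. */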
993 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
994 int queue_idx,
995 struct radv_winsys_sem_info *sem_info,
996 const struct radv_winsys_bo_list *radv_bo_list,
997 struct radeon_cmdbuf **cs_array,
998 unsigned cs_count,
999 struct radeon_cmdbuf *initial_preamble_cs,
1000 struct radeon_cmdbuf *continue_preamble_cs,
1001 struct radeon_winsys_fence *_fence)
1002 {
1003 int r;
1004 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1005 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
1006 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
1007 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
1008 uint32_t bo_list;
1009 struct radv_amdgpu_cs_request request;
1010 uint32_t pad_word = 0xffff1000U;
1011 bool emit_signal_sem = sem_info->cs_emit_signal;
1012
1013 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
1014 pad_word = 0x80000000;
1015
1016 assert(cs_count);
1017
1018 for (unsigned i = 0; i < cs_count;) {
1019 struct amdgpu_cs_ib_info *ibs;
1020 struct radeon_winsys_bo **bos;
1021 struct radeon_cmdbuf *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
1022 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
1023 unsigned number_of_ibs;
1024 uint32_t *ptr;
1025 unsigned cnt = 0;
1026 unsigned size = 0;
1027 unsigned pad_words = 0;
1028
1029 /* Compute the number of IBs for this submit. */
1030 number_of_ibs = cs->num_old_cs_buffers + 1;
1031
1032 ibs = malloc(number_of_ibs * sizeof(*ibs));
1033 if (!ibs)
1034 return -ENOMEM;
1035
1036 bos = malloc(number_of_ibs * sizeof(*bos));
1037 if (!bos) {
1038 free(ibs);
1039 return -ENOMEM;
1040 }
1041
1042 if (number_of_ibs > 1) {
1043 /* Special path when the maximum size in dwords has
1044 * been reached because we need to handle more than one
1045 * IB per submit.
1046 */
1047 struct radeon_cmdbuf **new_cs_array;
1048 unsigned idx = 0;
1049
1050 new_cs_array = malloc(cs->num_old_cs_buffers *
1051 sizeof(*new_cs_array));
1052 assert(new_cs_array);
1053
1054 for (unsigned j = 0; j < cs->num_old_cs_buffers; j++)
1055 new_cs_array[idx++] = &cs->old_cs_buffers[j];
1056 new_cs_array[idx++] = cs_array[i];
1057
1058 for (unsigned j = 0; j < number_of_ibs; j++) {
1059 struct radeon_cmdbuf *rcs = new_cs_array[j];
1060 bool needs_preamble = preamble_cs && j == 0;
1061 unsigned size = 0;
1062
1063 if (needs_preamble)
1064 size += preamble_cs->cdw;
1065 size += rcs->cdw;
1066
1067 assert(size < 0xffff8);
1068
1069 while (!size || (size & 7)) {
1070 size++;
1071 pad_words++;
1072 }
1073
1074 bos[j] = ws->buffer_create(ws, 4 * size, 4096,
1075 RADEON_DOMAIN_GTT,
1076 RADEON_FLAG_CPU_ACCESS |
1077 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1078 RADEON_FLAG_READ_ONLY,
1079 RADV_BO_PRIORITY_CS);
1080 ptr = ws->buffer_map(bos[j]);
1081
1082 if (needs_preamble) {
1083 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1084 ptr += preamble_cs->cdw;
1085 }
1086
1087 memcpy(ptr, rcs->buf, 4 * rcs->cdw);
1088 ptr += rcs->cdw;
1089
1090 for (unsigned k = 0; k < pad_words; ++k)
1091 *ptr++ = pad_word;
1092
1093 ibs[j].size = size;
1094 ibs[j].ib_mc_address = radv_buffer_get_va(bos[j]);
1095 }
1096
1097 cnt++;
1098 free(new_cs_array);
1099 } else {
1100 if (preamble_cs)
1101 size += preamble_cs->cdw;
1102
1103 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
1104 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
1105 ++cnt;
1106 }
1107
1108 while (!size || (size & 7)) {
1109 size++;
1110 pad_words++;
1111 }
1112 assert(cnt);
1113
1114 bos[0] = ws->buffer_create(ws, 4 * size, 4096,
1115 RADEON_DOMAIN_GTT,
1116 RADEON_FLAG_CPU_ACCESS |
1117 RADEON_FLAG_NO_INTERPROCESS_SHARING |
1118 RADEON_FLAG_READ_ONLY,
1119 RADV_BO_PRIORITY_CS);
1120 ptr = ws->buffer_map(bos[0]);
1121
1122 if (preamble_cs) {
1123 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
1124 ptr += preamble_cs->cdw;
1125 }
1126
1127 for (unsigned j = 0; j < cnt; ++j) {
1128 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
1129 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
1130 ptr += cs->base.cdw;
1131
1132 }
1133
1134 for (unsigned j = 0; j < pad_words; ++j)
1135 *ptr++ = pad_word;
1136
1137 ibs[0].size = size;
1138 ibs[0].ib_mc_address = radv_buffer_get_va(bos[0]);
1139 }
1140
1141 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
1142 (struct radv_amdgpu_winsys_bo **)bos,
1143 number_of_ibs, preamble_cs,
1144 radv_bo_list, &bo_list);
1145 if (r) {
1146 fprintf(stderr, "amdgpu: buffer list creation failed "
1147 "for the sysmem submission (%d)\n", r);
1148 free(ibs);
1149 free(bos);
1150 return r;
1151 }
1152
1153 memset(&request, 0, sizeof(request));
1154
1155 request.ip_type = cs0->hw_ip;
1156 request.ring = queue_idx;
1157 request.resources = bo_list;
1158 request.number_of_ibs = number_of_ibs;
1159 request.ibs = ibs;
1160 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
1161
1162 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
1163 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
1164 if (r) {
1165 if (r == -ENOMEM)
1166 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1167 else
1168 fprintf(stderr, "amdgpu: The CS has been rejected, "
1169 "see dmesg for more information.\n");
1170 }
1171
1172 amdgpu_bo_list_destroy_raw(ctx->ws->dev, bo_list);
1173
1174 for (unsigned j = 0; j < number_of_ibs; j++) {
1175 ws->buffer_destroy(bos[j]);
1176 }
1177
1178 free(ibs);
1179 free(bos);
1180
1181 if (r)
1182 return r;
1183
1184 i += cnt;
1185 }
1186 if (fence)
1187 radv_amdgpu_request_to_fence(ctx, fence, &request);
1188
1189 radv_assign_last_submit(ctx, &request);
1190
1191 return 0;
1192 }
1193
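/* Top-level submit entry point: picks the sysmem, chained or fallback path
 * depending on chip capabilities and whether the CS can be patched, then
 * records the resulting fence in the winsys semaphores to be signalled. */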
1194 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1195 int queue_idx,
1196 struct radeon_cmdbuf **cs_array,
1197 unsigned cs_count,
1198 struct radeon_cmdbuf *initial_preamble_cs,
1199 struct radeon_cmdbuf *continue_preamble_cs,
1200 struct radv_winsys_sem_info *sem_info,
1201 const struct radv_winsys_bo_list *bo_list,
1202 bool can_patch,
1203 struct radeon_winsys_fence *_fence)
1204 {
1205 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1206 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1207 int ret;
1208
1209 assert(sem_info);
1210 if (!cs->ws->use_ib_bos) {
1211 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1212 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1213 } else if (can_patch && cs->ws->batchchain) {
1214 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1215 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1216 } else {
1217 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1218 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1219 }
1220
1221 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1222 return ret;
1223 }
1224
1225 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1226 {
1227 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1228 void *ret = NULL;
1229
1230 if (!cs->ib_buffer)
1231 return NULL;
1232 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1233 struct radv_amdgpu_winsys_bo *bo;
1234
1235 bo = (struct radv_amdgpu_winsys_bo*)
1236 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1237 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1238 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1239 return (char *)ret + (addr - bo->base.va);
1240 }
1241 }
1242 if(cs->ws->debug_all_bos) {
1243 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1244 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1245 &cs->ws->global_bo_list, global_list_item) {
1246 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1247 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1248 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1249 return (char *)ret + (addr - bo->base.va);
1250 }
1251 }
1252 }
1253 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1254 }
1255 return ret;
1256 }
1257
1258 static void radv_amdgpu_winsys_cs_dump(struct radeon_cmdbuf *_cs,
1259 FILE* file,
1260 const int *trace_ids, int trace_id_count)
1261 {
1262 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1263 void *ib = cs->base.buf;
1264 int num_dw = cs->base.cdw;
1265
1266 if (cs->ws->use_ib_bos) {
1267 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1268 num_dw = cs->ib.size;
1269 }
1270 assert(ib);
1271 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1272 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1273 }
1274
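/* Map the winsys context priority to the amdgpu kernel context priority used
 * by amdgpu_cs_ctx_create2(); REALTIME is mapped to VERY_HIGH. */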
static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
	switch (radv_priority) {
	case RADEON_CTX_PRIORITY_REALTIME:
		return AMDGPU_CTX_PRIORITY_VERY_HIGH;
	case RADEON_CTX_PRIORITY_HIGH:
		return AMDGPU_CTX_PRIORITY_HIGH;
	case RADEON_CTX_PRIORITY_MEDIUM:
		return AMDGPU_CTX_PRIORITY_NORMAL;
	case RADEON_CTX_PRIORITY_LOW:
		return AMDGPU_CTX_PRIORITY_LOW;
	default:
		unreachable("Invalid context priority");
	}
}
1290
static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
                                                        enum radeon_ctx_priority priority)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
	int r;

	if (!ctx)
		return NULL;

	r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: radv_amdgpu_cs_ctx_create2 failed. (%i)\n", r);
		goto error_create;
	}
	ctx->ws = ws;

	assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
	ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
	                                       RADEON_DOMAIN_GTT,
	                                       RADEON_FLAG_CPU_ACCESS |
	                                       RADEON_FLAG_NO_INTERPROCESS_SHARING,
	                                       RADV_BO_PRIORITY_CS);
	if (ctx->fence_bo)
		ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
	if (ctx->fence_map)
		memset(ctx->fence_map, 0, 4096);
	return (struct radeon_winsys_ctx *)ctx;
error_create:
	FREE(ctx);
	return NULL;
}
1324
1325 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1326 {
1327 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1328 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1329 amdgpu_cs_ctx_free(ctx->ctx);
1330 FREE(ctx);
1331 }
1332
1333 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1334 enum ring_type ring_type, int ring_index)
1335 {
1336 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1337 int ip_type = ring_to_hw_ip(ring_type);
1338
1339 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1340 uint32_t expired;
1341 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1342 1000000000ull, 0, &expired);
1343
1344 if (ret || !expired)
1345 return false;
1346 }
1347
1348 return true;
1349 }
1350
1351 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1352 {
1353 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1354 if (!sem)
1355 return NULL;
1356
1357 return (struct radeon_winsys_sem *)sem;
1358 }
1359
1360 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1361 {
1362 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1363 FREE(sem);
1364 }
1365
1366 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1367 uint32_t ip_type,
1368 uint32_t ring,
1369 struct radv_winsys_sem_info *sem_info)
1370 {
1371 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1372 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1373
1374 if (sem->context)
1375 return -EINVAL;
1376
1377 *sem = ctx->last_submission[ip_type][ring].fence;
1378 }
1379 return 0;
1380 }
1381
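/* Fill a drm_amdgpu_cs_chunk with syncobj handles (either the SYNCOBJ_IN or
 * SYNCOBJ_OUT chunk id); returns the malloc'ed payload the caller must free. */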
1382 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1383 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1384 {
1385 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1386 if (!syncobj)
1387 return NULL;
1388
1389 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1390 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1391 sem->handle = counts->syncobj[i];
1392 }
1393
1394 chunk->chunk_id = chunk_id;
1395 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1396 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1397 return syncobj;
1398 }
1399
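/* Lower a radv_amdgpu_cs_request to a raw amdgpu_cs_submit_raw2() call: one
 * chunk per IB, plus optional chunks for the user fence, syncobj waits,
 * fence dependencies and syncobj signals. */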
1400 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1401 struct radv_amdgpu_cs_request *request,
1402 struct radv_winsys_sem_info *sem_info)
1403 {
1404 int r;
1405 int num_chunks;
1406 int size;
1407 bool user_fence;
1408 struct drm_amdgpu_cs_chunk *chunks;
1409 struct drm_amdgpu_cs_chunk_data *chunk_data;
1410 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1411 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1412 int i;
1413 struct amdgpu_cs_fence *sem;
1414
1415 user_fence = (request->fence_info.handle != NULL);
1416 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1417
1418 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1419
1420 size = request->number_of_ibs + (user_fence ? 1 : 0);
1421
1422 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1423
1424 num_chunks = request->number_of_ibs;
1425 for (i = 0; i < request->number_of_ibs; i++) {
1426 struct amdgpu_cs_ib_info *ib;
1427 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1428 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1429 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1430
1431 ib = &request->ibs[i];
1432
1433 chunk_data[i].ib_data._pad = 0;
1434 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1435 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1436 chunk_data[i].ib_data.ip_type = request->ip_type;
1437 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1438 chunk_data[i].ib_data.ring = request->ring;
1439 chunk_data[i].ib_data.flags = ib->flags;
1440 }
1441
1442 if (user_fence) {
1443 i = num_chunks++;
1444
1445 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1446 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1447 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1448
1449 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1450 &chunk_data[i]);
1451 }
1452
1453 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1454 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1455 &chunks[num_chunks],
1456 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1457 if (!wait_syncobj) {
1458 r = -ENOMEM;
1459 goto error_out;
1460 }
1461 num_chunks++;
1462
1463 if (sem_info->wait.sem_count == 0)
1464 sem_info->cs_emit_wait = false;
1465
1466 }
1467
1468 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1469 sem_dependencies = alloca(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1470 int sem_count = 0;
1471
1472 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1473 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1474 if (!sem->context)
1475 continue;
1476 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1477
1478 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1479
1480 sem->context = NULL;
1481 }
1482 i = num_chunks++;
1483
1484 /* dependencies chunk */
1485 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1486 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1487 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1488
1489 sem_info->cs_emit_wait = false;
1490 }
1491
1492 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1493 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1494 &chunks[num_chunks],
1495 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1496 if (!signal_syncobj) {
1497 r = -ENOMEM;
1498 goto error_out;
1499 }
1500 num_chunks++;
1501 }
1502
1503 r = amdgpu_cs_submit_raw2(ctx->ws->dev,
1504 ctx->ctx,
1505 request->resources,
1506 num_chunks,
1507 chunks,
1508 &request->seq_no);
1509 error_out:
1510 free(wait_syncobj);
1511 free(signal_syncobj);
1512 return r;
1513 }
1514
1515 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1516 uint32_t *handle)
1517 {
1518 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1519 return amdgpu_cs_create_syncobj(ws->dev, handle);
1520 }
1521
1522 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1523 uint32_t handle)
1524 {
1525 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1526 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1527 }
1528
1529 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1530 uint32_t handle)
1531 {
1532 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1533 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1534 }
1535
1536 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1537 uint32_t handle)
1538 {
1539 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1540 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1541 }
1542
1543 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1544 uint32_t handle_count, bool wait_all, uint64_t timeout)
1545 {
1546 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1547 uint32_t tmp;
1548
1549 /* The timeouts are signed, while vulkan timeouts are unsigned. */
1550 timeout = MIN2(timeout, INT64_MAX);
1551
1552 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1553 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1554 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1555 &tmp);
1556 if (ret == 0) {
1557 return true;
1558 } else if (ret == -1 && errno == ETIME) {
1559 return false;
1560 } else {
1561 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed!\nerrno: %d\n", errno);
1562 return false;
1563 }
1564 }
1565
1566 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1567 uint32_t syncobj,
1568 int *fd)
1569 {
1570 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1571
1572 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1573 }
1574
1575 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1576 int fd,
1577 uint32_t *syncobj)
1578 {
1579 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1580
1581 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1582 }
1583
1584
1585 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1586 uint32_t syncobj,
1587 int *fd)
1588 {
1589 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1590
1591 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1592 }
1593
1594 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1595 uint32_t syncobj,
1596 int fd)
1597 {
1598 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1599
1600 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1601 }
1602
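/* Illustrative sketch (not actual radv code) of how the rest of the driver
 * consumes this vtable once radv_amdgpu_cs_init_functions() has run; "ws",
 * "ctx", "cmdbuf", "bo", "sem_info", "bo_list" and "fence" are hypothetical
 * locals:
 *
 *   struct radeon_winsys_ctx *ctx = ws->ctx_create(ws, RADEON_CTX_PRIORITY_MEDIUM);
 *   struct radeon_cmdbuf *cmdbuf = ws->cs_create(ws, RING_GFX);
 *   ws->cs_add_buffer(cmdbuf, bo);      // reference every BO the packets use
 *   ... emit packets into cmdbuf ...
 *   if (ws->cs_finalize(cmdbuf))
 *           ws->cs_submit(ctx, 0, &cmdbuf, 1, NULL, NULL, sem_info, bo_list,
 *                         false, fence);
 */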
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.create_sem = radv_amdgpu_create_sem;
	ws->base.destroy_sem = radv_amdgpu_destroy_sem;
	ws->base.create_syncobj = radv_amdgpu_create_syncobj;
	ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
	ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
	ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
	ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
	ws->base.export_syncobj = radv_amdgpu_export_syncobj;
	ws->base.import_syncobj = radv_amdgpu_import_syncobj;
	ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
	ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
	ws->base.fences_wait = radv_amdgpu_fences_wait;
}