radv: Keep a global BO list for VkMemory.
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29 #include <pthread.h>
30 #include <errno.h>
31
32 #include "ac_debug.h"
33 #include "radv_radeon_winsys.h"
34 #include "radv_amdgpu_cs.h"
35 #include "radv_amdgpu_bo.h"
36 #include "sid.h"
37
38
39 enum {
40 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
41 };
42
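/*
 * Command stream state. Besides the IB itself (either a CPU-mapped GPU BO
 * when use_ib_bos is set, or a plain malloc'd buffer otherwise), each CS
 * tracks every buffer it references: 'handles' and 'priorities' are parallel
 * arrays of amdgpu BO handles, and 'buffer_hash_table' is a small
 * direct-mapped cache from a hash of the handle to its index in those
 * arrays, so repeated cs_add_buffer calls stay cheap. Virtual (sparse)
 * buffers are tracked separately in 'virtual_buffers', with their own lazily
 * allocated hash table, because they have to be expanded into their backing
 * BOs when the kernel BO list is built at submission time.
 */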
43 struct radv_amdgpu_cs {
44 struct radeon_winsys_cs base;
45 struct radv_amdgpu_winsys *ws;
46
47 struct amdgpu_cs_ib_info ib;
48
49 struct radeon_winsys_bo *ib_buffer;
50 uint8_t *ib_mapped;
51 unsigned max_num_buffers;
52 unsigned num_buffers;
53 amdgpu_bo_handle *handles;
54 uint8_t *priorities;
55
56 struct radeon_winsys_bo **old_ib_buffers;
57 unsigned num_old_ib_buffers;
58 unsigned max_num_old_ib_buffers;
59 unsigned *ib_size_ptr;
60 bool failed;
61 bool is_chained;
62
63 int buffer_hash_table[1024];
64 unsigned hw_ip;
65
66 unsigned num_virtual_buffers;
67 unsigned max_num_virtual_buffers;
68 struct radeon_winsys_bo **virtual_buffers;
69 uint8_t *virtual_buffer_priorities;
70 int *virtual_buffer_hash_table;
71 };
72
73 static inline struct radv_amdgpu_cs *
74 radv_amdgpu_cs(struct radeon_winsys_cs *base)
75 {
76 return (struct radv_amdgpu_cs*)base;
77 }
78
79 static int ring_to_hw_ip(enum ring_type ring)
80 {
81 switch (ring) {
82 case RING_GFX:
83 return AMDGPU_HW_IP_GFX;
84 case RING_DMA:
85 return AMDGPU_HW_IP_DMA;
86 case RING_COMPUTE:
87 return AMDGPU_HW_IP_COMPUTE;
88 default:
89 unreachable("unsupported ring");
90 }
91 }
92
93 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
94 uint32_t ip_type,
95 uint32_t ring,
96 struct radv_winsys_sem_info *sem_info);
97 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
98 struct amdgpu_cs_request *request,
99 struct radv_winsys_sem_info *sem_info);
100
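/*
 * Fences are thin wrappers around amdgpu_cs_fence. 'user_ptr' points into
 * the context's CPU-mapped fence BO (ctx->fence_map) at a per-(IP type,
 * ring) slot; the fence info attached to each submission (see
 * radv_set_cs_fence) requests that the completed sequence number be written
 * there, so radv_amdgpu_fence_wait can usually check completion with a plain
 * memory read before falling back to the query ioctl.
 */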
101 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
102 struct radv_amdgpu_fence *fence,
103 struct amdgpu_cs_request *req)
104 {
105 fence->fence.context = ctx->ctx;
106 fence->fence.ip_type = req->ip_type;
107 fence->fence.ip_instance = req->ip_instance;
108 fence->fence.ring = req->ring;
109 fence->fence.fence = req->seq_no;
110 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
111 }
112
113 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
114 {
115 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
116 return (struct radeon_winsys_fence*)fence;
117 }
118
119 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
120 {
121 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
122 free(fence);
123 }
124
125 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
126 struct radeon_winsys_fence *_fence,
127 bool absolute,
128 uint64_t timeout)
129 {
130 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
131 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
132 int r;
133 uint32_t expired = 0;
134
135 if (fence->user_ptr) {
136 if (*fence->user_ptr >= fence->fence.fence)
137 return true;
138 if (!absolute && !timeout)
139 return false;
140 }
141
142 /* Now use the libdrm query. */
143 r = amdgpu_cs_query_fence_status(&fence->fence,
144 timeout,
145 flags,
146 &expired);
147
148 if (r) {
149 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
150 return false;
151 }
152
153 if (expired)
154 return true;
155
156 return false;
157 }
158
159
160 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
161 struct radeon_winsys_fence *const *_fences,
162 uint32_t fence_count,
163 bool wait_all,
164 uint64_t timeout)
165 {
166 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
167 int r;
168 uint32_t expired = 0, first = 0;
169
170 if (!fences)
171 return false;
172
173 for (uint32_t i = 0; i < fence_count; ++i)
174 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
175
176 /* Now use the libdrm query. */
177 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
178 timeout, &expired, &first);
179
180 free(fences);
181 if (r) {
182 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
183 return false;
184 }
185
186 if (expired)
187 return true;
188
189 return false;
190 }
191
192 static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
193 {
194 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
195
196 if (cs->ib_buffer)
197 cs->ws->base.buffer_destroy(cs->ib_buffer);
198 else
199 free(cs->base.buf);
200
201 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
202 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
203
204 free(cs->old_ib_buffers);
205 free(cs->virtual_buffers);
206 free(cs->virtual_buffer_priorities);
207 free(cs->virtual_buffer_hash_table);
208 free(cs->handles);
209 free(cs->priorities);
210 free(cs);
211 }
212
213 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
214 enum ring_type ring_type)
215 {
216 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
217 cs->buffer_hash_table[i] = -1;
218
219 cs->hw_ip = ring_to_hw_ip(ring_type);
220 }
221
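/*
 * CS creation has two flavours. With use_ib_bos the command dwords are
 * written straight into a CPU-mapped GTT BO that is submitted (and chained)
 * by GPU address; max_dw leaves 4 dwords of slack for the INDIRECT_BUFFER
 * chain packet that radv_amdgpu_cs_grow may append. Without IB BOs the
 * commands live in a malloc'd buffer and are copied into a fresh BO at
 * submit time by radv_amdgpu_winsys_cs_submit_sysmem. A rough usage sketch,
 * going through the vtable set up in radv_amdgpu_cs_init_functions at the
 * end of this file (illustrative only, not taken from the driver):
 *
 *     struct radeon_winsys_cs *cs = ws->cs_create(ws, RING_GFX);
 *     radeon_emit(cs, ...);              // fill in packets
 *     ws->cs_finalize(cs);               // pad and close the IB
 *     ws->cs_submit(ctx, queue_idx, &cs, 1, NULL, NULL,
 *                   sem_info, bo_list, false, fence);
 *     ws->cs_reset(cs);                  // reuse the CS afterwards
 */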
222 static struct radeon_winsys_cs *
223 radv_amdgpu_cs_create(struct radeon_winsys *ws,
224 enum ring_type ring_type)
225 {
226 struct radv_amdgpu_cs *cs;
227 uint32_t ib_size = 20 * 1024 * 4;
228 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
229 if (!cs)
230 return NULL;
231
232 cs->ws = radv_amdgpu_winsys(ws);
233 radv_amdgpu_init_cs(cs, ring_type);
234
235 if (cs->ws->use_ib_bos) {
236 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
237 RADEON_DOMAIN_GTT,
238 RADEON_FLAG_CPU_ACCESS |
239 RADEON_FLAG_NO_INTERPROCESS_SHARING |
240 RADEON_FLAG_READ_ONLY);
241 if (!cs->ib_buffer) {
242 free(cs);
243 return NULL;
244 }
245
246 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
247 if (!cs->ib_mapped) {
248 ws->buffer_destroy(cs->ib_buffer);
249 free(cs);
250 return NULL;
251 }
252
253 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
254 cs->base.buf = (uint32_t *)cs->ib_mapped;
255 cs->base.max_dw = ib_size / 4 - 4;
256 cs->ib_size_ptr = &cs->ib.size;
257 cs->ib.size = 0;
258
259 ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
260 } else {
261 cs->base.buf = malloc(16384);
262 cs->base.max_dw = 4096;
263 if (!cs->base.buf) {
264 free(cs);
265 return NULL;
266 }
267 }
268
269 return &cs->base;
270 }
271
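/*
 * Grow the CS when it runs out of space. For the malloc'd (sysmem) path this
 * is a plain realloc, capped at the 0xffff8-dword limit. For IB BOs the
 * current IB is closed instead: it is padded with type-3 NOP dwords
 * (0xffff1000) until the 4-dword chain packet will end on an 8-dword
 * boundary, the old BO is kept alive in old_ib_buffers, a new IB BO is
 * allocated, and an INDIRECT_BUFFER_CIK packet pointing at the new BO is
 * written at the end of the old one. ib_size_ptr is left pointing at the
 * size dword of that chain packet so later grows or cs_finalize can patch
 * the final size in.
 */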
272 static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
273 {
274 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
275
276 if (cs->failed) {
277 cs->base.cdw = 0;
278 return;
279 }
280
281 if (!cs->ws->use_ib_bos) {
282 const uint64_t limit_dws = 0xffff8;
283 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
284 MIN2(cs->base.max_dw * 2, limit_dws));
285
286 /* The total ib size cannot exceed limit_dws dwords. */
287 if (ib_dws > limit_dws)
288 {
289 cs->failed = true;
290 cs->base.cdw = 0;
291 return;
292 }
293
294 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
295 if (new_buf) {
296 cs->base.buf = new_buf;
297 cs->base.max_dw = ib_dws;
298 } else {
299 cs->failed = true;
300 cs->base.cdw = 0;
301 }
302 return;
303 }
304
305 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
306
307 /* max that fits in the chain size field. */
308 ib_size = MIN2(ib_size, 0xfffff);
309
310 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
311 cs->base.buf[cs->base.cdw++] = 0xffff1000;
312
313 *cs->ib_size_ptr |= cs->base.cdw + 4;
314
315 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
316 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
317 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
318 cs->max_num_old_ib_buffers * sizeof(void*));
319 }
320
321 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
322
323 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
324 RADEON_DOMAIN_GTT,
325 RADEON_FLAG_CPU_ACCESS |
326 RADEON_FLAG_NO_INTERPROCESS_SHARING |
327 RADEON_FLAG_READ_ONLY);
328
329 if (!cs->ib_buffer) {
330 cs->base.cdw = 0;
331 cs->failed = true;
332 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
333 }
334
335 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
336 if (!cs->ib_mapped) {
337 cs->ws->base.buffer_destroy(cs->ib_buffer);
338 cs->base.cdw = 0;
339 cs->failed = true;
340 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
341 }
342
343 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
344
345 cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
346 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
347 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
348 cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
349 cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
350
351 cs->base.buf = (uint32_t *)cs->ib_mapped;
352 cs->base.cdw = 0;
353 cs->base.max_dw = ib_size / 4 - 4;
354
355 }
356
357 static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
358 {
359 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
360
361 if (cs->ws->use_ib_bos) {
362 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
363 cs->base.buf[cs->base.cdw++] = 0xffff1000;
364
365 *cs->ib_size_ptr |= cs->base.cdw;
366
367 cs->is_chained = false;
368 }
369
370 return !cs->failed;
371 }
372
373 static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
374 {
375 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
376 cs->base.cdw = 0;
377 cs->failed = false;
378
379 for (unsigned i = 0; i < cs->num_buffers; ++i) {
380 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
381 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
382 cs->buffer_hash_table[hash] = -1;
383 }
384
385 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
386 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
387 cs->virtual_buffer_hash_table[hash] = -1;
388 }
389
390 cs->num_buffers = 0;
391 cs->num_virtual_buffers = 0;
392
393 if (cs->ws->use_ib_bos) {
394 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
395
396 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
397 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
398
399 cs->num_old_ib_buffers = 0;
400 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
401 cs->ib_size_ptr = &cs->ib.size;
402 cs->ib.size = 0;
403 }
404 }
405
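/*
 * Per-CS BO bookkeeping. find_buffer first probes the direct-mapped
 * buffer_hash_table; on a miss or a hash collision it falls back to a linear
 * scan of 'handles' and refreshes the cached index. add_buffer_internal
 * either bumps the priority of an already tracked BO or appends it, growing
 * the parallel handle/priority arrays as needed.
 */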
406 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
407 amdgpu_bo_handle bo)
408 {
409 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
410 int index = cs->buffer_hash_table[hash];
411
412 if (index == -1)
413 return -1;
414
415 if (cs->handles[index] == bo)
416 return index;
417
418 for (unsigned i = 0; i < cs->num_buffers; ++i) {
419 if (cs->handles[i] == bo) {
420 cs->buffer_hash_table[hash] = i;
421 return i;
422 }
423 }
424
425 return -1;
426 }
427
428 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
429 amdgpu_bo_handle bo,
430 uint8_t priority)
431 {
432 unsigned hash;
433 int index = radv_amdgpu_cs_find_buffer(cs, bo);
434
435 if (index != -1) {
436 cs->priorities[index] = MAX2(cs->priorities[index], priority);
437 return;
438 }
439
440 if (cs->num_buffers == cs->max_num_buffers) {
441 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
442 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
443 cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
444 cs->max_num_buffers = new_count;
445 }
446
447 cs->handles[cs->num_buffers] = bo;
448 cs->priorities[cs->num_buffers] = priority;
449
450 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
451 cs->buffer_hash_table[hash] = cs->num_buffers;
452
453 ++cs->num_buffers;
454 }
455
456 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
457 struct radeon_winsys_bo *bo,
458 uint8_t priority)
459 {
460 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
461 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
462
463
464 if (!cs->virtual_buffer_hash_table) {
465 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
466 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
467 cs->virtual_buffer_hash_table[i] = -1;
468 }
469
470 if (cs->virtual_buffer_hash_table[hash] >= 0) {
471 int idx = cs->virtual_buffer_hash_table[hash];
472 if (cs->virtual_buffers[idx] == bo) {
473 cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
474 return;
475 }
476 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
477 if (cs->virtual_buffers[i] == bo) {
478 cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
479 cs->virtual_buffer_hash_table[hash] = i;
480 return;
481 }
482 }
483 }
484
485 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
486 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
487 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
488 cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
489 }
490
491 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
492 cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;
493
494 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
495 ++cs->num_virtual_buffers;
496
497 }
498
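/*
 * Public add_buffer entry point. Virtual (sparse) buffers are recorded
 * separately above, since their backing BOs are only resolved when the
 * kernel BO list is built. BOs marked is_local are skipped entirely;
 * presumably they were allocated as always-resident in the VM, so they never
 * need to appear in a per-submission BO list.
 */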
499 static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
500 struct radeon_winsys_bo *_bo,
501 uint8_t priority)
502 {
503 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
504 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
505
506 if (bo->is_virtual) {
507 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
508 return;
509 }
510
511 if (bo->base.is_local)
512 return;
513
514 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
515 }
516
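/*
 * Execute a secondary command buffer from a primary one: the parent inherits
 * all of the child's buffer and virtual-buffer references, then either calls
 * the child's IB with an INDIRECT_BUFFER packet (IB BO mode) or simply
 * memcpy's the child's command dwords into its own stream.
 */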
517 static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
518 struct radeon_winsys_cs *_child)
519 {
520 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
521 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
522
523 for (unsigned i = 0; i < child->num_buffers; ++i) {
524 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
525 child->priorities[i]);
526 }
527
528 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
529 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
530 child->virtual_buffer_priorities[i]);
531 }
532
533 if (parent->ws->use_ib_bos) {
534 if (parent->base.cdw + 4 > parent->base.max_dw)
535 radv_amdgpu_cs_grow(&parent->base, 4);
536
537 parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
538 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
539 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
540 parent->base.buf[parent->base.cdw++] = child->ib.size;
541 } else {
542 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
543 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
544
545 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
546 parent->base.cdw += child->base.cdw;
547 }
548 }
549
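/*
 * Build the amdgpu BO list for a submission. Three paths:
 *  - debug_all_bos: hand the kernel every BO the winsys knows about (the
 *    global list guarded by global_bo_list_lock), intended for debugging.
 *  - the common fast path: a single CS with no preamble, no extra BO, no
 *    virtual buffers and no caller-provided list just reuses its own
 *    handle/priority arrays.
 *  - otherwise: merge and de-duplicate the buffers of all command streams,
 *    the backing BOs of any virtual buffers, an optional extra BO/CS, and
 *    the caller-provided radv_bo_list (the global VkDeviceMemory BO list),
 *    taking the maximum priority whenever a BO appears more than once.
 */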
550 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
551 struct radeon_winsys_cs **cs_array,
552 unsigned count,
553 struct radv_amdgpu_winsys_bo *extra_bo,
554 struct radeon_winsys_cs *extra_cs,
555 const struct radv_winsys_bo_list *radv_bo_list,
556 amdgpu_bo_list_handle *bo_list)
557 {
558 int r = 0;
559
560 if (ws->debug_all_bos) {
561 struct radv_amdgpu_winsys_bo *bo;
562 amdgpu_bo_handle *handles;
563 unsigned num = 0;
564
565 pthread_mutex_lock(&ws->global_bo_list_lock);
566
567 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
568 if (!handles) {
569 pthread_mutex_unlock(&ws->global_bo_list_lock);
570 return -ENOMEM;
571 }
572
573 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
574 assert(num < ws->num_buffers);
575 handles[num++] = bo->bo;
576 }
577
578 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
579 handles, NULL,
580 bo_list);
581 free(handles);
582 pthread_mutex_unlock(&ws->global_bo_list_lock);
583 } else if (count == 1 && !extra_bo && !extra_cs && !radv_bo_list &&
584 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
585 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
586 if (cs->num_buffers == 0) {
587 *bo_list = 0;
588 return 0;
589 }
590 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
591 cs->priorities, bo_list);
592 } else {
593 unsigned total_buffer_count = !!extra_bo;
594 unsigned unique_bo_count = !!extra_bo;
595 for (unsigned i = 0; i < count; ++i) {
596 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
597 total_buffer_count += cs->num_buffers;
598 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
599 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
600 }
601
602 if (extra_cs) {
603 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
604 }
605
606 if (radv_bo_list) {
607 total_buffer_count += radv_bo_list->count;
608 }
609
610 if (total_buffer_count == 0) {
611 *bo_list = 0;
612 return 0;
613 }
614 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
615 uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
616 if (!handles || !priorities) {
617 free(handles);
618 free(priorities);
619 return -ENOMEM;
620 }
621
622 if (extra_bo) {
623 handles[0] = extra_bo->bo;
624 priorities[0] = 8;
625 }
626
627 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
628 struct radv_amdgpu_cs *cs;
629
630 if (i == count)
631 cs = (struct radv_amdgpu_cs*)extra_cs;
632 else
633 cs = (struct radv_amdgpu_cs*)cs_array[i];
634
635 if (!cs->num_buffers)
636 continue;
637
638 if (unique_bo_count == 0) {
639 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
640 memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
641 unique_bo_count = cs->num_buffers;
642 continue;
643 }
644 int unique_bo_so_far = unique_bo_count;
645 for (unsigned j = 0; j < cs->num_buffers; ++j) {
646 bool found = false;
647 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
648 if (handles[k] == cs->handles[j]) {
649 found = true;
650 priorities[k] = MAX2(priorities[k],
651 cs->priorities[j]);
652 break;
653 }
654 }
655 if (!found) {
656 handles[unique_bo_count] = cs->handles[j];
657 priorities[unique_bo_count] = cs->priorities[j];
658 ++unique_bo_count;
659 }
660 }
661 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
662 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
663 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
664 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
665 bool found = false;
666 for (unsigned m = 0; m < unique_bo_count; ++m) {
667 if (handles[m] == bo->bo) {
668 found = true;
669 priorities[m] = MAX2(priorities[m],
670 cs->virtual_buffer_priorities[j]);
671 break;
672 }
673 }
674 if (!found) {
675 handles[unique_bo_count] = bo->bo;
676 priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
677 ++unique_bo_count;
678 }
679 }
680 }
681 }
682
683 if (radv_bo_list) {
684 unsigned unique_bo_so_far = unique_bo_count;
685 const unsigned default_bo_priority = 7;
686 for (unsigned i = 0; i < radv_bo_list->count; ++i) {
687 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(radv_bo_list->bos[i]);
688 bool found = false;
689 for (unsigned j = 0; j < unique_bo_so_far; ++j) {
690 if (bo->bo == handles[j]) {
691 found = true;
692 priorities[j] = MAX2(priorities[j], default_bo_priority);
693 break;
694 }
695 }
696 if (!found) {
697 handles[unique_bo_count] = bo->bo;
698 priorities[unique_bo_count] = default_bo_priority;
699 ++unique_bo_count;
700 }
701 }
702 }
703
704 if (unique_bo_count > 0) {
705 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
706 priorities, bo_list);
707 } else {
708 *bo_list = 0;
709 }
710
711 free(handles);
712 free(priorities);
713 }
714
715 return r;
716 }
717
718 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
719 {
720 struct amdgpu_cs_fence_info ret = {0};
721 if (ctx->fence_map) {
722 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
723 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
724 }
725 return ret;
726 }
727
728 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
729 struct amdgpu_cs_request *request)
730 {
731 radv_amdgpu_request_to_fence(ctx,
732 &ctx->last_submission[request->ip_type][request->ring],
733 request);
734 }
735
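/*
 * Chained submission: each CS in the array gets an INDIRECT_BUFFER chain
 * packet patched into its pre-reserved tail that points at the next CS, so
 * the kernel only ever sees the first IB (plus an optional preamble) in a
 * single amdgpu_cs_request. is_chained and ib_size_ptr allow the patch to be
 * undone before the next submission.
 */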
736 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
737 int queue_idx,
738 struct radv_winsys_sem_info *sem_info,
739 const struct radv_winsys_bo_list *radv_bo_list,
740 struct radeon_winsys_cs **cs_array,
741 unsigned cs_count,
742 struct radeon_winsys_cs *initial_preamble_cs,
743 struct radeon_winsys_cs *continue_preamble_cs,
744 struct radeon_winsys_fence *_fence)
745 {
746 int r;
747 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
748 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
749 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
750 amdgpu_bo_list_handle bo_list;
751 struct amdgpu_cs_request request = {0};
752 struct amdgpu_cs_ib_info ibs[2];
753
754 for (unsigned i = cs_count; i--;) {
755 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
756
757 if (cs->is_chained) {
758 *cs->ib_size_ptr -= 4;
759 cs->is_chained = false;
760 }
761
762 if (i + 1 < cs_count) {
763 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
764 assert(cs->base.cdw + 4 <= cs->base.max_dw);
765
766 cs->is_chained = true;
767 *cs->ib_size_ptr += 4;
768
769 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
770 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
771 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
772 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
773 }
774 }
775
776 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs,
777 radv_bo_list, &bo_list);
778 if (r) {
779 fprintf(stderr, "amdgpu: buffer list creation failed for the "
780 "chained submission(%d)\n", r);
781 return r;
782 }
783
784 request.ip_type = cs0->hw_ip;
785 request.ring = queue_idx;
786 request.number_of_ibs = 1;
787 request.ibs = &cs0->ib;
788 request.resources = bo_list;
789 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
790
791 if (initial_preamble_cs) {
792 request.ibs = ibs;
793 request.number_of_ibs = 2;
794 ibs[1] = cs0->ib;
795 ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
796 }
797
798 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
799 if (r) {
800 if (r == -ENOMEM)
801 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
802 else
803 fprintf(stderr, "amdgpu: The CS has been rejected, "
804 "see dmesg for more information.\n");
805 }
806
807 if (bo_list)
808 amdgpu_bo_list_destroy(bo_list);
809
810 if (fence)
811 radv_amdgpu_request_to_fence(ctx, fence, &request);
812
813 radv_assign_last_submit(ctx, &request);
814
815 return r;
816 }
817
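/*
 * Fallback submission: no chaining, so the CS array is split into groups of
 * at most AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs (minus one slot when a preamble
 * is needed) and each group is submitted as its own request; only the last
 * request emits the signal semaphores.
 */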
818 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
819 int queue_idx,
820 struct radv_winsys_sem_info *sem_info,
821 const struct radv_winsys_bo_list *radv_bo_list,
822 struct radeon_winsys_cs **cs_array,
823 unsigned cs_count,
824 struct radeon_winsys_cs *initial_preamble_cs,
825 struct radeon_winsys_cs *continue_preamble_cs,
826 struct radeon_winsys_fence *_fence)
827 {
828 int r;
829 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
830 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
831 amdgpu_bo_list_handle bo_list;
832 struct amdgpu_cs_request request;
833 bool emit_signal_sem = sem_info->cs_emit_signal;
834 assert(cs_count);
835
836 for (unsigned i = 0; i < cs_count;) {
837 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
838 struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
839 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
840 unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
841 cs_count - i);
842
843 memset(&request, 0, sizeof(request));
844
845 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
846 preamble_cs, radv_bo_list, &bo_list);
847 if (r) {
848 fprintf(stderr, "amdgpu: buffer list creation failed "
849 "for the fallback submission (%d)\n", r);
850 return r;
851 }
852
853 request.ip_type = cs0->hw_ip;
854 request.ring = queue_idx;
855 request.resources = bo_list;
856 request.number_of_ibs = cnt + !!preamble_cs;
857 request.ibs = ibs;
858 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
859
860 if (preamble_cs) {
861 ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
862 }
863
864 for (unsigned j = 0; j < cnt; ++j) {
865 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
866 ibs[j + !!preamble_cs] = cs->ib;
867
868 if (cs->is_chained) {
869 *cs->ib_size_ptr -= 4;
870 cs->is_chained = false;
871 }
872 }
873
874 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
875 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
876 if (r) {
877 if (r == -ENOMEM)
878 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
879 else
880 fprintf(stderr, "amdgpu: The CS has been rejected, "
881 "see dmesg for more information.\n");
882 }
883
884 if (bo_list)
885 amdgpu_bo_list_destroy(bo_list);
886
887 if (r)
888 return r;
889
890 i += cnt;
891 }
892 if (fence)
893 radv_amdgpu_request_to_fence(ctx, fence, &request);
894
895 radv_assign_last_submit(ctx, &request);
896
897 return 0;
898 }
899
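/*
 * Sysmem submission, used when the winsys does not use IB BOs: the command
 * dwords of as many streams as fit are copied into one freshly allocated
 * GTT BO, padded to a non-zero multiple of 8 dwords with NOPs (type-3
 * 0xffff1000, or the single-dword type-2 packet 0x80000000 on SI), and that
 * BO is submitted as a single IB.
 */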
900 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
901 int queue_idx,
902 struct radv_winsys_sem_info *sem_info,
903 const struct radv_winsys_bo_list *radv_bo_list,
904 struct radeon_winsys_cs **cs_array,
905 unsigned cs_count,
906 struct radeon_winsys_cs *initial_preamble_cs,
907 struct radeon_winsys_cs *continue_preamble_cs,
908 struct radeon_winsys_fence *_fence)
909 {
910 int r;
911 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
912 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
913 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
914 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
915 amdgpu_bo_list_handle bo_list;
916 struct amdgpu_cs_request request;
917 uint32_t pad_word = 0xffff1000U;
918 bool emit_signal_sem = sem_info->cs_emit_signal;
919
920 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
921 pad_word = 0x80000000;
922
923 assert(cs_count);
924
925 for (unsigned i = 0; i < cs_count;) {
926 struct amdgpu_cs_ib_info ib = {0};
927 struct radeon_winsys_bo *bo = NULL;
928 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
929 uint32_t *ptr;
930 unsigned cnt = 0;
931 unsigned size = 0;
932 unsigned pad_words = 0;
933 if (preamble_cs)
934 size += preamble_cs->cdw;
935
936 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
937 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
938 ++cnt;
939 }
940
941 while (!size || (size & 7)) {
942 size++;
943 pad_words++;
944 }
945 assert(cnt);
946
947 bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT,
948 RADEON_FLAG_CPU_ACCESS |
949 RADEON_FLAG_NO_INTERPROCESS_SHARING |
950 RADEON_FLAG_READ_ONLY);
951 ptr = ws->buffer_map(bo);
952
953 if (preamble_cs) {
954 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
955 ptr += preamble_cs->cdw;
956 }
957
958 for (unsigned j = 0; j < cnt; ++j) {
959 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
960 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
961 ptr += cs->base.cdw;
962
963 }
964
965 for (unsigned j = 0; j < pad_words; ++j)
966 *ptr++ = pad_word;
967
968 memset(&request, 0, sizeof(request));
969
970
971 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
972 (struct radv_amdgpu_winsys_bo*)bo,
973 preamble_cs, radv_bo_list, &bo_list);
974 if (r) {
975 fprintf(stderr, "amdgpu: buffer list creation failed "
976 "for the sysmem submission (%d)\n", r);
977 return r;
978 }
979
980 ib.size = size;
981 ib.ib_mc_address = radv_buffer_get_va(bo);
982
983 request.ip_type = cs0->hw_ip;
984 request.ring = queue_idx;
985 request.resources = bo_list;
986 request.number_of_ibs = 1;
987 request.ibs = &ib;
988 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
989
990 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
991 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
992 if (r) {
993 if (r == -ENOMEM)
994 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
995 else
996 fprintf(stderr, "amdgpu: The CS has been rejected, "
997 "see dmesg for more information.\n");
998 }
999
1000 if (bo_list)
1001 amdgpu_bo_list_destroy(bo_list);
1002
1003 ws->buffer_destroy(bo);
1004 if (r)
1005 return r;
1006
1007 i += cnt;
1008 }
1009 if (fence)
1010 radv_amdgpu_request_to_fence(ctx, fence, &request);
1011
1012 radv_assign_last_submit(ctx, &request);
1013
1014 return 0;
1015 }
1016
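/*
 * Top-level submit entry point: picks the sysmem path when IB BOs are not in
 * use, the chained path when the command buffers may be patched, batch
 * chaining is enabled and there are more of them than fit in one request,
 * and the fallback path otherwise. Queue semaphores are "signalled"
 * afterwards by copying the last submission fence into each signal
 * semaphore (radv_amdgpu_signal_sems).
 */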
1017 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
1018 int queue_idx,
1019 struct radeon_winsys_cs **cs_array,
1020 unsigned cs_count,
1021 struct radeon_winsys_cs *initial_preamble_cs,
1022 struct radeon_winsys_cs *continue_preamble_cs,
1023 struct radv_winsys_sem_info *sem_info,
1024 const struct radv_winsys_bo_list *bo_list,
1025 bool can_patch,
1026 struct radeon_winsys_fence *_fence)
1027 {
1028 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
1029 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
1030 int ret;
1031
1032 assert(sem_info);
1033 if (!cs->ws->use_ib_bos) {
1034 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, bo_list, cs_array,
1035 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1036 } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
1037 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, bo_list, cs_array,
1038 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1039 } else {
1040 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, bo_list, cs_array,
1041 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1042 }
1043
1044 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1045 return ret;
1046 }
1047
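/*
 * Map a GPU virtual address back to a CPU pointer by searching the current
 * and retired IB BOs of the CS (and, with debug_all_bos, the whole global BO
 * list). Used as the address-resolution callback for ac_parse_ib in
 * radv_amdgpu_winsys_cs_dump.
 */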
1048 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1049 {
1050 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1051 void *ret = NULL;
1052
1053 if (!cs->ib_buffer)
1054 return NULL;
1055 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1056 struct radv_amdgpu_winsys_bo *bo;
1057
1058 bo = (struct radv_amdgpu_winsys_bo*)
1059 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1060 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1061 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1062 return (char *)ret + (addr - bo->base.va);
1063 }
1064 }
1065 if (cs->ws->debug_all_bos) {
1066 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1067 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1068 &cs->ws->global_bo_list, global_list_item) {
1069 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1070 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1071 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1072 return (char *)ret + (addr - bo->base.va);
1073 }
1074 }
1075 }
1076 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1077 }
1078 return ret;
1079 }
1080
1081 static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
1082 FILE* file,
1083 const int *trace_ids, int trace_id_count)
1084 {
1085 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1086 void *ib = cs->base.buf;
1087 int num_dw = cs->base.cdw;
1088
1089 if (cs->ws->use_ib_bos) {
1090 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1091 num_dw = cs->ib.size;
1092 }
1093 assert(ib);
1094 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1095 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1096 }
1097
1098 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1099 {
1100 switch (radv_priority) {
1101 case RADEON_CTX_PRIORITY_REALTIME:
1102 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1103 case RADEON_CTX_PRIORITY_HIGH:
1104 return AMDGPU_CTX_PRIORITY_HIGH;
1105 case RADEON_CTX_PRIORITY_MEDIUM:
1106 return AMDGPU_CTX_PRIORITY_NORMAL;
1107 case RADEON_CTX_PRIORITY_LOW:
1108 return AMDGPU_CTX_PRIORITY_LOW;
1109 default:
1110 unreachable("Invalid context priority");
1111 }
1112 }
1113
1114 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1115 enum radeon_ctx_priority priority)
1116 {
1117 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1118 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1119 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1120 int r;
1121
1122 if (!ctx)
1123 return NULL;
1124
1125 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1126 if (r) {
1127 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1128 goto error_create;
1129 }
1130 ctx->ws = ws;
1131
1132 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1133 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1134 RADEON_DOMAIN_GTT,
1135 RADEON_FLAG_CPU_ACCESS|
1136 RADEON_FLAG_NO_INTERPROCESS_SHARING);
1137 if (ctx->fence_bo)
1138 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1139 if (ctx->fence_map)
1140 memset(ctx->fence_map, 0, 4096);
1141 return (struct radeon_winsys_ctx *)ctx;
1142 error_create:
1143 FREE(ctx);
1144 return NULL;
1145 }
1146
1147 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1148 {
1149 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1150 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1151 amdgpu_cs_ctx_free(ctx->ctx);
1152 FREE(ctx);
1153 }
1154
1155 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1156 enum ring_type ring_type, int ring_index)
1157 {
1158 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1159 int ip_type = ring_to_hw_ip(ring_type);
1160
1161 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1162 uint32_t expired;
1163 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1164 1000000000ull, 0, &expired);
1165
1166 if (ret || !expired)
1167 return false;
1168 }
1169
1170 return true;
1171 }
1172
1173 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1174 {
1175 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1176 if (!sem)
1177 return NULL;
1178
1179 return (struct radeon_winsys_sem *)sem;
1180 }
1181
1182 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1183 {
1184 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1185 FREE(sem);
1186 }
1187
1188 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1189 uint32_t ip_type,
1190 uint32_t ring,
1191 struct radv_winsys_sem_info *sem_info)
1192 {
1193 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1194 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1195
1196 if (sem->context)
1197 return -EINVAL;
1198
1199 *sem = ctx->last_submission[ip_type][ring].fence;
1200 }
1201 return 0;
1202 }
1203
1204 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1205 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1206 {
1207 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1208 if (!syncobj)
1209 return NULL;
1210
1211 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1212 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1213 sem->handle = counts->syncobj[i];
1214 }
1215
1216 chunk->chunk_id = chunk_id;
1217 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1218 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1219 return syncobj;
1220 }
1221
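/*
 * Translate an amdgpu_cs_request plus radv semaphore info into raw CS chunks
 * for amdgpu_cs_submit_raw: one IB chunk per amdgpu_cs_ib_info, an optional
 * user-fence chunk, a syncobj wait chunk and/or a legacy dependency chunk
 * for the wait semaphores, and a syncobj signal chunk. cs_emit_wait is
 * cleared once the wait chunks have been emitted so that only the first
 * request of a split submission actually waits on the semaphores.
 */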
1222 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1223 struct amdgpu_cs_request *request,
1224 struct radv_winsys_sem_info *sem_info)
1225 {
1226 int r;
1227 int num_chunks;
1228 int size;
1229 bool user_fence;
1230 struct drm_amdgpu_cs_chunk *chunks;
1231 struct drm_amdgpu_cs_chunk_data *chunk_data;
1232 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1233 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1234 int i;
1235 struct amdgpu_cs_fence *sem;
1236
1237 user_fence = (request->fence_info.handle != NULL);
1238 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1239
1240 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1241
1242 size = request->number_of_ibs + (user_fence ? 1 : 0);
1243
1244 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1245
1246 num_chunks = request->number_of_ibs;
1247 for (i = 0; i < request->number_of_ibs; i++) {
1248 struct amdgpu_cs_ib_info *ib;
1249 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1250 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1251 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1252
1253 ib = &request->ibs[i];
1254
1255 chunk_data[i].ib_data._pad = 0;
1256 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1257 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1258 chunk_data[i].ib_data.ip_type = request->ip_type;
1259 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1260 chunk_data[i].ib_data.ring = request->ring;
1261 chunk_data[i].ib_data.flags = ib->flags;
1262 }
1263
1264 if (user_fence) {
1265 i = num_chunks++;
1266
1267 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1268 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1269 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1270
1271 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1272 &chunk_data[i]);
1273 }
1274
1275 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1276 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1277 &chunks[num_chunks],
1278 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1279 if (!wait_syncobj) {
1280 r = -ENOMEM;
1281 goto error_out;
1282 }
1283 num_chunks++;
1284
1285 if (sem_info->wait.sem_count == 0)
1286 sem_info->cs_emit_wait = false;
1287
1288 }
1289
1290 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1291 sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1292 if (!sem_dependencies) {
1293 r = -ENOMEM;
1294 goto error_out;
1295 }
1296 int sem_count = 0;
1297 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1298 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1299 if (!sem->context)
1300 continue;
1301 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1302
1303 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1304
1305 sem->context = NULL;
1306 }
1307 i = num_chunks++;
1308
1309 /* dependencies chunk */
1310 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1311 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1312 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1313
1314 sem_info->cs_emit_wait = false;
1315 }
1316
1317 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1318 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1319 &chunks[num_chunks],
1320 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1321 if (!signal_syncobj) {
1322 r = -ENOMEM;
1323 goto error_out;
1324 }
1325 num_chunks++;
1326 }
1327
1328 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1329 ctx->ctx,
1330 request->resources,
1331 num_chunks,
1332 chunks,
1333 &request->seq_no);
1334 error_out:
1335 free(sem_dependencies);
1336 free(wait_syncobj);
1337 free(signal_syncobj);
1338 return r;
1339 }
1340
1341 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1342 uint32_t *handle)
1343 {
1344 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1345 return amdgpu_cs_create_syncobj(ws->dev, handle);
1346 }
1347
1348 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1349 uint32_t handle)
1350 {
1351 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1352 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1353 }
1354
1355 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1356 uint32_t handle)
1357 {
1358 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1359 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1360 }
1361
1362 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1363 uint32_t handle)
1364 {
1365 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1366 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1367 }
1368
1369 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1370 uint32_t handle_count, bool wait_all, uint64_t timeout)
1371 {
1372 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1373 uint32_t tmp;
1374
1375 /* The timeouts are signed, while vulkan timeouts are unsigned. */
1376 timeout = MIN2(timeout, INT64_MAX);
1377
1378 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1379 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1380 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1381 &tmp);
1382 if (ret == 0) {
1383 return true;
1384 } else if (ret == -1 && errno == ETIME) {
1385 return false;
1386 } else {
1387 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed! errno: %d\n", errno);
1388 return false;
1389 }
1390 }
1391
1392 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1393 uint32_t syncobj,
1394 int *fd)
1395 {
1396 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1397
1398 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1399 }
1400
1401 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1402 int fd,
1403 uint32_t *syncobj)
1404 {
1405 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1406
1407 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1408 }
1409
1410
1411 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1412 uint32_t syncobj,
1413 int *fd)
1414 {
1415 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1416
1417 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1418 }
1419
1420 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1421 uint32_t syncobj,
1422 int fd)
1423 {
1424 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1425
1426 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1427 }
1428
1429 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1430 {
1431 ws->base.ctx_create = radv_amdgpu_ctx_create;
1432 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1433 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1434 ws->base.cs_create = radv_amdgpu_cs_create;
1435 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1436 ws->base.cs_grow = radv_amdgpu_cs_grow;
1437 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1438 ws->base.cs_reset = radv_amdgpu_cs_reset;
1439 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1440 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1441 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1442 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1443 ws->base.create_fence = radv_amdgpu_create_fence;
1444 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1445 ws->base.create_sem = radv_amdgpu_create_sem;
1446 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1447 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1448 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1449 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1450 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1451 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1452 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1453 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1454 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1455 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1456 ws->base.fence_wait = radv_amdgpu_fence_wait;
1457 ws->base.fences_wait = radv_amdgpu_fences_wait;
1458 }