1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
#include <errno.h>   /* errno / ETIME for syncobj waits */
#include <pthread.h> /* global BO list lock */
#include <stdio.h>   /* fprintf error reporting */
#include <string.h>  /* memcpy / memset */
29
30 #include "ac_debug.h"
31 #include "radv_radeon_winsys.h"
32 #include "radv_amdgpu_cs.h"
33 #include "radv_amdgpu_bo.h"
34 #include "sid.h"
35
36
37 enum {
38 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
39 };
40
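/* Per-queue command stream state.
 *
 * With use_ib_bos the stream is recorded straight into a GPU-visible IB
 * buffer (ib_buffer/ib_mapped); when it fills up, radv_amdgpu_cs_grow()
 * chains to a fresh buffer and parks the old one in old_ib_buffers.
 * Without IB BOs the commands live in a malloc'd array and are copied
 * into a temporary BO at submit time.  handles/priorities track every BO
 * referenced by the stream; the hash tables only speed up duplicate
 * detection in cs_add_buffer.  Virtual (sparse) buffers are tracked
 * separately because their backing BOs are only resolved when the BO
 * list is built.
 */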
41 struct radv_amdgpu_cs {
42 struct radeon_winsys_cs base;
43 struct radv_amdgpu_winsys *ws;
44
45 struct amdgpu_cs_ib_info ib;
46
47 struct radeon_winsys_bo *ib_buffer;
48 uint8_t *ib_mapped;
49 unsigned max_num_buffers;
50 unsigned num_buffers;
51 amdgpu_bo_handle *handles;
52 uint8_t *priorities;
53
54 struct radeon_winsys_bo **old_ib_buffers;
55 unsigned num_old_ib_buffers;
56 unsigned max_num_old_ib_buffers;
57 unsigned *ib_size_ptr;
58 bool failed;
59 bool is_chained;
60
61 int buffer_hash_table[1024];
62 unsigned hw_ip;
63
64 unsigned num_virtual_buffers;
65 unsigned max_num_virtual_buffers;
66 struct radeon_winsys_bo **virtual_buffers;
67 uint8_t *virtual_buffer_priorities;
68 int *virtual_buffer_hash_table;
69 };
70
71 static inline struct radv_amdgpu_cs *
72 radv_amdgpu_cs(struct radeon_winsys_cs *base)
73 {
74 return (struct radv_amdgpu_cs*)base;
75 }
76
77 static int ring_to_hw_ip(enum ring_type ring)
78 {
79 switch (ring) {
80 case RING_GFX:
81 return AMDGPU_HW_IP_GFX;
82 case RING_DMA:
83 return AMDGPU_HW_IP_DMA;
84 case RING_COMPUTE:
85 return AMDGPU_HW_IP_COMPUTE;
86 default:
87 unreachable("unsupported ring");
88 }
89 }
90
91 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
92 uint32_t ip_type,
93 uint32_t ring,
94 struct radv_winsys_sem_info *sem_info);
95 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
96 struct amdgpu_cs_request *request,
97 struct radv_winsys_sem_info *sem_info);
98
99 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
100 struct radv_amdgpu_fence *fence,
101 struct amdgpu_cs_request *req)
102 {
103 fence->fence.context = ctx->ctx;
104 fence->fence.ip_type = req->ip_type;
105 fence->fence.ip_instance = req->ip_instance;
106 fence->fence.ring = req->ring;
107 fence->fence.fence = req->seq_no;
108 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
109 }
110
111 static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
112 {
113 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
114 return (struct radeon_winsys_fence*)fence;
115 }
116
117 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
118 {
119 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
120 free(fence);
121 }
122
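/* Wait on a single fence.  If the submission wrote a user fence value into
 * the per-ring slot of the context's fence BO, the wait can often be
 * answered by a plain memory read; otherwise fall back to the
 * amdgpu_cs_query_fence_status() ioctl.
 */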
123 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
124 struct radeon_winsys_fence *_fence,
125 bool absolute,
126 uint64_t timeout)
127 {
128 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
129 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
130 int r;
131 uint32_t expired = 0;
132
133 if (fence->user_ptr) {
134 if (*fence->user_ptr >= fence->fence.fence)
135 return true;
136 if (!absolute && !timeout)
137 return false;
138 }
139
140 /* Now use the libdrm query. */
141 r = amdgpu_cs_query_fence_status(&fence->fence,
142 timeout,
143 flags,
144 &expired);
145
146 if (r) {
147 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
148 return false;
149 }
150
151 if (expired)
152 return true;
153
154 return false;
155 }
156
157
158 static bool radv_amdgpu_fences_wait(struct radeon_winsys *_ws,
159 struct radeon_winsys_fence *const *_fences,
160 uint32_t fence_count,
161 bool wait_all,
162 uint64_t timeout)
163 {
164 struct amdgpu_cs_fence *fences = malloc(sizeof(struct amdgpu_cs_fence) * fence_count);
165 int r;
166 uint32_t expired = 0, first = 0;
167
168 if (!fences)
169 return false;
170
171 for (uint32_t i = 0; i < fence_count; ++i)
172 fences[i] = ((struct radv_amdgpu_fence *)_fences[i])->fence;
173
174 /* Now use the libdrm query. */
175 r = amdgpu_cs_wait_fences(fences, fence_count, wait_all,
176 timeout, &expired, &first);
177
178 free(fences);
179 if (r) {
180 fprintf(stderr, "amdgpu: amdgpu_cs_wait_fences failed.\n");
181 return false;
182 }
183
184 if (expired)
185 return true;
186
187 return false;
188 }
189
190 static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
191 {
192 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
193
194 if (cs->ib_buffer)
195 cs->ws->base.buffer_destroy(cs->ib_buffer);
196 else
197 free(cs->base.buf);
198
199 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
200 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
201
202 free(cs->old_ib_buffers);
203 free(cs->virtual_buffers);
204 free(cs->virtual_buffer_priorities);
205 free(cs->virtual_buffer_hash_table);
206 free(cs->handles);
207 free(cs->priorities);
208 free(cs);
209 }
210
211 static void radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
212 enum ring_type ring_type)
213 {
214 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
215 cs->buffer_hash_table[i] = -1;
216
217 cs->hw_ip = ring_to_hw_ip(ring_type);
218 }
219
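/* Create a command stream.  With IB BOs an 80 KiB GTT buffer is mapped and
 * recorded into directly, keeping 4 trailing dwords free for a possible
 * chain packet; otherwise a 16 KiB malloc'd buffer is used and grown on
 * demand.
 */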
220 static struct radeon_winsys_cs *
221 radv_amdgpu_cs_create(struct radeon_winsys *ws,
222 enum ring_type ring_type)
223 {
224 struct radv_amdgpu_cs *cs;
225 uint32_t ib_size = 20 * 1024 * 4;
226 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
227 if (!cs)
228 return NULL;
229
230 cs->ws = radv_amdgpu_winsys(ws);
231 radv_amdgpu_init_cs(cs, ring_type);
232
233 if (cs->ws->use_ib_bos) {
234 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
235 RADEON_DOMAIN_GTT,
236 RADEON_FLAG_CPU_ACCESS |
237 RADEON_FLAG_NO_INTERPROCESS_SHARING |
238 RADEON_FLAG_READ_ONLY);
239 if (!cs->ib_buffer) {
240 free(cs);
241 return NULL;
242 }
243
244 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
245 if (!cs->ib_mapped) {
246 ws->buffer_destroy(cs->ib_buffer);
247 free(cs);
248 return NULL;
249 }
250
251 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
252 cs->base.buf = (uint32_t *)cs->ib_mapped;
253 cs->base.max_dw = ib_size / 4 - 4;
254 cs->ib_size_ptr = &cs->ib.size;
255 cs->ib.size = 0;
256
257 ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
258 } else {
259 cs->base.buf = malloc(16384);
260 cs->base.max_dw = 4096;
261 if (!cs->base.buf) {
262 free(cs);
263 return NULL;
264 }
265 }
266
267 return &cs->base;
268 }
269
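/* Make room for at least min_size more dwords.
 *
 * Without IB BOs this is a plain realloc, capped at the 0xffff8-dword limit
 * for a single IB.  With IB BOs the current buffer is padded with NOPs so
 * that the chain packet will end on an 8-dword boundary, the previous size
 * field is patched, a new buffer is allocated and an INDIRECT_BUFFER packet
 * chaining to it is emitted; ib_size_ptr is left pointing at the new
 * packet's size dword so it can be patched again later.
 */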
270 static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
271 {
272 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
273
274 if (cs->failed) {
275 cs->base.cdw = 0;
276 return;
277 }
278
279 if (!cs->ws->use_ib_bos) {
280 const uint64_t limit_dws = 0xffff8;
281 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
282 MIN2(cs->base.max_dw * 2, limit_dws));
283
284 /* The total ib size cannot exceed limit_dws dwords. */
285 if (ib_dws > limit_dws)
286 {
287 cs->failed = true;
288 cs->base.cdw = 0;
289 return;
290 }
291
292 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
293 if (new_buf) {
294 cs->base.buf = new_buf;
295 cs->base.max_dw = ib_dws;
296 } else {
297 cs->failed = true;
298 cs->base.cdw = 0;
299 }
300 return;
301 }
302
303 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
304
305 /* max that fits in the chain size field. */
306 ib_size = MIN2(ib_size, 0xfffff);
307
308 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
309 cs->base.buf[cs->base.cdw++] = 0xffff1000;
310
311 *cs->ib_size_ptr |= cs->base.cdw + 4;
312
313 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
314 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
315 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
316 cs->max_num_old_ib_buffers * sizeof(void*));
317 }
318
319 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
320
321 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
322 RADEON_DOMAIN_GTT,
323 RADEON_FLAG_CPU_ACCESS |
324 RADEON_FLAG_NO_INTERPROCESS_SHARING |
325 RADEON_FLAG_READ_ONLY);
326
327 if (!cs->ib_buffer) {
328 cs->base.cdw = 0;
329 cs->failed = true;
330 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
331 }
332
333 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
334 if (!cs->ib_mapped) {
335 cs->ws->base.buffer_destroy(cs->ib_buffer);
336 cs->base.cdw = 0;
337 cs->failed = true;
338 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
339 }
340
341 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
342
343 cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
344 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
345 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
346 cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
347 cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
348
349 cs->base.buf = (uint32_t *)cs->ib_mapped;
350 cs->base.cdw = 0;
351 cs->base.max_dw = ib_size / 4 - 4;
352
353 }
354
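/* Finish recording: with IB BOs the stream is padded with NOPs to an
 * 8-dword boundary and the final size is written into the size field of
 * whichever IB (first or chained) is current.  Returns false if the CS
 * ever failed to grow.
 */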
355 static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
356 {
357 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
358
359 if (cs->ws->use_ib_bos) {
360 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
361 cs->base.buf[cs->base.cdw++] = 0xffff1000;
362
363 *cs->ib_size_ptr |= cs->base.cdw;
364
365 cs->is_chained = false;
366 }
367
368 return !cs->failed;
369 }
370
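/* Reset the stream for reuse: clear only the hash slots that were actually
 * used, drop all buffer references, destroy the chained IB buffers and
 * rewind recording to the start of the first IB.
 */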
371 static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
372 {
373 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
374 cs->base.cdw = 0;
375 cs->failed = false;
376
377 for (unsigned i = 0; i < cs->num_buffers; ++i) {
378 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
379 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
380 cs->buffer_hash_table[hash] = -1;
381 }
382
383 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
384 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
385 cs->virtual_buffer_hash_table[hash] = -1;
386 }
387
388 cs->num_buffers = 0;
389 cs->num_virtual_buffers = 0;
390
391 if (cs->ws->use_ib_bos) {
392 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
393
394 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
395 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
396
397 cs->num_old_ib_buffers = 0;
398 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
399 cs->ib_size_ptr = &cs->ib.size;
400 cs->ib.size = 0;
401 }
402 }
403
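/* Look up a BO in the stream's buffer list: try the hash slot first and
 * fall back to a linear scan, refreshing the slot on a hit.  Returns -1 if
 * the BO has not been added yet.
 */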
404 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
405 amdgpu_bo_handle bo)
406 {
407 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
408 int index = cs->buffer_hash_table[hash];
409
410 if (index == -1)
411 return -1;
412
413 if (cs->handles[index] == bo)
414 return index;
415
416 for (unsigned i = 0; i < cs->num_buffers; ++i) {
417 if (cs->handles[i] == bo) {
418 cs->buffer_hash_table[hash] = i;
419 return i;
420 }
421 }
422
423 return -1;
424 }
425
426 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
427 amdgpu_bo_handle bo,
428 uint8_t priority)
429 {
430 unsigned hash;
431 int index = radv_amdgpu_cs_find_buffer(cs, bo);
432
433 if (index != -1) {
434 cs->priorities[index] = MAX2(cs->priorities[index], priority);
435 return;
436 }
437
438 if (cs->num_buffers == cs->max_num_buffers) {
439 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
440 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
441 cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
442 cs->max_num_buffers = new_count;
443 }
444
445 cs->handles[cs->num_buffers] = bo;
446 cs->priorities[cs->num_buffers] = priority;
447
448 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
449 cs->buffer_hash_table[hash] = cs->num_buffers;
450
451 ++cs->num_buffers;
452 }
453
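/* Track a virtual (sparse) buffer.  The hash table is allocated lazily on
 * first use; otherwise this mirrors radv_amdgpu_cs_add_buffer_internal(),
 * except that the backing BOs are only expanded when the BO list is
 * created at submit time.
 */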
454 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
455 struct radeon_winsys_bo *bo,
456 uint8_t priority)
457 {
458 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
459 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
460
461
462 if (!cs->virtual_buffer_hash_table) {
463 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
464 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
465 cs->virtual_buffer_hash_table[i] = -1;
466 }
467
468 if (cs->virtual_buffer_hash_table[hash] >= 0) {
469 int idx = cs->virtual_buffer_hash_table[hash];
470 if (cs->virtual_buffers[idx] == bo) {
471 cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
472 return;
473 }
474 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
475 if (cs->virtual_buffers[i] == bo) {
476 cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
477 cs->virtual_buffer_hash_table[hash] = i;
478 return;
479 }
480 }
481 }
482
483 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
484 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
485 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo *) * cs->max_num_virtual_buffers);
486 cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
487 }
488
489 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
490 cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;
491
492 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
493 ++cs->num_virtual_buffers;
494
495 }
496
497 static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
498 struct radeon_winsys_bo *_bo,
499 uint8_t priority)
500 {
501 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
502 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
503
504 if (bo->is_virtual) {
505 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
506 return;
507 }
508
509 if (bo->base.is_local)
510 return;
511
512 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
513 }
514
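/* Execute a secondary command stream from a primary one: the child's buffer
 * references are merged into the parent, then the child is either called
 * through an INDIRECT_BUFFER packet (IB BO mode) or its commands are copied
 * inline.
 */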
515 static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
516 struct radeon_winsys_cs *_child)
517 {
518 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
519 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
520
521 for (unsigned i = 0; i < child->num_buffers; ++i) {
522 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
523 child->priorities[i]);
524 }
525
526 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
527 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
528 child->virtual_buffer_priorities[i]);
529 }
530
531 if (parent->ws->use_ib_bos) {
532 if (parent->base.cdw + 4 > parent->base.max_dw)
533 radv_amdgpu_cs_grow(&parent->base, 4);
534
535 parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
536 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
537 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
538 parent->base.buf[parent->base.cdw++] = child->ib.size;
539 } else {
540 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
541 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
542
543 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
544 parent->base.cdw += child->base.cdw;
545 }
546 }
547
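/* Build the amdgpu BO list for a submission.  Three cases:
 *  - debug_all_bos: reference every BO the winsys knows about,
 *  - a single CS with no extras: reuse its handle/priority arrays directly,
 *  - otherwise: merge and de-duplicate the per-CS lists (quadratic scan),
 *    expanding virtual buffers into their backing BOs.
 * *bo_list is set to 0 when there is nothing to reference.
 */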
548 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
549 struct radeon_winsys_cs **cs_array,
550 unsigned count,
551 struct radv_amdgpu_winsys_bo *extra_bo,
552 struct radeon_winsys_cs *extra_cs,
553 amdgpu_bo_list_handle *bo_list)
554 {
555 int r = 0;
556
557 if (ws->debug_all_bos) {
558 struct radv_amdgpu_winsys_bo *bo;
559 amdgpu_bo_handle *handles;
560 unsigned num = 0;
561
562 pthread_mutex_lock(&ws->global_bo_list_lock);
563
564 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
565 if (!handles) {
566 pthread_mutex_unlock(&ws->global_bo_list_lock);
567 return -ENOMEM;
568 }
569
570 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
571 assert(num < ws->num_buffers);
572 handles[num++] = bo->bo;
573 }
574
575 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
576 handles, NULL,
577 bo_list);
578 free(handles);
579 pthread_mutex_unlock(&ws->global_bo_list_lock);
580 } else if (count == 1 && !extra_bo && !extra_cs &&
581 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
582 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
583 if (cs->num_buffers == 0) {
584 *bo_list = 0;
585 return 0;
586 }
587 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
588 cs->priorities, bo_list);
589 } else {
590 unsigned total_buffer_count = !!extra_bo;
591 unsigned unique_bo_count = !!extra_bo;
592 for (unsigned i = 0; i < count; ++i) {
593 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
594 total_buffer_count += cs->num_buffers;
595 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
596 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
597 }
598
599 if (extra_cs) {
600 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
601 }
602 if (total_buffer_count == 0) {
603 *bo_list = 0;
604 return 0;
605 }
606 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
607 uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
608 if (!handles || !priorities) {
609 free(handles);
610 free(priorities);
611 return -ENOMEM;
612 }
613
614 if (extra_bo) {
615 handles[0] = extra_bo->bo;
616 priorities[0] = 8;
617 }
618
619 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
620 struct radv_amdgpu_cs *cs;
621
622 if (i == count)
623 cs = (struct radv_amdgpu_cs*)extra_cs;
624 else
625 cs = (struct radv_amdgpu_cs*)cs_array[i];
626
627 if (!cs->num_buffers)
628 continue;
629
630 if (unique_bo_count == 0) {
631 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
632 memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
633 unique_bo_count = cs->num_buffers;
634 continue;
635 }
636 int unique_bo_so_far = unique_bo_count;
637 for (unsigned j = 0; j < cs->num_buffers; ++j) {
638 bool found = false;
639 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
640 if (handles[k] == cs->handles[j]) {
641 found = true;
642 priorities[k] = MAX2(priorities[k],
643 cs->priorities[j]);
644 break;
645 }
646 }
647 if (!found) {
648 handles[unique_bo_count] = cs->handles[j];
649 priorities[unique_bo_count] = cs->priorities[j];
650 ++unique_bo_count;
651 }
652 }
653 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
654 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
655 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
656 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
657 bool found = false;
658 for (unsigned m = 0; m < unique_bo_count; ++m) {
659 if (handles[m] == bo->bo) {
660 found = true;
661 priorities[m] = MAX2(priorities[m],
662 cs->virtual_buffer_priorities[j]);
663 break;
664 }
665 }
666 if (!found) {
667 handles[unique_bo_count] = bo->bo;
668 priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
669 ++unique_bo_count;
670 }
671 }
672 }
673 }
674
675 if (unique_bo_count > 0) {
676 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
677 priorities, bo_list);
678 } else {
679 *bo_list = 0;
680 }
681
682 free(handles);
683 free(priorities);
684 }
685
686 return r;
687 }
688
689 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
690 {
691 struct amdgpu_cs_fence_info ret = {0};
692 if (ctx->fence_map) {
693 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
694 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
695 }
696 return ret;
697 }
698
699 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
700 struct amdgpu_cs_request *request)
701 {
702 radv_amdgpu_request_to_fence(ctx,
703 &ctx->last_submission[request->ip_type][request->ring],
704 request);
705 }
706
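/* Submit a batch of command streams as a single IB by chaining: walk the
 * array backwards and patch each CS's trailing packet into an
 * INDIRECT_BUFFER chain to the next one, so the kernel only sees the first
 * IB (plus an optional preamble).  Requires IB BOs and CSes finalized with
 * room for the 4-dword chain packet.
 */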
707 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
708 int queue_idx,
709 struct radv_winsys_sem_info *sem_info,
710 struct radeon_winsys_cs **cs_array,
711 unsigned cs_count,
712 struct radeon_winsys_cs *initial_preamble_cs,
713 struct radeon_winsys_cs *continue_preamble_cs,
714 struct radeon_winsys_fence *_fence)
715 {
716 int r;
717 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
718 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
719 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
720 amdgpu_bo_list_handle bo_list;
721 struct amdgpu_cs_request request = {0};
722 struct amdgpu_cs_ib_info ibs[2];
723
724 for (unsigned i = cs_count; i--;) {
725 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
726
727 if (cs->is_chained) {
728 *cs->ib_size_ptr -= 4;
729 cs->is_chained = false;
730 }
731
732 if (i + 1 < cs_count) {
733 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
734 assert(cs->base.cdw + 4 <= cs->base.max_dw);
735
736 cs->is_chained = true;
737 *cs->ib_size_ptr += 4;
738
739 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
740 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
741 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
742 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
743 }
744 }
745
746 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
747 if (r) {
748 fprintf(stderr, "amdgpu: buffer list creation failed for the "
749 "chained submission(%d)\n", r);
750 return r;
751 }
752
753 request.ip_type = cs0->hw_ip;
754 request.ring = queue_idx;
755 request.number_of_ibs = 1;
756 request.ibs = &cs0->ib;
757 request.resources = bo_list;
758 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
759
760 if (initial_preamble_cs) {
761 request.ibs = ibs;
762 request.number_of_ibs = 2;
763 ibs[1] = cs0->ib;
764 ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
765 }
766
767 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
768 if (r) {
769 if (r == -ENOMEM)
770 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
771 else
772 fprintf(stderr, "amdgpu: The CS has been rejected, "
773 "see dmesg for more information.\n");
774 }
775
776 if (bo_list)
777 amdgpu_bo_list_destroy(bo_list);
778
779 if (fence)
780 radv_amdgpu_request_to_fence(ctx, fence, &request);
781
782 radv_assign_last_submit(ctx, &request);
783
784 return r;
785 }
786
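/* Submit command streams without chaining: split the array into groups of
 * at most AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs, prepend the initial or
 * continue preamble to each group, and only emit the signal semaphores on
 * the last group.
 */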
787 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
788 int queue_idx,
789 struct radv_winsys_sem_info *sem_info,
790 struct radeon_winsys_cs **cs_array,
791 unsigned cs_count,
792 struct radeon_winsys_cs *initial_preamble_cs,
793 struct radeon_winsys_cs *continue_preamble_cs,
794 struct radeon_winsys_fence *_fence)
795 {
796 int r;
797 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
798 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
799 amdgpu_bo_list_handle bo_list;
800 struct amdgpu_cs_request request;
801 bool emit_signal_sem = sem_info->cs_emit_signal;
802 assert(cs_count);
803
804 for (unsigned i = 0; i < cs_count;) {
805 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
806 struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
807 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
808 unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
809 cs_count - i);
810
811 memset(&request, 0, sizeof(request));
812
813 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
814 preamble_cs, &bo_list);
815 if (r) {
816 fprintf(stderr, "amdgpu: buffer list creation failed "
817 "for the fallback submission (%d)\n", r);
818 return r;
819 }
820
821 request.ip_type = cs0->hw_ip;
822 request.ring = queue_idx;
823 request.resources = bo_list;
824 request.number_of_ibs = cnt + !!preamble_cs;
825 request.ibs = ibs;
826 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
827
828 if (preamble_cs) {
829 ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
830 }
831
832 for (unsigned j = 0; j < cnt; ++j) {
833 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
834 ibs[j + !!preamble_cs] = cs->ib;
835
836 if (cs->is_chained) {
837 *cs->ib_size_ptr -= 4;
838 cs->is_chained = false;
839 }
840 }
841
842 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
843 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
844 if (r) {
845 if (r == -ENOMEM)
846 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
847 else
848 fprintf(stderr, "amdgpu: The CS has been rejected, "
849 "see dmesg for more information.\n");
850 }
851
852 if (bo_list)
853 amdgpu_bo_list_destroy(bo_list);
854
855 if (r)
856 return r;
857
858 i += cnt;
859 }
860 if (fence)
861 radv_amdgpu_request_to_fence(ctx, fence, &request);
862
863 radv_assign_last_submit(ctx, &request);
864
865 return 0;
866 }
867
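/* Submit command streams that were recorded into CPU memory: copy as many
 * of them as fit under the IB size limit (plus the preamble) into a freshly
 * allocated GTT buffer, pad to an 8-dword boundary with NOP dwords
 * (0x80000000 on SI) and submit that buffer as one IB per iteration.
 */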
868 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
869 int queue_idx,
870 struct radv_winsys_sem_info *sem_info,
871 struct radeon_winsys_cs **cs_array,
872 unsigned cs_count,
873 struct radeon_winsys_cs *initial_preamble_cs,
874 struct radeon_winsys_cs *continue_preamble_cs,
875 struct radeon_winsys_fence *_fence)
876 {
877 int r;
878 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
879 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
880 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
881 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
882 amdgpu_bo_list_handle bo_list;
883 struct amdgpu_cs_request request;
884 uint32_t pad_word = 0xffff1000U;
885 bool emit_signal_sem = sem_info->cs_emit_signal;
886
887 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
888 pad_word = 0x80000000;
889
890 assert(cs_count);
891
892 for (unsigned i = 0; i < cs_count;) {
893 struct amdgpu_cs_ib_info ib = {0};
894 struct radeon_winsys_bo *bo = NULL;
895 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
896 uint32_t *ptr;
897 unsigned cnt = 0;
898 unsigned size = 0;
899 unsigned pad_words = 0;
900 if (preamble_cs)
901 size += preamble_cs->cdw;
902
903 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
904 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
905 ++cnt;
906 }
907
908 while (!size || (size & 7)) {
909 size++;
910 pad_words++;
911 }
912 assert(cnt);
913
914 bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT,
915 RADEON_FLAG_CPU_ACCESS |
916 RADEON_FLAG_NO_INTERPROCESS_SHARING |
917 RADEON_FLAG_READ_ONLY);
918 ptr = ws->buffer_map(bo);
919
920 if (preamble_cs) {
921 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
922 ptr += preamble_cs->cdw;
923 }
924
925 for (unsigned j = 0; j < cnt; ++j) {
926 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
927 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
928 ptr += cs->base.cdw;
929
930 }
931
932 for (unsigned j = 0; j < pad_words; ++j)
933 *ptr++ = pad_word;
934
935 memset(&request, 0, sizeof(request));
936
937
938 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
939 (struct radv_amdgpu_winsys_bo*)bo,
940 preamble_cs, &bo_list);
941 if (r) {
942 fprintf(stderr, "amdgpu: buffer list creation failed "
943 "for the sysmem submission (%d)\n", r);
944 return r;
945 }
946
947 ib.size = size;
948 ib.ib_mc_address = radv_buffer_get_va(bo);
949
950 request.ip_type = cs0->hw_ip;
951 request.ring = queue_idx;
952 request.resources = bo_list;
953 request.number_of_ibs = 1;
954 request.ibs = &ib;
955 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
956
957 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
958 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
959 if (r) {
960 if (r == -ENOMEM)
961 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
962 else
963 fprintf(stderr, "amdgpu: The CS has been rejected, "
964 "see dmesg for more information.\n");
965 }
966
967 if (bo_list)
968 amdgpu_bo_list_destroy(bo_list);
969
970 ws->buffer_destroy(bo);
971 if (r)
972 return r;
973
974 i += cnt;
975 }
976 if (fence)
977 radv_amdgpu_request_to_fence(ctx, fence, &request);
978
979 radv_assign_last_submit(ctx, &request);
980
981 return 0;
982 }
983
984 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
985 int queue_idx,
986 struct radeon_winsys_cs **cs_array,
987 unsigned cs_count,
988 struct radeon_winsys_cs *initial_preamble_cs,
989 struct radeon_winsys_cs *continue_preamble_cs,
990 struct radv_winsys_sem_info *sem_info,
991 bool can_patch,
992 struct radeon_winsys_fence *_fence)
993 {
994 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
995 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
996 int ret;
997
998 assert(sem_info);
999 if (!cs->ws->use_ib_bos) {
1000 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
1001 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1002 } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
1003 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
1004 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1005 } else {
1006 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
1007 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
1008 }
1009
1010 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
1011 return ret;
1012 }
1013
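/* Map a GPU virtual address back to a CPU pointer by searching the CS's
 * current and old IB buffers (and, with debug_all_bos, the winsys global
 * BO list).  Used as the address callback for IB dumping.
 */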
1014 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
1015 {
1016 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1017 void *ret = NULL;
1018
1019 if (!cs->ib_buffer)
1020 return NULL;
1021 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
1022 struct radv_amdgpu_winsys_bo *bo;
1023
1024 bo = (struct radv_amdgpu_winsys_bo*)
1025 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
1026 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1027 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
1028 return (char *)ret + (addr - bo->base.va);
1029 }
1030 }
1031 if (cs->ws->debug_all_bos) {
1032 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
1033 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
1034 &cs->ws->global_bo_list, global_list_item) {
1035 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
1036 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
1037 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1038 return (char *)ret + (addr - bo->base.va);
1039 }
1040 }
1041 }
1042 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
1043 }
1044 return ret;
1045 }
1046
1047 static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
1048 FILE* file,
1049 const int *trace_ids, int trace_id_count)
1050 {
1051 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
1052 void *ib = cs->base.buf;
1053 int num_dw = cs->base.cdw;
1054
1055 if (cs->ws->use_ib_bos) {
1056 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
1057 num_dw = cs->ib.size;
1058 }
1059 assert(ib);
1060 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1061 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1062 }
1063
1064 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1065 {
1066 switch (radv_priority) {
1067 case RADEON_CTX_PRIORITY_REALTIME:
1068 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1069 case RADEON_CTX_PRIORITY_HIGH:
1070 return AMDGPU_CTX_PRIORITY_HIGH;
1071 case RADEON_CTX_PRIORITY_MEDIUM:
1072 return AMDGPU_CTX_PRIORITY_NORMAL;
1073 case RADEON_CTX_PRIORITY_LOW:
1074 return AMDGPU_CTX_PRIORITY_LOW;
1075 default:
1076 unreachable("Invalid context priority");
1077 }
1078 }
1079
1080 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1081 enum radeon_ctx_priority priority)
1082 {
1083 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1084 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1085 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1086 int r;
1087
1088 if (!ctx)
1089 return NULL;
1090
1091 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1092 if (r) {
1093 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1094 goto error_create;
1095 }
1096 ctx->ws = ws;
1097
1098 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1099 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1100 RADEON_DOMAIN_GTT,
1101 RADEON_FLAG_CPU_ACCESS|
1102 RADEON_FLAG_NO_INTERPROCESS_SHARING);
1103 if (ctx->fence_bo)
1104 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1105 if (ctx->fence_map)
1106 memset(ctx->fence_map, 0, 4096);
1107 return (struct radeon_winsys_ctx *)ctx;
1108 error_create:
1109 FREE(ctx);
1110 return NULL;
1111 }
1112
1113 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1114 {
1115 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1116 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1117 amdgpu_cs_ctx_free(ctx->ctx);
1118 FREE(ctx);
1119 }
1120
1121 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1122 enum ring_type ring_type, int ring_index)
1123 {
1124 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1125 int ip_type = ring_to_hw_ip(ring_type);
1126
1127 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1128 uint32_t expired;
1129 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1130 1000000000ull, 0, &expired);
1131
1132 if (ret || !expired)
1133 return false;
1134 }
1135
1136 return true;
1137 }
1138
1139 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1140 {
1141 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1142 if (!sem)
1143 return NULL;
1144
1145 return (struct radeon_winsys_sem *)sem;
1146 }
1147
1148 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1149 {
1150 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1151 FREE(sem);
1152 }
1153
1154 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1155 uint32_t ip_type,
1156 uint32_t ring,
1157 struct radv_winsys_sem_info *sem_info)
1158 {
1159 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1160 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1161
1162 if (sem->context)
1163 return -EINVAL;
1164
1165 *sem = ctx->last_submission[ip_type][ring].fence;
1166 }
1167 return 0;
1168 }
1169
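/* Fill a CS chunk with the syncobj handles to wait on or signal.  Returns
 * the malloc'd handle array, which the caller must free after submission.
 */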
1170 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1171 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1172 {
1173 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1174 if (!syncobj)
1175 return NULL;
1176
1177 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1178 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1179 sem->handle = counts->syncobj[i];
1180 }
1181
1182 chunk->chunk_id = chunk_id;
1183 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1184 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1185 return syncobj;
1186 }
1187
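/* Lowest-level submission: translate an amdgpu_cs_request plus semaphore
 * info into raw CS chunks (one IB chunk per IB, an optional user-fence
 * chunk, syncobj wait/signal chunks and a legacy dependency chunk for
 * semaphore waits) and hand them to amdgpu_cs_submit_raw().  Wait
 * semaphores are consumed: their context pointer is cleared once used.
 */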
1188 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1189 struct amdgpu_cs_request *request,
1190 struct radv_winsys_sem_info *sem_info)
1191 {
1192 int r;
1193 int num_chunks;
1194 int size;
1195 bool user_fence;
1196 struct drm_amdgpu_cs_chunk *chunks;
1197 struct drm_amdgpu_cs_chunk_data *chunk_data;
1198 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1199 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1200 int i;
1201 struct amdgpu_cs_fence *sem;
1202
1203 user_fence = (request->fence_info.handle != NULL);
1204 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1205
1206 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1207
1208 size = request->number_of_ibs + (user_fence ? 1 : 0);
1209
1210 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1211
1212 num_chunks = request->number_of_ibs;
1213 for (i = 0; i < request->number_of_ibs; i++) {
1214 struct amdgpu_cs_ib_info *ib;
1215 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1216 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1217 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1218
1219 ib = &request->ibs[i];
1220
1221 chunk_data[i].ib_data._pad = 0;
1222 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1223 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1224 chunk_data[i].ib_data.ip_type = request->ip_type;
1225 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1226 chunk_data[i].ib_data.ring = request->ring;
1227 chunk_data[i].ib_data.flags = ib->flags;
1228 }
1229
1230 if (user_fence) {
1231 i = num_chunks++;
1232
1233 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1234 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1235 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1236
1237 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1238 &chunk_data[i]);
1239 }
1240
1241 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1242 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1243 &chunks[num_chunks],
1244 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1245 if (!wait_syncobj) {
1246 r = -ENOMEM;
1247 goto error_out;
1248 }
1249 num_chunks++;
1250
1251 if (sem_info->wait.sem_count == 0)
1252 sem_info->cs_emit_wait = false;
1253
1254 }
1255
1256 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1257 sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1258 if (!sem_dependencies) {
1259 r = -ENOMEM;
1260 goto error_out;
1261 }
1262 int sem_count = 0;
1263 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1264 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1265 if (!sem->context)
1266 continue;
1267 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1268
1269 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1270
1271 sem->context = NULL;
1272 }
1273 i = num_chunks++;
1274
1275 /* dependencies chunk */
1276 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1277 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1278 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1279
1280 sem_info->cs_emit_wait = false;
1281 }
1282
1283 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1284 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1285 &chunks[num_chunks],
1286 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1287 if (!signal_syncobj) {
1288 r = -ENOMEM;
1289 goto error_out;
1290 }
1291 num_chunks++;
1292 }
1293
1294 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1295 ctx->ctx,
1296 request->resources,
1297 num_chunks,
1298 chunks,
1299 &request->seq_no);
1300 error_out:
1301 free(sem_dependencies);
1302 free(wait_syncobj);
1303 free(signal_syncobj);
1304 return r;
1305 }
1306
1307 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1308 uint32_t *handle)
1309 {
1310 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1311 return amdgpu_cs_create_syncobj(ws->dev, handle);
1312 }
1313
1314 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1315 uint32_t handle)
1316 {
1317 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1318 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1319 }
1320
1321 static void radv_amdgpu_reset_syncobj(struct radeon_winsys *_ws,
1322 uint32_t handle)
1323 {
1324 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1325 amdgpu_cs_syncobj_reset(ws->dev, &handle, 1);
1326 }
1327
1328 static void radv_amdgpu_signal_syncobj(struct radeon_winsys *_ws,
1329 uint32_t handle)
1330 {
1331 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1332 amdgpu_cs_syncobj_signal(ws->dev, &handle, 1);
1333 }
1334
1335 static bool radv_amdgpu_wait_syncobj(struct radeon_winsys *_ws, const uint32_t *handles,
1336 uint32_t handle_count, bool wait_all, uint64_t timeout)
1337 {
1338 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1339 uint32_t tmp;
1340
1341 /* The kernel timeout is signed, while the Vulkan timeout is unsigned; clamp to INT64_MAX. */
1342 timeout = MIN2(timeout, INT64_MAX);
1343
1344 int ret = amdgpu_cs_syncobj_wait(ws->dev, (uint32_t*)handles, handle_count, timeout,
1345 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
1346 (wait_all ? DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL : 0),
1347 &tmp);
1348 if (ret == 0) {
1349 return true;
1350 } else if (ret == -ETIME) {
1351 return false;
1352 } else {
1353 fprintf(stderr, "amdgpu: radv_amdgpu_wait_syncobj failed! (ret: %d)\n", ret);
1354 return false;
1355 }
1356 }
1357
1358 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1359 uint32_t syncobj,
1360 int *fd)
1361 {
1362 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1363
1364 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1365 }
1366
1367 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1368 int fd,
1369 uint32_t *syncobj)
1370 {
1371 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1372
1373 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1374 }
1375
1376
1377 static int radv_amdgpu_export_syncobj_to_sync_file(struct radeon_winsys *_ws,
1378 uint32_t syncobj,
1379 int *fd)
1380 {
1381 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1382
1383 return amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, fd);
1384 }
1385
1386 static int radv_amdgpu_import_syncobj_from_sync_file(struct radeon_winsys *_ws,
1387 uint32_t syncobj,
1388 int fd)
1389 {
1390 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1391
1392 return amdgpu_cs_syncobj_import_sync_file(ws->dev, syncobj, fd);
1393 }
1394
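/* Hook the amdgpu implementations into the winsys function table.
 *
 * A rough usage sketch (not taken from this file; names follow the
 * radeon_winsys interface):
 *
 *   struct radeon_winsys_cs *cs = ws->base.cs_create(&ws->base, RING_GFX);
 *   ...emit packets, ws->base.cs_add_buffer(cs, bo, 8)...
 *   if (ws->base.cs_finalize(cs))
 *           ws->base.cs_submit(ctx, 0, &cs, 1, NULL, NULL, &sem_info, false, fence);
 */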
1395 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1396 {
1397 ws->base.ctx_create = radv_amdgpu_ctx_create;
1398 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1399 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1400 ws->base.cs_create = radv_amdgpu_cs_create;
1401 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1402 ws->base.cs_grow = radv_amdgpu_cs_grow;
1403 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1404 ws->base.cs_reset = radv_amdgpu_cs_reset;
1405 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1406 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1407 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1408 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1409 ws->base.create_fence = radv_amdgpu_create_fence;
1410 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1411 ws->base.create_sem = radv_amdgpu_create_sem;
1412 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1413 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1414 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1415 ws->base.reset_syncobj = radv_amdgpu_reset_syncobj;
1416 ws->base.signal_syncobj = radv_amdgpu_signal_syncobj;
1417 ws->base.wait_syncobj = radv_amdgpu_wait_syncobj;
1418 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1419 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1420 ws->base.export_syncobj_to_sync_file = radv_amdgpu_export_syncobj_to_sync_file;
1421 ws->base.import_syncobj_from_sync_file = radv_amdgpu_import_syncobj_from_sync_file;
1422 ws->base.fence_wait = radv_amdgpu_fence_wait;
1423 ws->base.fences_wait = radv_amdgpu_fences_wait;
1424 }