radv: Implement VK_EXT_global_priority
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c (mesa.git)
1 /*
2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 #include <stdlib.h>
26 #include <amdgpu.h>
27 #include <amdgpu_drm.h>
28 #include <assert.h>
29
30 #include "ac_debug.h"
31 #include "amdgpu_id.h"
32 #include "radv_radeon_winsys.h"
33 #include "radv_amdgpu_cs.h"
34 #include "radv_amdgpu_bo.h"
35 #include "sid.h"
36
37
38 enum {
39 VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
40 };
41
42 struct radv_amdgpu_cs {
43 struct radeon_winsys_cs base;
44 struct radv_amdgpu_winsys *ws;
45
46 struct amdgpu_cs_ib_info ib;
47
48 struct radeon_winsys_bo *ib_buffer;
49 uint8_t *ib_mapped;
50 unsigned max_num_buffers;
51 unsigned num_buffers;
52 amdgpu_bo_handle *handles;
53 uint8_t *priorities;
54
55 struct radeon_winsys_bo **old_ib_buffers;
56 unsigned num_old_ib_buffers;
57 unsigned max_num_old_ib_buffers;
58 unsigned *ib_size_ptr;
59 bool failed;
60 bool is_chained;
61
62 int buffer_hash_table[1024];
63 unsigned hw_ip;
64
65 unsigned num_virtual_buffers;
66 unsigned max_num_virtual_buffers;
67 struct radeon_winsys_bo **virtual_buffers;
68 uint8_t *virtual_buffer_priorities;
69 int *virtual_buffer_hash_table;
70 };
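/*
 * radv_amdgpu_cs wraps a radeon_winsys_cs with the amdgpu-specific state
 * needed to submit it: the current IB buffer and its CPU mapping, the list
 * of referenced BO handles and their priorities (deduplicated through
 * buffer_hash_table), the retired IB buffers that must stay alive while a
 * chained command stream is in flight, and a separate list plus hash table
 * for virtual buffers that are expanded into their backing BOs when the
 * kernel BO list is built.
 */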
71
72 static inline struct radv_amdgpu_cs *
73 radv_amdgpu_cs(struct radeon_winsys_cs *base)
74 {
75 return (struct radv_amdgpu_cs*)base;
76 }
77
78 static int ring_to_hw_ip(enum ring_type ring)
79 {
80 switch (ring) {
81 case RING_GFX:
82 return AMDGPU_HW_IP_GFX;
83 case RING_DMA:
84 return AMDGPU_HW_IP_DMA;
85 case RING_COMPUTE:
86 return AMDGPU_HW_IP_COMPUTE;
87 default:
88 unreachable("unsupported ring");
89 }
90 }
91
92 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
93 uint32_t ip_type,
94 uint32_t ring,
95 struct radv_winsys_sem_info *sem_info);
96 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
97 struct amdgpu_cs_request *request,
98 struct radv_winsys_sem_info *sem_info);
99
100 static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
101 struct radv_amdgpu_fence *fence,
102 struct amdgpu_cs_request *req)
103 {
104 fence->fence.context = ctx->ctx;
105 fence->fence.ip_type = req->ip_type;
106 fence->fence.ip_instance = req->ip_instance;
107 fence->fence.ring = req->ring;
108 fence->fence.fence = req->seq_no;
109 fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
110 }
111
112 static struct radeon_winsys_fence *radv_amdgpu_create_fence()
113 {
114 struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
115 return (struct radeon_winsys_fence*)fence;
116 }
117
118 static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
119 {
120 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
121 free(fence);
122 }
123
124 static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
125 struct radeon_winsys_fence *_fence,
126 bool absolute,
127 uint64_t timeout)
128 {
129 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
130 unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
131 int r;
132 uint32_t expired = 0;
133
134 if (fence->user_ptr) {
135 if (*fence->user_ptr >= fence->fence.fence)
136 return true;
137 if (!absolute && !timeout)
138 return false;
139 }
140
141 /* Now use the libdrm query. */
142 r = amdgpu_cs_query_fence_status(&fence->fence,
143 timeout,
144 flags,
145 &expired);
146
147 if (r) {
148 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
149 return false;
150 }
151
152 if (expired)
153 return true;
154
155 return false;
156 }
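/*
 * Fence waits first check the user fence value that the kernel writes into
 * the per-context fence BO (see radv_amdgpu_request_to_fence and
 * radv_set_cs_fence); only when that fast path cannot decide the result does
 * the code fall back to the amdgpu_cs_query_fence_status() ioctl wrapper.
 */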
157
158 static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
159 {
160 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);
161
162 if (cs->ib_buffer)
163 cs->ws->base.buffer_destroy(cs->ib_buffer);
164 else
165 free(cs->base.buf);
166
167 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
168 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
169
170 free(cs->old_ib_buffers);
171 free(cs->virtual_buffers);
172 free(cs->virtual_buffer_priorities);
173 free(cs->virtual_buffer_hash_table);
174 free(cs->handles);
175 free(cs->priorities);
176 free(cs);
177 }
178
179 static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
180 enum ring_type ring_type)
181 {
182 for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
183 cs->buffer_hash_table[i] = -1;
184
185 cs->hw_ip = ring_to_hw_ip(ring_type);
186 return true;
187 }
188
189 static struct radeon_winsys_cs *
190 radv_amdgpu_cs_create(struct radeon_winsys *ws,
191 enum ring_type ring_type)
192 {
193 struct radv_amdgpu_cs *cs;
194 uint32_t ib_size = 20 * 1024 * 4;
195 cs = calloc(1, sizeof(struct radv_amdgpu_cs));
196 if (!cs)
197 return NULL;
198
199 cs->ws = radv_amdgpu_winsys(ws);
200 radv_amdgpu_init_cs(cs, ring_type);
201
202 if (cs->ws->use_ib_bos) {
203 cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
204 RADEON_DOMAIN_GTT,
205 RADEON_FLAG_CPU_ACCESS);
206 if (!cs->ib_buffer) {
207 free(cs);
208 return NULL;
209 }
210
211 cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
212 if (!cs->ib_mapped) {
213 ws->buffer_destroy(cs->ib_buffer);
214 free(cs);
215 return NULL;
216 }
217
218 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
219 cs->base.buf = (uint32_t *)cs->ib_mapped;
220 cs->base.max_dw = ib_size / 4 - 4;
221 cs->ib_size_ptr = &cs->ib.size;
222 cs->ib.size = 0;
223
224 ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
225 } else {
226 cs->base.buf = malloc(16384);
227 cs->base.max_dw = 4096;
228 if (!cs->base.buf) {
229 free(cs);
230 return NULL;
231 }
232 }
233
234 return &cs->base;
235 }
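/*
 * A command stream is built either directly in a CPU-mapped GTT BO
 * (use_ib_bos, the usual case) or in a malloc()ed buffer that is copied into
 * a submission BO at submit time (the sysmem path). A rough driver-side usage
 * sketch, illustrative only and assuming the radeon_emit() helper, PKT3_NOP
 * from sid.h and an already initialized sem_info/fence:
 *
 *     struct radeon_winsys_cs *cs = ws->cs_create(ws, RING_GFX);
 *     radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
 *     if (ws->cs_finalize(cs))
 *             ws->cs_submit(ctx, 0, &cs, 1, NULL, NULL, &sem_info, false, fence);
 *     ws->cs_destroy(cs);
 */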
236
237 static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
238 {
239 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
240
241 if (cs->failed) {
242 cs->base.cdw = 0;
243 return;
244 }
245
246 if (!cs->ws->use_ib_bos) {
247 const uint64_t limit_dws = 0xffff8;
248 uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
249 MIN2(cs->base.max_dw * 2, limit_dws));
250
251 /* The total ib size cannot exceed limit_dws dwords. */
252 if (ib_dws > limit_dws)
253 {
254 cs->failed = true;
255 cs->base.cdw = 0;
256 return;
257 }
258
259 uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
260 if (new_buf) {
261 cs->base.buf = new_buf;
262 cs->base.max_dw = ib_dws;
263 } else {
264 cs->failed = true;
265 cs->base.cdw = 0;
266 }
267 return;
268 }
269
270 uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);
271
272 /* max that fits in the chain size field. */
273 ib_size = MIN2(ib_size, 0xfffff);
274
275 while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
276 cs->base.buf[cs->base.cdw++] = 0xffff1000;
277
278 *cs->ib_size_ptr |= cs->base.cdw + 4;
279
280 if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
281 cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
282 cs->old_ib_buffers = realloc(cs->old_ib_buffers,
283 cs->max_num_old_ib_buffers * sizeof(void*));
284 }
285
286 cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;
287
288 cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
289 RADEON_DOMAIN_GTT,
290 RADEON_FLAG_CPU_ACCESS);
291
292 if (!cs->ib_buffer) {
293 cs->base.cdw = 0;
294 cs->failed = true;
295 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
296 }
297
298 cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
299 if (!cs->ib_mapped) {
300 cs->ws->base.buffer_destroy(cs->ib_buffer);
301 cs->base.cdw = 0;
302 cs->failed = true;
303 cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
304 }
305
306 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
307
308 cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
309 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
310 cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
311 cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
312 cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
313
314 cs->base.buf = (uint32_t *)cs->ib_mapped;
315 cs->base.cdw = 0;
316 cs->base.max_dw = ib_size / 4 - 4;
317
318 }
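/*
 * Growing a hardware IB works by chaining: the old IB is padded with NOP
 * dwords up to 4 dwords before an 8-dword boundary and then terminated with
 * an INDIRECT_BUFFER packet pointing at the freshly allocated IB. The last
 * dword of that packet only gets its CHAIN/VALID bits here; ib_size_ptr is
 * pointed at it so the next grow or cs_finalize() can OR in the final dword
 * count of the new IB.
 */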
319
320 static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
321 {
322 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
323
324 if (cs->ws->use_ib_bos) {
325 while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
326 cs->base.buf[cs->base.cdw++] = 0xffff1000;
327
328 *cs->ib_size_ptr |= cs->base.cdw;
329
330 cs->is_chained = false;
331 }
332
333 return !cs->failed;
334 }
335
336 static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
337 {
338 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
339 cs->base.cdw = 0;
340 cs->failed = false;
341
342 for (unsigned i = 0; i < cs->num_buffers; ++i) {
343 unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
344 (ARRAY_SIZE(cs->buffer_hash_table) - 1);
345 cs->buffer_hash_table[hash] = -1;
346 }
347
348 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
349 unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
350 cs->virtual_buffer_hash_table[hash] = -1;
351 }
352
353 cs->num_buffers = 0;
354 cs->num_virtual_buffers = 0;
355
356 if (cs->ws->use_ib_bos) {
357 cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
358
359 for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
360 cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
361
362 cs->num_old_ib_buffers = 0;
363 cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
364 cs->ib_size_ptr = &cs->ib.size;
365 cs->ib.size = 0;
366 }
367 }
368
369 static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
370 amdgpu_bo_handle bo)
371 {
372 unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
373 int index = cs->buffer_hash_table[hash];
374
375 if (index == -1)
376 return -1;
377
378 if (cs->handles[index] == bo)
379 return index;
380
381 for (unsigned i = 0; i < cs->num_buffers; ++i) {
382 if (cs->handles[i] == bo) {
383 cs->buffer_hash_table[hash] = i;
384 return i;
385 }
386 }
387
388 return -1;
389 }
390
391 static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
392 amdgpu_bo_handle bo,
393 uint8_t priority)
394 {
395 unsigned hash;
396 int index = radv_amdgpu_cs_find_buffer(cs, bo);
397
398 if (index != -1) {
399 cs->priorities[index] = MAX2(cs->priorities[index], priority);
400 return;
401 }
402
403 if (cs->num_buffers == cs->max_num_buffers) {
404 unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
405 cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
406 cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
407 cs->max_num_buffers = new_count;
408 }
409
410 cs->handles[cs->num_buffers] = bo;
411 cs->priorities[cs->num_buffers] = priority;
412
413 hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
414 cs->buffer_hash_table[hash] = cs->num_buffers;
415
416 ++cs->num_buffers;
417 }
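/*
 * BO deduplication uses a 1024-entry direct-mapped cache keyed on the BO
 * handle pointer: a hit answers immediately, a miss falls back to a linear
 * scan of the handle array and refreshes the cache entry. Re-adding a BO only
 * raises its priority (MAX2); it never creates a duplicate entry.
 */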
418
419 static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
420 struct radeon_winsys_bo *bo,
421 uint8_t priority)
422 {
423 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
424 unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
425
426
427 if (!cs->virtual_buffer_hash_table) {
428 cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
429 for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
430 cs->virtual_buffer_hash_table[i] = -1;
431 }
432
433 if (cs->virtual_buffer_hash_table[hash] >= 0) {
434 int idx = cs->virtual_buffer_hash_table[hash];
435 if (cs->virtual_buffers[idx] == bo) {
436 cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
437 return;
438 }
439 for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
440 if (cs->virtual_buffers[i] == bo) {
441 cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
442 cs->virtual_buffer_hash_table[hash] = i;
443 return;
444 }
445 }
446 }
447
448 if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
449 cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
450 cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
451 cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
452 }
453
454 cs->virtual_buffers[cs->num_virtual_buffers] = bo;
455 cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;
456
457 cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
458 ++cs->num_virtual_buffers;
459
460 }
461
462 static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
463 struct radeon_winsys_bo *_bo,
464 uint8_t priority)
465 {
466 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
467 struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
468
469 if (bo->is_virtual) {
470 radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
471 return;
472 }
473
474 radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
475 }
476
477 static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
478 struct radeon_winsys_cs *_child)
479 {
480 struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
481 struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);
482
483 for (unsigned i = 0; i < child->num_buffers; ++i) {
484 radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
485 child->priorities[i]);
486 }
487
488 for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
489 radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
490 child->virtual_buffer_priorities[i]);
491 }
492
493 if (parent->ws->use_ib_bos) {
494 if (parent->base.cdw + 4 > parent->base.max_dw)
495 radv_amdgpu_cs_grow(&parent->base, 4);
496
497 parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
498 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
499 parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
500 parent->base.buf[parent->base.cdw++] = child->ib.size;
501 } else {
502 if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
503 radv_amdgpu_cs_grow(&parent->base, child->base.cdw);
504
505 memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
506 parent->base.cdw += child->base.cdw;
507 }
508 }
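/*
 * Secondary command buffers are executed either by emitting an
 * INDIRECT_BUFFER packet that calls the child's IB (use_ib_bos) or, on the
 * sysmem path, by copying the child's dwords straight into the parent. In
 * both cases the child's BO references are merged into the parent first.
 */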
509
510 static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
511 struct radeon_winsys_cs **cs_array,
512 unsigned count,
513 struct radv_amdgpu_winsys_bo *extra_bo,
514 struct radeon_winsys_cs *extra_cs,
515 amdgpu_bo_list_handle *bo_list)
516 {
517 int r;
518 if (ws->debug_all_bos) {
519 struct radv_amdgpu_winsys_bo *bo;
520 amdgpu_bo_handle *handles;
521 unsigned num = 0;
522
523 pthread_mutex_lock(&ws->global_bo_list_lock);
524
525 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
526 if (!handles) {
527 pthread_mutex_unlock(&ws->global_bo_list_lock);
528 return -ENOMEM;
529 }
530
531 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
532 assert(num < ws->num_buffers);
533 handles[num++] = bo->bo;
534 }
535
536 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
537 handles, NULL,
538 bo_list);
539 free(handles);
540 pthread_mutex_unlock(&ws->global_bo_list_lock);
541 } else if (count == 1 && !extra_bo && !extra_cs &&
542 !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
543 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
544 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
545 cs->priorities, bo_list);
546 } else {
547 unsigned total_buffer_count = !!extra_bo;
548 unsigned unique_bo_count = !!extra_bo;
549 for (unsigned i = 0; i < count; ++i) {
550 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
551 total_buffer_count += cs->num_buffers;
552 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
553 total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
554 }
555
556 if (extra_cs) {
557 total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
558 }
559
560 amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
561 uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
562 if (!handles || !priorities) {
563 free(handles);
564 free(priorities);
565 return -ENOMEM;
566 }
567
568 if (extra_bo) {
569 handles[0] = extra_bo->bo;
570 priorities[0] = 8;
571 }
572
573 for (unsigned i = 0; i < count + !!extra_cs; ++i) {
574 struct radv_amdgpu_cs *cs;
575
576 if (i == count)
577 cs = (struct radv_amdgpu_cs*)extra_cs;
578 else
579 cs = (struct radv_amdgpu_cs*)cs_array[i];
580
581 if (!cs->num_buffers)
582 continue;
583
584 if (unique_bo_count == 0) {
585 memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
586 memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
587 unique_bo_count = cs->num_buffers;
588 continue;
589 }
590 int unique_bo_so_far = unique_bo_count;
591 for (unsigned j = 0; j < cs->num_buffers; ++j) {
592 bool found = false;
593 for (unsigned k = 0; k < unique_bo_so_far; ++k) {
594 if (handles[k] == cs->handles[j]) {
595 found = true;
596 priorities[k] = MAX2(priorities[k],
597 cs->priorities[j]);
598 break;
599 }
600 }
601 if (!found) {
602 handles[unique_bo_count] = cs->handles[j];
603 priorities[unique_bo_count] = cs->priorities[j];
604 ++unique_bo_count;
605 }
606 }
607 for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
608 struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
609 for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
610 struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
611 bool found = false;
612 for (unsigned m = 0; m < unique_bo_count; ++m) {
613 if (handles[m] == bo->bo) {
614 found = true;
615 priorities[m] = MAX2(priorities[m],
616 cs->virtual_buffer_priorities[j]);
617 break;
618 }
619 }
620 if (!found) {
621 handles[unique_bo_count] = bo->bo;
622 priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
623 ++unique_bo_count;
624 }
625 }
626 }
627 }
628 r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
629 priorities, bo_list);
630
631 free(handles);
632 free(priorities);
633 }
634
635 return r;
636 }
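/*
 * Three ways to build the kernel BO list: with debug_all_bos every BO the
 * winsys knows about is passed in; a single CS with no extra BO/CS and no
 * virtual buffers reuses its own handle/priority arrays directly; otherwise
 * the per-CS lists (plus the backing BOs of any virtual buffers and the
 * optional extra BO/CS) are merged with a quadratic duplicate check, keeping
 * the highest priority seen for each handle.
 */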
637
638 static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
639 {
640 struct amdgpu_cs_fence_info ret = {0};
641 if (ctx->fence_map) {
642 ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
643 ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
644 }
645 return ret;
646 }
647
648 static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
649 struct amdgpu_cs_request *request)
650 {
651 radv_amdgpu_request_to_fence(ctx,
652 &ctx->last_submission[request->ip_type][request->ring],
653 request);
654 }
655
656 static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
657 int queue_idx,
658 struct radv_winsys_sem_info *sem_info,
659 struct radeon_winsys_cs **cs_array,
660 unsigned cs_count,
661 struct radeon_winsys_cs *initial_preamble_cs,
662 struct radeon_winsys_cs *continue_preamble_cs,
663 struct radeon_winsys_fence *_fence)
664 {
665 int r;
666 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
667 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
668 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
669 amdgpu_bo_list_handle bo_list;
670 struct amdgpu_cs_request request = {0};
671 struct amdgpu_cs_ib_info ibs[2];
672
673 for (unsigned i = cs_count; i--;) {
674 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);
675
676 if (cs->is_chained) {
677 *cs->ib_size_ptr -= 4;
678 cs->is_chained = false;
679 }
680
681 if (i + 1 < cs_count) {
682 struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
683 assert(cs->base.cdw + 4 <= cs->base.max_dw);
684
685 cs->is_chained = true;
686 *cs->ib_size_ptr += 4;
687
688 cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
689 cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
690 cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
691 cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
692 }
693 }
694
695 r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
696 if (r) {
697 fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
698 return r;
699 }
700
701 request.ip_type = cs0->hw_ip;
702 request.ring = queue_idx;
703 request.number_of_ibs = 1;
704 request.ibs = &cs0->ib;
705 request.resources = bo_list;
706 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
707
708 if (initial_preamble_cs) {
709 request.ibs = ibs;
710 request.number_of_ibs = 2;
711 ibs[1] = cs0->ib;
712 ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
713 }
714
715 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
716 if (r) {
717 if (r == -ENOMEM)
718 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
719 else
720 fprintf(stderr, "amdgpu: The CS has been rejected, "
721 "see dmesg for more information.\n");
722 }
723
724 amdgpu_bo_list_destroy(bo_list);
725
726 if (fence)
727 radv_amdgpu_request_to_fence(ctx, fence, &request);
728
729 radv_assign_last_submit(ctx, &request);
730
731 return r;
732 }
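/*
 * The chained path patches each CS to jump into the next one with an
 * INDIRECT_BUFFER chain packet (undoing any chaining left over from a
 * previous submission first) and then submits only the first IB, optionally
 * preceded by the preamble IB, as a single amdgpu_cs_request.
 */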
733
734 static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
735 int queue_idx,
736 struct radv_winsys_sem_info *sem_info,
737 struct radeon_winsys_cs **cs_array,
738 unsigned cs_count,
739 struct radeon_winsys_cs *initial_preamble_cs,
740 struct radeon_winsys_cs *continue_preamble_cs,
741 struct radeon_winsys_fence *_fence)
742 {
743 int r;
744 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
745 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
746 amdgpu_bo_list_handle bo_list;
747 struct amdgpu_cs_request request;
748 bool emit_signal_sem = sem_info->cs_emit_signal;
749 assert(cs_count);
750
751 for (unsigned i = 0; i < cs_count;) {
752 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
753 struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
754 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
755 unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
756 cs_count - i);
757
758 memset(&request, 0, sizeof(request));
759
760 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
761 preamble_cs, &bo_list);
762 if (r) {
763 fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
764 return r;
765 }
766
767 request.ip_type = cs0->hw_ip;
768 request.ring = queue_idx;
769 request.resources = bo_list;
770 request.number_of_ibs = cnt + !!preamble_cs;
771 request.ibs = ibs;
772 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
773
774 if (preamble_cs) {
775 ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
776 }
777
778 for (unsigned j = 0; j < cnt; ++j) {
779 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
780 ibs[j + !!preamble_cs] = cs->ib;
781
782 if (cs->is_chained) {
783 *cs->ib_size_ptr -= 4;
784 cs->is_chained = false;
785 }
786 }
787
788 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
789 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
790 if (r) {
791 if (r == -ENOMEM)
792 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
793 else
794 fprintf(stderr, "amdgpu: The CS has been rejected, "
795 "see dmesg for more information.\n");
796 }
797
798 amdgpu_bo_list_destroy(bo_list);
799
800 if (r)
801 return r;
802
803 i += cnt;
804 }
805 if (fence)
806 radv_amdgpu_request_to_fence(ctx, fence, &request);
807
808 radv_assign_last_submit(ctx, &request);
809
810 return 0;
811 }
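/*
 * The fallback path submits the command streams unchained, in batches of at
 * most AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs per request (one slot is reserved for
 * the preamble when present); semaphore signalling is only emitted with the
 * last batch.
 */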
812
813 static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
814 int queue_idx,
815 struct radv_winsys_sem_info *sem_info,
816 struct radeon_winsys_cs **cs_array,
817 unsigned cs_count,
818 struct radeon_winsys_cs *initial_preamble_cs,
819 struct radeon_winsys_cs *continue_preamble_cs,
820 struct radeon_winsys_fence *_fence)
821 {
822 int r;
823 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
824 struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
825 struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
826 struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
827 amdgpu_bo_list_handle bo_list;
828 struct amdgpu_cs_request request;
829 uint32_t pad_word = 0xffff1000U;
830 bool emit_signal_sem = sem_info->cs_emit_signal;
831
832 if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
833 pad_word = 0x80000000;
834
835 assert(cs_count);
836
837 for (unsigned i = 0; i < cs_count;) {
838 struct amdgpu_cs_ib_info ib = {0};
839 struct radeon_winsys_bo *bo = NULL;
840 struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
841 uint32_t *ptr;
842 unsigned cnt = 0;
843 unsigned size = 0;
844 unsigned pad_words = 0;
845 if (preamble_cs)
846 size += preamble_cs->cdw;
847
848 while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
849 size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
850 ++cnt;
851 }
852
853 while (!size || (size & 7)) {
854 size++;
855 pad_words++;
856 }
857 assert(cnt);
858
859 bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
860 ptr = ws->buffer_map(bo);
861
862 if (preamble_cs) {
863 memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
864 ptr += preamble_cs->cdw;
865 }
866
867 for (unsigned j = 0; j < cnt; ++j) {
868 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
869 memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
870 ptr += cs->base.cdw;
871
872 }
873
874 for (unsigned j = 0; j < pad_words; ++j)
875 *ptr++ = pad_word;
876
877 memset(&request, 0, sizeof(request));
878
879
880 r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
881 (struct radv_amdgpu_winsys_bo*)bo,
882 preamble_cs, &bo_list);
883 if (r) {
884 fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
885 return r;
886 }
887
888 ib.size = size;
889 ib.ib_mc_address = radv_buffer_get_va(bo);
890
891 request.ip_type = cs0->hw_ip;
892 request.ring = queue_idx;
893 request.resources = bo_list;
894 request.number_of_ibs = 1;
895 request.ibs = &ib;
896 request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);
897
898 sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
899 r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
900 if (r) {
901 if (r == -ENOMEM)
902 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
903 else
904 fprintf(stderr, "amdgpu: The CS has been rejected, "
905 "see dmesg for more information.\n");
906 }
907
908 amdgpu_bo_list_destroy(bo_list);
909
910 ws->buffer_destroy(bo);
911 if (r)
912 return r;
913
914 i += cnt;
915 }
916 if (fence)
917 radv_amdgpu_request_to_fence(ctx, fence, &request);
918
919 radv_assign_last_submit(ctx, &request);
920
921 return 0;
922 }
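/*
 * The sysmem path copies as many CPU-built command streams as fit (together
 * with the preamble) into one freshly allocated GTT BO, pads the result to an
 * 8-dword multiple with NOP dwords (0x80000000 is used instead on SI) and
 * submits that single IB.
 */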
923
924 static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
925 int queue_idx,
926 struct radeon_winsys_cs **cs_array,
927 unsigned cs_count,
928 struct radeon_winsys_cs *initial_preamble_cs,
929 struct radeon_winsys_cs *continue_preamble_cs,
930 struct radv_winsys_sem_info *sem_info,
931 bool can_patch,
932 struct radeon_winsys_fence *_fence)
933 {
934 struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
935 struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
936 int ret;
937
938 assert(sem_info);
939 if (!cs->ws->use_ib_bos) {
940 ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
941 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
942 } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
943 ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
944 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
945 } else {
946 ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
947 cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
948 }
949
950 radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
951 return ret;
952 }
953
954 static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
955 {
956 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
957 void *ret = NULL;
958
959 if (!cs->ib_buffer)
960 return NULL;
961 for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
962 struct radv_amdgpu_winsys_bo *bo;
963
964 bo = (struct radv_amdgpu_winsys_bo*)
965 (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
966 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
967 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
968 return (char *)ret + (addr - bo->base.va);
969 }
970 }
971 if (cs->ws->debug_all_bos) {
972 pthread_mutex_lock(&cs->ws->global_bo_list_lock);
973 list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
974 &cs->ws->global_bo_list, global_list_item) {
975 if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
976 if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
977 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
978 return (char *)ret + (addr - bo->base.va);
979 }
980 }
981 }
982 pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
983 }
984 return ret;
985 }
986
987 static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
988 FILE* file,
989 const int *trace_ids, int trace_id_count)
990 {
991 struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
992 void *ib = cs->base.buf;
993 int num_dw = cs->base.cdw;
994
995 if (cs->ws->use_ib_bos) {
996 ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
997 num_dw = cs->ib.size;
998 }
999 assert(ib);
1000 ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
1001 cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
1002 }
1003
1004 static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
1005 {
1006 switch (radv_priority) {
1007 case RADEON_CTX_PRIORITY_REALTIME:
1008 return AMDGPU_CTX_PRIORITY_VERY_HIGH;
1009 case RADEON_CTX_PRIORITY_HIGH:
1010 return AMDGPU_CTX_PRIORITY_HIGH;
1011 case RADEON_CTX_PRIORITY_MEDIUM:
1012 return AMDGPU_CTX_PRIORITY_NORMAL;
1013 case RADEON_CTX_PRIORITY_LOW:
1014 return AMDGPU_CTX_PRIORITY_LOW;
1015 default:
1016 unreachable("Invalid context priority");
1017 }
1018 }
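/*
 * This is the winsys half of VK_EXT_global_priority: the API-level global
 * priority is first mapped to radeon_ctx_priority when the queue is created
 * and then translated here to the AMDGPU_CTX_PRIORITY_* value handed to
 * amdgpu_cs_ctx_create2(). A sketch of the Vulkan-side half, illustrative
 * only since the real mapping lives outside this file:
 *
 *     static enum radeon_ctx_priority
 *     global_priority_to_radv(VkQueueGlobalPriorityEXT p)
 *     {
 *             switch (p) {
 *             case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT: return RADEON_CTX_PRIORITY_REALTIME;
 *             case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:     return RADEON_CTX_PRIORITY_HIGH;
 *             case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:   return RADEON_CTX_PRIORITY_MEDIUM;
 *             case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:      return RADEON_CTX_PRIORITY_LOW;
 *             default: unreachable("invalid global priority");
 *             }
 *     }
 */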
1019
1020 static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
1021 enum radeon_ctx_priority priority)
1022 {
1023 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1024 struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
1025 uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
1026 int r;
1027
1028 if (!ctx)
1029 return NULL;
1030
1031 r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
1032 if (r) {
1033 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
1034 goto error_create;
1035 }
1036 ctx->ws = ws;
1037
1038 assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
1039 ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
1040 RADEON_DOMAIN_GTT,
1041 RADEON_FLAG_CPU_ACCESS);
1042 if (ctx->fence_bo)
1043 ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
1044 if (ctx->fence_map)
1045 memset(ctx->fence_map, 0, 4096);
1046 return (struct radeon_winsys_ctx *)ctx;
1047 error_create:
1048 FREE(ctx);
1049 return NULL;
1050 }
1051
1052 static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
1053 {
1054 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1055 ctx->ws->base.buffer_destroy(ctx->fence_bo);
1056 amdgpu_cs_ctx_free(ctx->ctx);
1057 FREE(ctx);
1058 }
1059
1060 static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
1061 enum ring_type ring_type, int ring_index)
1062 {
1063 struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
1064 int ip_type = ring_to_hw_ip(ring_type);
1065
1066 if (ctx->last_submission[ip_type][ring_index].fence.fence) {
1067 uint32_t expired;
1068 int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
1069 1000000000ull, 0, &expired);
1070
1071 if (ret || !expired)
1072 return false;
1073 }
1074
1075 return true;
1076 }
1077
1078 static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
1079 {
1080 struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
1081 if (!sem)
1082 return NULL;
1083
1084 return (struct radeon_winsys_sem *)sem;
1085 }
1086
1087 static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
1088 {
1089 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
1090 FREE(sem);
1091 }
1092
1093 static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
1094 uint32_t ip_type,
1095 uint32_t ring,
1096 struct radv_winsys_sem_info *sem_info)
1097 {
1098 for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
1099 struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];
1100
1101 if (sem->context)
1102 return -EINVAL;
1103
1104 *sem = ctx->last_submission[ip_type][ring].fence;
1105 }
1106 return 0;
1107 }
1108
1109 static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
1110 struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
1111 {
1112 struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
1113 if (!syncobj)
1114 return NULL;
1115
1116 for (unsigned i = 0; i < counts->syncobj_count; i++) {
1117 struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
1118 sem->handle = counts->syncobj[i];
1119 }
1120
1121 chunk->chunk_id = chunk_id;
1122 chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
1123 chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
1124 return syncobj;
1125 }
1126
1127 static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
1128 struct amdgpu_cs_request *request,
1129 struct radv_winsys_sem_info *sem_info)
1130 {
1131 int r;
1132 int num_chunks;
1133 int size;
1134 bool user_fence;
1135 struct drm_amdgpu_cs_chunk *chunks;
1136 struct drm_amdgpu_cs_chunk_data *chunk_data;
1137 struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
1138 struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
1139 int i;
1140 struct amdgpu_cs_fence *sem;
1141
1142 user_fence = (request->fence_info.handle != NULL);
1143 size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;
1144
1145 chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
1146
1147 size = request->number_of_ibs + (user_fence ? 1 : 0);
1148
1149 chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
1150
1151 num_chunks = request->number_of_ibs;
1152 for (i = 0; i < request->number_of_ibs; i++) {
1153 struct amdgpu_cs_ib_info *ib;
1154 chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
1155 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1156 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1157
1158 ib = &request->ibs[i];
1159
1160 chunk_data[i].ib_data._pad = 0;
1161 chunk_data[i].ib_data.va_start = ib->ib_mc_address;
1162 chunk_data[i].ib_data.ib_bytes = ib->size * 4;
1163 chunk_data[i].ib_data.ip_type = request->ip_type;
1164 chunk_data[i].ib_data.ip_instance = request->ip_instance;
1165 chunk_data[i].ib_data.ring = request->ring;
1166 chunk_data[i].ib_data.flags = ib->flags;
1167 }
1168
1169 if (user_fence) {
1170 i = num_chunks++;
1171
1172 chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1173 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1174 chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
1175
1176 amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
1177 &chunk_data[i]);
1178 }
1179
1180 if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
1181 wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
1182 &chunks[num_chunks],
1183 AMDGPU_CHUNK_ID_SYNCOBJ_IN);
1184 if (!wait_syncobj) {
1185 r = -ENOMEM;
1186 goto error_out;
1187 }
1188 num_chunks++;
1189
1190 if (sem_info->wait.sem_count == 0)
1191 sem_info->cs_emit_wait = false;
1192
1193 }
1194
1195 if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
1196 sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
1197 if (!sem_dependencies) {
1198 r = -ENOMEM;
1199 goto error_out;
1200 }
1201 int sem_count = 0;
1202 for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
1203 sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
1204 if (!sem->context)
1205 continue;
1206 struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
1207
1208 amdgpu_cs_chunk_fence_to_dep(sem, dep);
1209
1210 sem->context = NULL;
1211 }
1212 i = num_chunks++;
1213
1214 /* dependencies chunk */
1215 chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1216 chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
1217 chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
1218
1219 sem_info->cs_emit_wait = false;
1220 }
1221
1222 if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
1223 signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
1224 &chunks[num_chunks],
1225 AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
1226 if (!signal_syncobj) {
1227 r = -ENOMEM;
1228 goto error_out;
1229 }
1230 num_chunks++;
1231 }
1232
1233 r = amdgpu_cs_submit_raw(ctx->ws->dev,
1234 ctx->ctx,
1235 request->resources,
1236 num_chunks,
1237 chunks,
1238 &request->seq_no);
1239 error_out:
1240 free(sem_dependencies);
1241 free(wait_syncobj);
1242 free(signal_syncobj);
1243 return r;
1244 }
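/*
 * radv_amdgpu_cs_submit() lowers an amdgpu_cs_request into the raw chunk
 * interface: one AMDGPU_CHUNK_ID_IB chunk per IB, an optional
 * AMDGPU_CHUNK_ID_FENCE chunk for the user fence, syncobj in/out chunks for
 * shared semaphores, and a legacy AMDGPU_CHUNK_ID_DEPENDENCIES chunk for the
 * old-style winsys semaphores, then calls amdgpu_cs_submit_raw().
 */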
1245
1246 static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
1247 uint32_t *handle)
1248 {
1249 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1250 return amdgpu_cs_create_syncobj(ws->dev, handle);
1251 }
1252
1253 static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
1254 uint32_t handle)
1255 {
1256 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1257 amdgpu_cs_destroy_syncobj(ws->dev, handle);
1258 }
1259
1260 static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
1261 uint32_t syncobj,
1262 int *fd)
1263 {
1264 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1265
1266 return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
1267 }
1268
1269 static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
1270 int fd,
1271 uint32_t *syncobj)
1272 {
1273 struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
1274
1275 return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
1276 }
1277
1278 void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
1279 {
1280 ws->base.ctx_create = radv_amdgpu_ctx_create;
1281 ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
1282 ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
1283 ws->base.cs_create = radv_amdgpu_cs_create;
1284 ws->base.cs_destroy = radv_amdgpu_cs_destroy;
1285 ws->base.cs_grow = radv_amdgpu_cs_grow;
1286 ws->base.cs_finalize = radv_amdgpu_cs_finalize;
1287 ws->base.cs_reset = radv_amdgpu_cs_reset;
1288 ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
1289 ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
1290 ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
1291 ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
1292 ws->base.create_fence = radv_amdgpu_create_fence;
1293 ws->base.destroy_fence = radv_amdgpu_destroy_fence;
1294 ws->base.create_sem = radv_amdgpu_create_sem;
1295 ws->base.destroy_sem = radv_amdgpu_destroy_sem;
1296 ws->base.create_syncobj = radv_amdgpu_create_syncobj;
1297 ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
1298 ws->base.export_syncobj = radv_amdgpu_export_syncobj;
1299 ws->base.import_syncobj = radv_amdgpu_import_syncobj;
1300 ws->base.fence_wait = radv_amdgpu_fence_wait;
1301 }