radv/winsys: do not try to create a BO list with 0 buffers
[mesa.git] / src / amd / vulkan / winsys / amdgpu / radv_amdgpu_cs.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>

#include "ac_debug.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"

enum {
	VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};

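/* A command stream is backed either by a CPU-mapped GTT BO that the GPU
 * executes in place (when use_ib_bos is set), or by a malloc'ed buffer
 * that the sysmem submit path copies into a fresh BO at submit time.
 * The BOs referenced by the stream are tracked alongside it; the
 * single-slot hash tables make repeated cs_add_buffer calls on the same
 * BO cheap.
 */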
struct radv_amdgpu_cs {
	struct radeon_winsys_cs base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	amdgpu_bo_handle *handles;
	uint8_t *priorities;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	bool failed;
	bool is_chained;

	int buffer_hash_table[1024];
	unsigned hw_ip;

	unsigned num_virtual_buffers;
	unsigned max_num_virtual_buffers;
	struct radeon_winsys_bo **virtual_buffers;
	uint8_t *virtual_buffer_priorities;
	int *virtual_buffer_hash_table;
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
	return (struct radv_amdgpu_cs*)base;
}

static int ring_to_hw_ip(enum ring_type ring)
{
	switch (ring) {
	case RING_GFX:
		return AMDGPU_HW_IP_GFX;
	case RING_DMA:
		return AMDGPU_HW_IP_DMA;
	case RING_COMPUTE:
		return AMDGPU_HW_IP_COMPUTE;
	default:
		unreachable("unsupported ring");
	}
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct amdgpu_cs_request *request,
				 struct radv_winsys_sem_info *sem_info);

static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
					 struct radv_amdgpu_fence *fence,
					 struct amdgpu_cs_request *req)
{
	fence->fence.context = ctx->ctx;
	fence->fence.ip_type = req->ip_type;
	fence->fence.ip_instance = req->ip_instance;
	fence->fence.ring = req->ring;
	fence->fence.fence = req->seq_no;
	fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
{
	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
	return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	free(fence);
}

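/* Wait for a fence. When the context has a user fence mapping, poll the
 * GPU-written sequence number first so already-signaled fences avoid a
 * kernel round trip, then fall back to the libdrm query.
 */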
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
				   struct radeon_winsys_fence *_fence,
				   bool absolute,
				   uint64_t timeout)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	if (fence->user_ptr) {
		if (*fence->user_ptr >= fence->fence.fence)
			return true;
		if (!absolute && !timeout)
			return false;
	}

	/* Now use the libdrm query. */
	r = amdgpu_cs_query_fence_status(&fence->fence,
					 timeout,
					 flags,
					 &expired);

	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	if (expired)
		return true;

	return false;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	free(cs->old_ib_buffers);
	free(cs->virtual_buffers);
	free(cs->virtual_buffer_priorities);
	free(cs->virtual_buffer_hash_table);
	free(cs->handles);
	free(cs->priorities);
	free(cs);
}

static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
				enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	cs->hw_ip = ring_to_hw_ip(ring_type);
	return true;
}

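/* Create a command stream for the given ring type. With use_ib_bos the
 * stream records directly into an executable IB buffer; otherwise it
 * records into system memory for the sysmem submit path.
 */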
static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
		      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, ring_type);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
						  RADEON_DOMAIN_GTT,
						  RADEON_FLAG_CPU_ACCESS|
						  RADEON_FLAG_NO_INTERPROCESS_SHARING);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
	} else {
		cs->base.buf = malloc(16384);
		cs->base.max_dw = 4096;
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
	}

	return &cs->base;
}

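/* Make room for at least min_size further dwords. Without IB BOs this
 * simply grows the malloc'ed buffer (up to the IB size limit). With IB
 * BOs the current buffer is padded, a new one is allocated, and an
 * INDIRECT_BUFFER packet chains to it; the packet's size field is
 * patched later through ib_size_ptr.
 */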
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->failed) {
		cs->base.cdw = 0;
		return;
	}

	if (!cs->ws->use_ib_bos) {
		const uint64_t limit_dws = 0xffff8;
		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
				       MIN2(cs->base.max_dw * 2, limit_dws));

		/* The total ib size cannot exceed limit_dws dwords. */
		if (ib_dws > limit_dws) {
			cs->failed = true;
			cs->base.cdw = 0;
			return;
		}

		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
		if (new_buf) {
			cs->base.buf = new_buf;
			cs->base.max_dw = ib_dws;
		} else {
			cs->failed = true;
			cs->base.cdw = 0;
		}
		return;
	}

	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		cs->base.buf[cs->base.cdw++] = 0xffff1000;

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
					     cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS|
						   RADEON_FLAG_NO_INTERPROCESS_SHARING);

	if (!cs->ib_buffer) {
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}

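/* Pad the IB to a multiple of 8 dwords and patch the final size into
 * the chain/size field recorded in ib_size_ptr.
 */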
static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			cs->base.buf[cs->base.cdw++] = 0xffff1000;

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return !cs->failed;
}

static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->failed = false;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
		cs->virtual_buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;
	cs->num_virtual_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	}
}

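/* Find a BO in the CS buffer list. Each hash bucket caches a single
 * index; on a bucket miss the list is scanned linearly and the cached
 * index is refreshed.
 */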
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
				      amdgpu_bo_handle bo)
{
	unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index] == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i] == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
					       amdgpu_bo_handle bo,
					       uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1) {
		cs->priorities[index] = MAX2(cs->priorities[index], priority);
		return;
	}

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
		cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers] = bo;
	cs->priorities[cs->num_buffers] = priority;

	hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}

static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
					      struct radeon_winsys_bo *bo,
					      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

	if (!cs->virtual_buffer_hash_table) {
		cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
		for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
			cs->virtual_buffer_hash_table[i] = -1;
	}

	if (cs->virtual_buffer_hash_table[hash] >= 0) {
		int idx = cs->virtual_buffer_hash_table[hash];
		if (cs->virtual_buffers[idx] == bo) {
			cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
			return;
		}
		for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
			if (cs->virtual_buffers[i] == bo) {
				cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
				cs->virtual_buffer_hash_table[hash] = i;
				return;
			}
		}
	}

	if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
		cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
		cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
		cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
	}

	cs->virtual_buffers[cs->num_virtual_buffers] = bo;
	cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;

	cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
	++cs->num_virtual_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
				      struct radeon_winsys_bo *_bo,
				      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	if (bo->is_virtual) {
		radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
		return;
	}

	if (bo->base.is_local)
		return;

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
					     struct radeon_winsys_cs *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
						   child->priorities[i]);
	}

	for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
		radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
					  child->virtual_buffer_priorities[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
		parent->base.buf[parent->base.cdw++] = child->ib.size;
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}

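/* Build the amdgpu BO list for a submission. With debug_all_bos every
 * BO known to the winsys is included. A single CS with no extra
 * buffers reuses its handle array directly. Otherwise the per-CS lists
 * are merged, dropping duplicates and keeping the highest priority.
 * When no buffers are referenced at all, no kernel list is created and
 * *bo_list is set to 0; the submit paths check for this before
 * destroying the list.
 */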
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
				      struct radeon_winsys_cs **cs_array,
				      unsigned count,
				      struct radv_amdgpu_winsys_bo *extra_bo,
				      struct radeon_winsys_cs *extra_cs,
				      amdgpu_bo_list_handle *bo_list)
{
	int r = 0;

	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		amdgpu_bo_handle *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num++] = bo->bo;
		}

		r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
					  handles, NULL,
					  bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !extra_bo && !extra_cs &&
	           !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
		if (cs->num_buffers == 0) {
			*bo_list = 0;
			return 0;
		}
		r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
					  cs->priorities, bo_list);
	} else {
		unsigned total_buffer_count = !!extra_bo;
		unsigned unique_bo_count = !!extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			total_buffer_count += cs->num_buffers;
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
				total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
		}

		if (extra_cs) {
			total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
		}
		if (total_buffer_count == 0) {
			*bo_list = 0;
			return 0;
		}
		amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
		uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
		if (!handles || !priorities) {
			free(handles);
			free(priorities);
			return -ENOMEM;
		}

		if (extra_bo) {
			handles[0] = extra_bo->bo;
			priorities[0] = 8;
		}

		for (unsigned i = 0; i < count + !!extra_cs; ++i) {
			struct radv_amdgpu_cs *cs;

			if (i == count)
				cs = (struct radv_amdgpu_cs*)extra_cs;
			else
				cs = (struct radv_amdgpu_cs*)cs_array[i];

			if (!cs->num_buffers)
				continue;

			if (unique_bo_count == 0) {
				memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
				memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
				unique_bo_count = cs->num_buffers;
				continue;
			}
			int unique_bo_so_far = unique_bo_count;
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_so_far; ++k) {
					if (handles[k] == cs->handles[j]) {
						found = true;
						priorities[k] = MAX2(priorities[k],
								     cs->priorities[j]);
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					priorities[unique_bo_count] = cs->priorities[j];
					++unique_bo_count;
				}
			}
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
				struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
				for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
					struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
					bool found = false;
					for (unsigned m = 0; m < unique_bo_count; ++m) {
						if (handles[m] == bo->bo) {
							found = true;
							priorities[m] = MAX2(priorities[m],
									     cs->virtual_buffer_priorities[j]);
							break;
						}
					}
					if (!found) {
						handles[unique_bo_count] = bo->bo;
						priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
						++unique_bo_count;
					}
				}
			}
		}

		if (unique_bo_count > 0) {
			r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
						  priorities, bo_list);
		} else {
			*bo_list = 0;
		}

		free(handles);
		free(priorities);
	}

	return r;
}

static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
	struct amdgpu_cs_fence_info ret = {0};
	if (ctx->fence_map) {
		ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
		ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
	}
	return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
				    struct amdgpu_cs_request *request)
{
	radv_amdgpu_request_to_fence(ctx,
				     &ctx->last_submission[request->ip_type][request->ring],
				     request);
}

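/* Submit the CS array as a single IB by chaining: every CS except the
 * last gets an INDIRECT_BUFFER packet appended that jumps to the next
 * one, so the kernel sees one IB (plus an optional preamble IB).
 */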
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
						int queue_idx,
						struct radv_winsys_sem_info *sem_info,
						struct radeon_winsys_cs **cs_array,
						unsigned cs_count,
						struct radeon_winsys_cs *initial_preamble_cs,
						struct radeon_winsys_cs *continue_preamble_cs,
						struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request = {0};
	struct amdgpu_cs_ib_info ibs[2];

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
		return r;
	}

	request.ip_type = cs0->hw_ip;
	request.ring = queue_idx;
	request.number_of_ibs = 1;
	request.ibs = &cs0->ib;
	request.resources = bo_list;
	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

	if (initial_preamble_cs) {
		request.ibs = ibs;
		request.number_of_ibs = 2;
		ibs[1] = cs0->ib;
		ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
	}

	r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
					"see dmesg for more information.\n");
	}

	if (bo_list)
		amdgpu_bo_list_destroy(bo_list);

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return r;
}

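/* Submit without chaining: split the CS array into groups of at most
 * AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs (one slot is reserved when a
 * preamble is prepended) and submit each group separately.
 */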
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
						 int queue_idx,
						 struct radv_winsys_sem_info *sem_info,
						 struct radeon_winsys_cs **cs_array,
						 unsigned cs_count,
						 struct radeon_winsys_cs *initial_preamble_cs,
						 struct radeon_winsys_cs *continue_preamble_cs,
						 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	bool emit_signal_sem = sem_info->cs_emit_signal;
	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
		struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
		struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
		unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
				    cs_count - i);

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
					       preamble_cs, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = cnt + !!preamble_cs;
		request.ibs = ibs;
		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

		if (preamble_cs) {
			ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
		}

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			ibs[j + !!preamble_cs] = cs->ib;

			if (cs->is_chained) {
				*cs->ib_size_ptr -= 4;
				cs->is_chained = false;
			}
		}

		sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
		r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
						"see dmesg for more information.\n");
		}

		if (bo_list)
			amdgpu_bo_list_destroy(bo_list);

		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

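/* Submit path for streams recorded in system memory: copy the optional
 * preamble and as many CSes as fit into a freshly allocated GTT BO, pad
 * to an 8-dword boundary with NOP dwords, and submit that BO as a
 * single IB.
 */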
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
					       int queue_idx,
					       struct radv_winsys_sem_info *sem_info,
					       struct radeon_winsys_cs **cs_array,
					       unsigned cs_count,
					       struct radeon_winsys_cs *initial_preamble_cs,
					       struct radeon_winsys_cs *continue_preamble_cs,
					       struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	uint32_t pad_word = 0xffff1000U;
	bool emit_signal_sem = sem_info->cs_emit_signal;

	if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info ib = {0};
		struct radeon_winsys_bo *bo = NULL;
		struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;
		unsigned pad_words = 0;
		if (preamble_cs)
			size += preamble_cs->cdw;

		while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
			size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
			++cnt;
		}

		while (!size || (size & 7)) {
			size++;
			pad_words++;
		}
		assert(cnt);

		bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS|RADEON_FLAG_NO_INTERPROCESS_SHARING);
		ptr = ws->buffer_map(bo);

		if (preamble_cs) {
			memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
			ptr += preamble_cs->cdw;
		}

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
			ptr += cs->base.cdw;
		}

		for (unsigned j = 0; j < pad_words; ++j)
			*ptr++ = pad_word;

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
					       (struct radv_amdgpu_winsys_bo*)bo,
					       preamble_cs, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		ib.size = size;
		ib.ib_mc_address = radv_buffer_get_va(bo);

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = 1;
		request.ibs = &ib;
		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

		sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
		r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
						"see dmesg for more information.\n");
		}

		if (bo_list)
			amdgpu_bo_list_destroy(bo_list);

		ws->buffer_destroy(bo);
		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

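/* Pick a submission strategy: sysmem upload when IB BOs are not in use,
 * chaining when the CSes can be patched and do not fit in a single
 * submission (and batchchain is enabled), and the grouped fallback
 * otherwise.
 */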
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
					int queue_idx,
					struct radeon_winsys_cs **cs_array,
					unsigned cs_count,
					struct radeon_winsys_cs *initial_preamble_cs,
					struct radeon_winsys_cs *continue_preamble_cs,
					struct radv_winsys_sem_info *sem_info,
					bool can_patch,
					struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	int ret;

	assert(sem_info);
	if (!cs->ws->use_ib_bos) {
		ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
							  cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
		ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
							   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else {
		ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
							    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	}

	radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
	return ret;
}

static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ret = NULL;

	if (!cs->ib_buffer)
		return NULL;
	for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
		struct radv_amdgpu_winsys_bo *bo;

		bo = (struct radv_amdgpu_winsys_bo*)
		     (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
		if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
			if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
				return (char *)ret + (addr - bo->base.va);
		}
	}
	if (cs->ws->debug_all_bos) {
		pthread_mutex_lock(&cs->ws->global_bo_list_lock);
		list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
				    &cs->ws->global_bo_list, global_list_item) {
			if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
				if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
					pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
					return (char *)ret + (addr - bo->base.va);
				}
			}
		}
		pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
	}
	return ret;
}

static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
				       FILE* file,
				       const int *trace_ids, int trace_id_count)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ib = cs->base.buf;
	int num_dw = cs->base.cdw;

	if (cs->ws->use_ib_bos) {
		ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
		num_dw = cs->ib.size;
	}
	assert(ib);
	ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
		    cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}

static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
	switch (radv_priority) {
	case RADEON_CTX_PRIORITY_REALTIME:
		return AMDGPU_CTX_PRIORITY_VERY_HIGH;
	case RADEON_CTX_PRIORITY_HIGH:
		return AMDGPU_CTX_PRIORITY_HIGH;
	case RADEON_CTX_PRIORITY_MEDIUM:
		return AMDGPU_CTX_PRIORITY_NORMAL;
	case RADEON_CTX_PRIORITY_LOW:
		return AMDGPU_CTX_PRIORITY_LOW;
	default:
		unreachable("Invalid context priority");
	}
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
							enum radeon_ctx_priority priority)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
	int r;

	if (!ctx)
		return NULL;

	r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
		goto error_create;
	}
	ctx->ws = ws;

	assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
	ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
					       RADEON_DOMAIN_GTT,
					       RADEON_FLAG_CPU_ACCESS|
					       RADEON_FLAG_NO_INTERPROCESS_SHARING);
	if (ctx->fence_bo)
		ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
	if (ctx->fence_map)
		memset(ctx->fence_map, 0, 4096);
	return (struct radeon_winsys_ctx *)ctx;
error_create:
	FREE(ctx);
	return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	ctx->ws->base.buffer_destroy(ctx->fence_bo);
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
				      enum ring_type ring_type, int ring_index)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	int ip_type = ring_to_hw_ip(ring_type);

	if (ctx->last_submission[ip_type][ring_index].fence.fence) {
		uint32_t expired;
		int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
						       1000000000ull, 0, &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
	struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
	if (!sem)
		return NULL;

	return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
	struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
	FREE(sem);
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_winsys_sem_info *sem_info)
{
	for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
		struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

		if (sem->context)
			return -EINVAL;

		*sem = ctx->last_submission[ip_type][ring].fence;
	}
	return 0;
}

static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
									  struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
	struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
	if (!syncobj)
		return NULL;

	for (unsigned i = 0; i < counts->syncobj_count; i++) {
		struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
		sem->handle = counts->syncobj[i];
	}

	chunk->chunk_id = chunk_id;
	chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
	chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
	return syncobj;
}

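/* Build the raw chunk array for amdgpu_cs_submit_raw: one IB chunk per
 * IB, an optional user fence chunk, plus the syncobj and dependency
 * chunks describing the semaphores to wait on and signal.
 */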
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct amdgpu_cs_request *request,
				 struct radv_winsys_sem_info *sem_info)
{
	int r;
	int num_chunks;
	int size;
	bool user_fence;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
	int i;
	struct amdgpu_cs_fence *sem;

	user_fence = (request->fence_info.handle != NULL);
	size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;

	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

	num_chunks = request->number_of_ibs;
	for (i = 0; i < request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = request->ip_type;
		chunk_data[i].ib_data.ip_instance = request->ip_instance;
		chunk_data[i].ib_data.ring = request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}

	if (user_fence) {
		i = num_chunks++;

		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
						   &chunk_data[i]);
	}

	if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
		wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
								  &chunks[num_chunks],
								  AMDGPU_CHUNK_ID_SYNCOBJ_IN);
		if (!wait_syncobj) {
			r = -ENOMEM;
			goto error_out;
		}
		num_chunks++;

		if (sem_info->wait.sem_count == 0)
			sem_info->cs_emit_wait = false;
	}

	if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
		sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_out;
		}
		int sem_count = 0;
		for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
			sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
			if (!sem->context)
				continue;
			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

			amdgpu_cs_chunk_fence_to_dep(sem, dep);

			sem->context = NULL;
		}
		i = num_chunks++;

		/* dependencies chunk */
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

		sem_info->cs_emit_wait = false;
	}

	if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
		signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
								    &chunks[num_chunks],
								    AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
		if (!signal_syncobj) {
			r = -ENOMEM;
			goto error_out;
		}
		num_chunks++;
	}

	r = amdgpu_cs_submit_raw(ctx->ws->dev,
				 ctx->ctx,
				 request->resources,
				 num_chunks,
				 chunks,
				 &request->seq_no);
error_out:
	free(sem_dependencies);
	free(wait_syncobj);
	free(signal_syncobj);
	return r;
}

static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
				      uint32_t *handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	return amdgpu_cs_create_syncobj(ws->dev, handle);
}

static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
					uint32_t handle)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	amdgpu_cs_destroy_syncobj(ws->dev, handle);
}

static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
				      uint32_t syncobj,
				      int *fd)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
				      int fd,
				      uint32_t *syncobj)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

	return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

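/* Entry points exposed through the winsys function table. */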
void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.create_sem = radv_amdgpu_create_sem;
	ws->base.destroy_sem = radv_amdgpu_destroy_sem;
	ws->base.create_syncobj = radv_amdgpu_create_syncobj;
	ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
	ws->base.export_syncobj = radv_amdgpu_export_syncobj;
	ws->base.import_syncobj = radv_amdgpu_import_syncobj;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
}