/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>

#include "util/u_memory.h"
#include "ac_debug.h"
#include "amdgpu_id.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"


enum {
    VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};

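/*
 * Command stream state. Depending on radv_amdgpu_winsys::use_ib_bos, a CS
 * is either recorded directly into a GPU-visible GTT buffer (ib_buffer,
 * grown by chaining to a fresh buffer) or into malloc'ed memory that is
 * copied into a freshly allocated GTT buffer at submit time. The hash
 * tables below cache buffer-list membership so cs_add_buffer stays cheap
 * when the same BO is referenced repeatedly.
 */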
struct radv_amdgpu_cs {
    struct radeon_winsys_cs base;
    struct radv_amdgpu_winsys *ws;

    struct amdgpu_cs_ib_info ib;

    struct radeon_winsys_bo *ib_buffer;
    uint8_t *ib_mapped;
    unsigned max_num_buffers;
    unsigned num_buffers;
    amdgpu_bo_handle *handles;
    uint8_t *priorities;

    struct radeon_winsys_bo **old_ib_buffers;
    unsigned num_old_ib_buffers;
    unsigned max_num_old_ib_buffers;
    unsigned *ib_size_ptr;
    bool failed;
    bool is_chained;

    int buffer_hash_table[1024];
    unsigned hw_ip;

    unsigned num_virtual_buffers;
    unsigned max_num_virtual_buffers;
    struct radeon_winsys_bo **virtual_buffers;
    uint8_t *virtual_buffer_priorities;
    int *virtual_buffer_hash_table;
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
    return (struct radv_amdgpu_cs*)base;
}

static int ring_to_hw_ip(enum ring_type ring)
{
    switch (ring) {
    case RING_GFX:
        return AMDGPU_HW_IP_GFX;
    case RING_DMA:
        return AMDGPU_HW_IP_DMA;
    case RING_COMPUTE:
        return AMDGPU_HW_IP_COMPUTE;
    default:
        unreachable("unsupported ring");
    }
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct amdgpu_cs_request *request,
                                 struct radv_winsys_sem_info *sem_info);

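/*
 * Each context owns a small GTT buffer (fence_bo) into which the kernel
 * writes the sequence number of the last completed submission, one
 * uint64_t slot per (ip_type, ring) pair (see radv_set_cs_fence below).
 * radv_amdgpu_request_to_fence records a pointer into that map so fence
 * waits can first poll CPU-side before issuing an ioctl.
 */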
static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
                                         struct radv_amdgpu_fence *fence,
                                         struct amdgpu_cs_request *req)
{
    fence->fence.context = ctx->ctx;
    fence->fence.ip_type = req->ip_type;
    fence->fence.ip_instance = req->ip_instance;
    fence->fence.ring = req->ring;
    fence->fence.fence = req->seq_no;
    fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring) * sizeof(uint64_t));
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
{
    struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
    return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    free(fence);
}

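/*
 * Waiting first polls the CPU-visible fence value written by the kernel
 * (see radv_amdgpu_request_to_fence) and only falls back to the
 * amdgpu_cs_query_fence_status ioctl when that check is inconclusive.
 */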
static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
                                   struct radeon_winsys_fence *_fence,
                                   bool absolute,
                                   uint64_t timeout)
{
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
    int r;
    uint32_t expired = 0;

    if (fence->user_ptr) {
        if (*fence->user_ptr >= fence->fence.fence)
            return true;
        if (!absolute && !timeout)
            return false;
    }

    /* Now use the libdrm query. */
    r = amdgpu_cs_query_fence_status(&fence->fence,
                                     timeout,
                                     flags,
                                     &expired);

    if (r) {
        fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
        return false;
    }

    if (expired)
        return true;

    return false;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

    if (cs->ib_buffer)
        cs->ws->base.buffer_destroy(cs->ib_buffer);
    else
        free(cs->base.buf);

    for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
        cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

    free(cs->old_ib_buffers);
    free(cs->virtual_buffers);
    free(cs->virtual_buffer_priorities);
    free(cs->virtual_buffer_hash_table);
    free(cs->handles);
    free(cs->priorities);
    free(cs);
}

static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
                                enum ring_type ring_type)
{
    for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
        cs->buffer_hash_table[i] = -1;

    cs->hw_ip = ring_to_hw_ip(ring_type);
    return true;
}

static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
                      enum ring_type ring_type)
{
    struct radv_amdgpu_cs *cs;
    uint32_t ib_size = 20 * 1024 * 4;
    cs = calloc(1, sizeof(struct radv_amdgpu_cs));
    if (!cs)
        return NULL;

    cs->ws = radv_amdgpu_winsys(ws);
    radv_amdgpu_init_cs(cs, ring_type);

    if (cs->ws->use_ib_bos) {
        cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
                                          RADEON_DOMAIN_GTT,
                                          RADEON_FLAG_CPU_ACCESS |
                                          RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!cs->ib_buffer) {
            free(cs);
            return NULL;
        }

        cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
        if (!cs->ib_mapped) {
            ws->buffer_destroy(cs->ib_buffer);
            free(cs);
            return NULL;
        }

        cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
        cs->base.buf = (uint32_t *)cs->ib_mapped;
        cs->base.max_dw = ib_size / 4 - 4;
        cs->ib_size_ptr = &cs->ib.size;
        cs->ib.size = 0;

        ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
    } else {
        cs->base.buf = malloc(16384);
        cs->base.max_dw = 4096;
        if (!cs->base.buf) {
            free(cs);
            return NULL;
        }
    }

    return &cs->base;
}
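/*
 * A minimal sketch of how a driver-side caller typically drives this
 * interface; the entry points are the winsys vtable slots wired up in
 * radv_amdgpu_cs_init_functions, while the variable names and the
 * omitted space checks here are hypothetical:
 *
 *    struct radeon_winsys_cs *cs = ws->cs_create(ws, RING_GFX);
 *    radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));   // record packets
 *    radeon_emit(cs, 0);                      // one payload dword
 *    if (ws->cs_finalize(cs))
 *        ws->cs_submit(ctx, 0, &cs, 1, NULL, NULL, &sem_info, false, fence);
 *    ws->cs_reset(cs);                        // reuse for the next frame
 */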
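/*
 * Growing a CS in the IB-BO path works by chaining: the current IB is
 * padded with NOPs so that, together with the 4-dword INDIRECT_BUFFER
 * packet appended after it, it ends on an 8-dword boundary. The size
 * field of that packet is only known once the new IB is finalized, so
 * ib_size_ptr is left pointing at it and patched later (see
 * radv_amdgpu_cs_finalize).
 */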
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

    if (cs->failed) {
        cs->base.cdw = 0;
        return;
    }

    if (!cs->ws->use_ib_bos) {
        const uint64_t limit_dws = 0xffff8;
        uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
                               MIN2(cs->base.max_dw * 2, limit_dws));

        /* The total ib size cannot exceed limit_dws dwords. */
        if (ib_dws > limit_dws) {
            cs->failed = true;
            cs->base.cdw = 0;
            return;
        }

        uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
        if (new_buf) {
            cs->base.buf = new_buf;
            cs->base.max_dw = ib_dws;
        } else {
            cs->failed = true;
            cs->base.cdw = 0;
        }
        return;
    }

    uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

    /* max that fits in the chain size field. */
    ib_size = MIN2(ib_size, 0xfffff);

    while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
        cs->base.buf[cs->base.cdw++] = 0xffff1000;

    *cs->ib_size_ptr |= cs->base.cdw + 4;

    if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
        cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
        cs->old_ib_buffers = realloc(cs->old_ib_buffers,
                                     cs->max_num_old_ib_buffers * sizeof(void*));
    }

    cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

    cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
                                               RADEON_DOMAIN_GTT,
                                               RADEON_FLAG_CPU_ACCESS |
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING);

    if (!cs->ib_buffer) {
        cs->base.cdw = 0;
        cs->failed = true;
        cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
        return;
    }

    cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
    if (!cs->ib_mapped) {
        cs->ws->base.buffer_destroy(cs->ib_buffer);
        cs->base.cdw = 0;
        cs->failed = true;
        cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
        return;
    }

    cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

    cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
    cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
    cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
    cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
    cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

    cs->base.buf = (uint32_t *)cs->ib_mapped;
    cs->base.cdw = 0;
    cs->base.max_dw = ib_size / 4 - 4;
}

static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

    if (cs->ws->use_ib_bos) {
        while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
            cs->base.buf[cs->base.cdw++] = 0xffff1000;

        *cs->ib_size_ptr |= cs->base.cdw;

        cs->is_chained = false;
    }

    return !cs->failed;
}

static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    cs->base.cdw = 0;
    cs->failed = false;

    for (unsigned i = 0; i < cs->num_buffers; ++i) {
        unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
                        (ARRAY_SIZE(cs->buffer_hash_table) - 1);
        cs->buffer_hash_table[hash] = -1;
    }

    for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
        unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
        cs->virtual_buffer_hash_table[hash] = -1;
    }

    cs->num_buffers = 0;
    cs->num_virtual_buffers = 0;

    if (cs->ws->use_ib_bos) {
        cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

        for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
            cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

        cs->num_old_ib_buffers = 0;
        cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
        cs->ib_size_ptr = &cs->ib.size;
        cs->ib.size = 0;
    }
}

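/*
 * buffer_hash_table is not a full hash table: each bucket caches the
 * index of the last buffer that hashed there. On a cache miss we fall
 * back to a linear scan over the handle array and refresh the cache,
 * which works out because a CS tends to reference the same BO many
 * times in a row.
 */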
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
                                      amdgpu_bo_handle bo)
{
    unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
    int index = cs->buffer_hash_table[hash];

    if (index == -1)
        return -1;

    if (cs->handles[index] == bo)
        return index;

    for (unsigned i = 0; i < cs->num_buffers; ++i) {
        if (cs->handles[i] == bo) {
            cs->buffer_hash_table[hash] = i;
            return i;
        }
    }

    return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
                                               amdgpu_bo_handle bo,
                                               uint8_t priority)
{
    unsigned hash;
    int index = radv_amdgpu_cs_find_buffer(cs, bo);

    if (index != -1) {
        cs->priorities[index] = MAX2(cs->priorities[index], priority);
        return;
    }

    if (cs->num_buffers == cs->max_num_buffers) {
        unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
        cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
        cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
        cs->max_num_buffers = new_count;
    }

    cs->handles[cs->num_buffers] = bo;
    cs->priorities[cs->num_buffers] = priority;

    hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
    cs->buffer_hash_table[hash] = cs->num_buffers;

    ++cs->num_buffers;
}

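/*
 * Virtual (sparse) buffers cannot be put on the kernel buffer list
 * directly; they are tracked separately here and expanded into their
 * backing BOs when the list is built in radv_amdgpu_create_bo_list.
 * The hash table is allocated lazily since most command streams never
 * reference a virtual buffer.
 */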
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
                                              struct radeon_winsys_bo *bo,
                                              uint8_t priority)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

    if (!cs->virtual_buffer_hash_table) {
        cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
        for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
            cs->virtual_buffer_hash_table[i] = -1;
    }

    if (cs->virtual_buffer_hash_table[hash] >= 0) {
        int idx = cs->virtual_buffer_hash_table[hash];
        if (cs->virtual_buffers[idx] == bo) {
            cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
            return;
        }
        for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
            if (cs->virtual_buffers[i] == bo) {
                cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
                cs->virtual_buffer_hash_table[hash] = i;
                return;
            }
        }
    }

    if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
        cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
        cs->virtual_buffers = realloc(cs->virtual_buffers,
                                      sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
        cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities,
                                                sizeof(uint8_t) * cs->max_num_virtual_buffers);
    }

    cs->virtual_buffers[cs->num_virtual_buffers] = bo;
    cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;

    cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
    ++cs->num_virtual_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
                                      struct radeon_winsys_bo *_bo,
                                      uint8_t priority)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
    struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

    if (bo->is_virtual) {
        radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
        return;
    }

    if (bo->is_local)
        return;

    radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

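/*
 * Secondary command buffers are folded into the primary either by
 * emitting an INDIRECT_BUFFER packet that calls the child IB (IB-BO
 * path) or by memcpy'ing the child's dwords into the parent (sysmem
 * path). Either way the child's buffer lists are merged into the
 * parent first so the kernel sees every BO the child references.
 */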
static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
                                             struct radeon_winsys_cs *_child)
{
    struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
    struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

    for (unsigned i = 0; i < child->num_buffers; ++i) {
        radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
                                           child->priorities[i]);
    }

    for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
        radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
                                  child->virtual_buffer_priorities[i]);
    }

    if (parent->ws->use_ib_bos) {
        if (parent->base.cdw + 4 > parent->base.max_dw)
            radv_amdgpu_cs_grow(&parent->base, 4);

        parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
        parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
        parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
        parent->base.buf[parent->base.cdw++] = child->ib.size;
    } else {
        if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
            radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

        memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
        parent->base.cdw += child->base.cdw;
    }
}

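/*
 * Builds the amdgpu_bo_list for a submission. Three strategies: with
 * debug_all_bos (RADV_DEBUG=allbos) every BO the winsys ever allocated
 * is listed; a single CS without extras reuses its handle array
 * directly; otherwise the per-CS lists, the optional extra CS and BO,
 * and the backing BOs of any virtual buffers are merged with a
 * quadratic dedup over the combined list.
 */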
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
                                      struct radeon_winsys_cs **cs_array,
                                      unsigned count,
                                      struct radv_amdgpu_winsys_bo *extra_bo,
                                      struct radeon_winsys_cs *extra_cs,
                                      amdgpu_bo_list_handle *bo_list)
{
    int r;
    if (ws->debug_all_bos) {
        struct radv_amdgpu_winsys_bo *bo;
        amdgpu_bo_handle *handles;
        unsigned num = 0;

        pthread_mutex_lock(&ws->global_bo_list_lock);

        handles = malloc(sizeof(handles[0]) * ws->num_buffers);
        if (!handles) {
            pthread_mutex_unlock(&ws->global_bo_list_lock);
            return -ENOMEM;
        }

        LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
            assert(num < ws->num_buffers);
            handles[num++] = bo->bo;
        }

        r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                  handles, NULL,
                                  bo_list);
        free(handles);
        pthread_mutex_unlock(&ws->global_bo_list_lock);
    } else if (count == 1 && !extra_bo && !extra_cs &&
               !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
        struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
        if (cs->num_buffers == 0) {
            *bo_list = 0;
            return 0;
        }
        r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
                                  cs->priorities, bo_list);
    } else {
        unsigned total_buffer_count = !!extra_bo;
        unsigned unique_bo_count = !!extra_bo;
        for (unsigned i = 0; i < count; ++i) {
            struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
            total_buffer_count += cs->num_buffers;
            for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
                total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
        }

        if (extra_cs) {
            total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
        }
        if (total_buffer_count == 0) {
            *bo_list = 0;
            return 0;
        }
        amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
        uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
        if (!handles || !priorities) {
            free(handles);
            free(priorities);
            return -ENOMEM;
        }

        if (extra_bo) {
            handles[0] = extra_bo->bo;
            priorities[0] = 8;
        }

        for (unsigned i = 0; i < count + !!extra_cs; ++i) {
            struct radv_amdgpu_cs *cs;

            if (i == count)
                cs = (struct radv_amdgpu_cs*)extra_cs;
            else
                cs = (struct radv_amdgpu_cs*)cs_array[i];

            if (!cs->num_buffers)
                continue;

            if (unique_bo_count == 0) {
                memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
                memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
                unique_bo_count = cs->num_buffers;
                continue;
            }
            int unique_bo_so_far = unique_bo_count;
            for (unsigned j = 0; j < cs->num_buffers; ++j) {
                bool found = false;
                for (unsigned k = 0; k < unique_bo_so_far; ++k) {
                    if (handles[k] == cs->handles[j]) {
                        found = true;
                        priorities[k] = MAX2(priorities[k],
                                             cs->priorities[j]);
                        break;
                    }
                }
                if (!found) {
                    handles[unique_bo_count] = cs->handles[j];
                    priorities[unique_bo_count] = cs->priorities[j];
                    ++unique_bo_count;
                }
            }
            for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
                struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
                for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
                    struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
                    bool found = false;
                    for (unsigned m = 0; m < unique_bo_count; ++m) {
                        if (handles[m] == bo->bo) {
                            found = true;
                            priorities[m] = MAX2(priorities[m],
                                                 cs->virtual_buffer_priorities[j]);
                            break;
                        }
                    }
                    if (!found) {
                        handles[unique_bo_count] = bo->bo;
                        priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
                        ++unique_bo_count;
                    }
                }
            }
        }
        r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
                                  priorities, bo_list);

        free(handles);
        free(priorities);
    }

    return r;
}

static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
    struct amdgpu_cs_fence_info ret = {0};
    if (ctx->fence_map) {
        ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
        ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
    }
    return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
                                    struct amdgpu_cs_request *request)
{
    radv_amdgpu_request_to_fence(ctx,
                                 &ctx->last_submission[request->ip_type][request->ring],
                                 request);
}

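/*
 * Fast path: every CS in the batch is chained to its successor by
 * writing an INDIRECT_BUFFER packet into the 4 dwords each CS reserves
 * past max_dw and bumping its IB size accordingly, so the kernel only
 * sees a single IB (plus an optional preamble). Any chain left over
 * from a previous submission is undone up front in case the streams
 * are resubmitted in a different order.
 */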
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
                                                int queue_idx,
                                                struct radv_winsys_sem_info *sem_info,
                                                struct radeon_winsys_cs **cs_array,
                                                unsigned cs_count,
                                                struct radeon_winsys_cs *initial_preamble_cs,
                                                struct radeon_winsys_cs *continue_preamble_cs,
                                                struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request = {0};
    struct amdgpu_cs_ib_info ibs[2];

    for (unsigned i = cs_count; i--;) {
        struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

        if (cs->is_chained) {
            *cs->ib_size_ptr -= 4;
            cs->is_chained = false;
        }

        if (i + 1 < cs_count) {
            struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
            assert(cs->base.cdw + 4 <= cs->base.max_dw);

            cs->is_chained = true;
            *cs->ib_size_ptr += 4;

            cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
            cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
            cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
            cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
        }
    }

    r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
    if (r) {
        fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
        return r;
    }

    request.ip_type = cs0->hw_ip;
    request.ring = queue_idx;
    request.number_of_ibs = 1;
    request.ibs = &cs0->ib;
    request.resources = bo_list;
    request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

    if (initial_preamble_cs) {
        request.ibs = ibs;
        request.number_of_ibs = 2;
        ibs[1] = cs0->ib;
        ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
    }

    r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
    if (r) {
        if (r == -ENOMEM)
            fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
        else
            fprintf(stderr, "amdgpu: The CS has been rejected, "
                            "see dmesg for more information.\n");
    }

    if (bo_list)
        amdgpu_bo_list_destroy(bo_list);

    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return r;
}

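/*
 * Fallback when chaining is unavailable: submit the streams in groups
 * of at most AMDGPU_CS_MAX_IBS_PER_SUBMIT IBs, reserving one slot for
 * the preamble. Semaphore signalling is deferred to the last group so
 * waiters cannot observe the batch half-done.
 */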
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
                                                 int queue_idx,
                                                 struct radv_winsys_sem_info *sem_info,
                                                 struct radeon_winsys_cs **cs_array,
                                                 unsigned cs_count,
                                                 struct radeon_winsys_cs *initial_preamble_cs,
                                                 struct radeon_winsys_cs *continue_preamble_cs,
                                                 struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request;
    bool emit_signal_sem = sem_info->cs_emit_signal;
    assert(cs_count);

    for (unsigned i = 0; i < cs_count;) {
        struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
        struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
        struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
        unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
                            cs_count - i);

        memset(&request, 0, sizeof(request));

        r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
                                       preamble_cs, &bo_list);
        if (r) {
            fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
            return r;
        }

        request.ip_type = cs0->hw_ip;
        request.ring = queue_idx;
        request.resources = bo_list;
        request.number_of_ibs = cnt + !!preamble_cs;
        request.ibs = ibs;
        request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

        if (preamble_cs) {
            ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
        }

        for (unsigned j = 0; j < cnt; ++j) {
            struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
            ibs[j + !!preamble_cs] = cs->ib;

            if (cs->is_chained) {
                *cs->ib_size_ptr -= 4;
                cs->is_chained = false;
            }
        }

        sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
        r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
        if (r) {
            if (r == -ENOMEM)
                fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
            else
                fprintf(stderr, "amdgpu: The CS has been rejected, "
                                "see dmesg for more information.\n");
        }

        if (bo_list)
            amdgpu_bo_list_destroy(bo_list);

        if (r)
            return r;

        i += cnt;
    }
    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return 0;
}

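/*
 * Sysmem path (use_ib_bos disabled): the malloc-backed command streams
 * are copied, in groups bounded by the 0xffff8-dword IB size limit,
 * into one freshly allocated GTT buffer per submission and padded to
 * an 8-dword multiple. The type-3 NOP (0xffff1000) is used as the pad
 * word except on SI, which gets the type-2 NOP (0x80000000).
 */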
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
                                               int queue_idx,
                                               struct radv_winsys_sem_info *sem_info,
                                               struct radeon_winsys_cs **cs_array,
                                               unsigned cs_count,
                                               struct radeon_winsys_cs *initial_preamble_cs,
                                               struct radeon_winsys_cs *continue_preamble_cs,
                                               struct radeon_winsys_fence *_fence)
{
    int r;
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
    struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
    amdgpu_bo_list_handle bo_list;
    struct amdgpu_cs_request request;
    uint32_t pad_word = 0xffff1000U;
    bool emit_signal_sem = sem_info->cs_emit_signal;

    if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
        pad_word = 0x80000000;

    assert(cs_count);

    for (unsigned i = 0; i < cs_count;) {
        struct amdgpu_cs_ib_info ib = {0};
        struct radeon_winsys_bo *bo = NULL;
        struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
        uint32_t *ptr;
        unsigned cnt = 0;
        unsigned size = 0;
        unsigned pad_words = 0;
        if (preamble_cs)
            size += preamble_cs->cdw;

        while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
            size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
            ++cnt;
        }

        while (!size || (size & 7)) {
            size++;
            pad_words++;
        }
        assert(cnt);

        bo = ws->buffer_create(ws, 4 * size, 4096, RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS |
                               RADEON_FLAG_NO_INTERPROCESS_SHARING);
        if (!bo)
            return -ENOMEM;
        ptr = ws->buffer_map(bo);
        if (!ptr) {
            ws->buffer_destroy(bo);
            return -ENOMEM;
        }

        if (preamble_cs) {
            memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
            ptr += preamble_cs->cdw;
        }

        for (unsigned j = 0; j < cnt; ++j) {
            struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
            memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
            ptr += cs->base.cdw;
        }

        for (unsigned j = 0; j < pad_words; ++j)
            *ptr++ = pad_word;

        memset(&request, 0, sizeof(request));

        r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
                                       (struct radv_amdgpu_winsys_bo*)bo,
                                       preamble_cs, &bo_list);
        if (r) {
            fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
            ws->buffer_destroy(bo);
            return r;
        }

        ib.size = size;
        ib.ib_mc_address = radv_buffer_get_va(bo);

        request.ip_type = cs0->hw_ip;
        request.ring = queue_idx;
        request.resources = bo_list;
        request.number_of_ibs = 1;
        request.ibs = &ib;
        request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

        sem_info->cs_emit_signal = (i == cs_count - cnt) ? emit_signal_sem : false;
        r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
        if (r) {
            if (r == -ENOMEM)
                fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
            else
                fprintf(stderr, "amdgpu: The CS has been rejected, "
                                "see dmesg for more information.\n");
        }

        if (bo_list)
            amdgpu_bo_list_destroy(bo_list);

        ws->buffer_destroy(bo);
        if (r)
            return r;

        i += cnt;
    }
    if (fence)
        radv_amdgpu_request_to_fence(ctx, fence, &request);

    radv_assign_last_submit(ctx, &request);

    return 0;
}

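/*
 * Top-level submit entry point: picks the sysmem copy path when IB BOs
 * are unavailable, the chaining fast path when the batch is patchable,
 * larger than one kernel submission and batchchain is enabled, and the
 * grouped fallback otherwise. Software semaphores are signalled after
 * the submission completes.
 */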
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
                                        int queue_idx,
                                        struct radeon_winsys_cs **cs_array,
                                        unsigned cs_count,
                                        struct radeon_winsys_cs *initial_preamble_cs,
                                        struct radeon_winsys_cs *continue_preamble_cs,
                                        struct radv_winsys_sem_info *sem_info,
                                        bool can_patch,
                                        struct radeon_winsys_fence *_fence)
{
    struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    int ret;

    assert(sem_info);
    if (!cs->ws->use_ib_bos) {
        ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, sem_info, cs_array,
                                                  cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    } else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
        ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, sem_info, cs_array,
                                                   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    } else {
        ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, sem_info, cs_array,
                                                    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
    }

    radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, sem_info);
    return ret;
}

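/*
 * Maps a GPU VA back to a CPU pointer by searching the CS's current and
 * old IBs (note the <= bound: index num_old_ib_buffers is the live IB),
 * falling back to the global BO list in allbos mode. Used as a callback
 * by ac_parse_ib so nested IBs can be followed while dumping.
 */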
static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
    struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
    void *ret = NULL;

    if (!cs->ib_buffer)
        return NULL;
    for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
        struct radv_amdgpu_winsys_bo *bo;

        bo = (struct radv_amdgpu_winsys_bo*)
             (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
        if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
            if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
                return (char *)ret + (addr - bo->base.va);
        }
    }
    if (cs->ws->debug_all_bos) {
        pthread_mutex_lock(&cs->ws->global_bo_list_lock);
        list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
                            &cs->ws->global_bo_list, global_list_item) {
            if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
                if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
                    pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
                    return (char *)ret + (addr - bo->base.va);
                }
            }
        }
        pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
    }
    return ret;
}

static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
                                       FILE* file,
                                       const int *trace_ids, int trace_id_count)
{
    struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
    void *ib = cs->base.buf;
    int num_dw = cs->base.cdw;

    if (cs->ws->use_ib_bos) {
        ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
        num_dw = cs->ib.size;
    }
    assert(ib);
    ac_parse_ib(file, ib, num_dw, trace_ids, trace_id_count, "main IB",
                cs->ws->info.chip_class, radv_amdgpu_winsys_get_cpu_addr, cs);
}

static uint32_t radv_to_amdgpu_priority(enum radeon_ctx_priority radv_priority)
{
    switch (radv_priority) {
    case RADEON_CTX_PRIORITY_REALTIME:
        return AMDGPU_CTX_PRIORITY_VERY_HIGH;
    case RADEON_CTX_PRIORITY_HIGH:
        return AMDGPU_CTX_PRIORITY_HIGH;
    case RADEON_CTX_PRIORITY_MEDIUM:
        return AMDGPU_CTX_PRIORITY_NORMAL;
    case RADEON_CTX_PRIORITY_LOW:
        return AMDGPU_CTX_PRIORITY_LOW;
    default:
        unreachable("Invalid context priority");
    }
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws,
                                                        enum radeon_ctx_priority priority)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
    uint32_t amdgpu_priority = radv_to_amdgpu_priority(priority);
    int r;

    if (!ctx)
        return NULL;

    r = amdgpu_cs_ctx_create2(ws->dev, amdgpu_priority, &ctx->ctx);
    if (r) {
        fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create2 failed. (%i)\n", r);
        goto error_create;
    }
    ctx->ws = ws;

    assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
    ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
                                           RADEON_DOMAIN_GTT,
                                           RADEON_FLAG_CPU_ACCESS |
                                           RADEON_FLAG_NO_INTERPROCESS_SHARING);
    if (ctx->fence_bo)
        ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
    if (ctx->fence_map)
        memset(ctx->fence_map, 0, 4096);
    return (struct radeon_winsys_ctx *)ctx;
error_create:
    FREE(ctx);
    return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
    struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
    ctx->ws->base.buffer_destroy(ctx->fence_bo);
    amdgpu_cs_ctx_free(ctx->ctx);
    FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
                                      enum ring_type ring_type, int ring_index)
{
    struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
    int ip_type = ring_to_hw_ip(ring_type);

    if (ctx->last_submission[ip_type][ring_index].fence.fence) {
        uint32_t expired;
        int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
                                               1000000000ull, 0, &expired);

        if (ret || !expired)
            return false;
    }

    return true;
}

static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
    struct amdgpu_cs_fence *sem = CALLOC_STRUCT(amdgpu_cs_fence);
    if (!sem)
        return NULL;

    return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
    struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)_sem;
    FREE(sem);
}

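/*
 * Legacy software semaphores: signalling just copies the context's last
 * submitted fence into the semaphore object; waiting turns each armed
 * semaphore into a dependency chunk (see radv_amdgpu_cs_submit) and
 * disarms it by clearing sem->context. Kernel syncobjs replace this
 * scheme where available.
 */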
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
                                   uint32_t ip_type,
                                   uint32_t ring,
                                   struct radv_winsys_sem_info *sem_info)
{
    for (unsigned i = 0; i < sem_info->signal.sem_count; i++) {
        struct amdgpu_cs_fence *sem = (struct amdgpu_cs_fence *)(sem_info->signal.sem)[i];

        if (sem->context)
            return -EINVAL;

        *sem = ctx->last_submission[ip_type][ring].fence;
    }
    return 0;
}

static struct drm_amdgpu_cs_chunk_sem *radv_amdgpu_cs_alloc_syncobj_chunk(struct radv_winsys_sem_counts *counts,
                                                                          struct drm_amdgpu_cs_chunk *chunk, int chunk_id)
{
    struct drm_amdgpu_cs_chunk_sem *syncobj = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * counts->syncobj_count);
    if (!syncobj)
        return NULL;

    for (unsigned i = 0; i < counts->syncobj_count; i++) {
        struct drm_amdgpu_cs_chunk_sem *sem = &syncobj[i];
        sem->handle = counts->syncobj[i];
    }

    chunk->chunk_id = chunk_id;
    chunk->length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * counts->syncobj_count;
    chunk->chunk_data = (uint64_t)(uintptr_t)syncobj;
    return syncobj;
}

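/*
 * Translates an amdgpu_cs_request into the raw chunk array expected by
 * amdgpu_cs_submit_raw: one IB chunk per IB, then optionally a user
 * fence chunk, a syncobj-in chunk, a legacy dependencies chunk and a
 * syncobj-out chunk. The chunk array is sized for the worst case up
 * front since it lives on the stack.
 */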
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
                                 struct amdgpu_cs_request *request,
                                 struct radv_winsys_sem_info *sem_info)
{
    int r;
    int num_chunks;
    int size;
    bool user_fence;
    struct drm_amdgpu_cs_chunk *chunks;
    struct drm_amdgpu_cs_chunk_data *chunk_data;
    struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
    struct drm_amdgpu_cs_chunk_sem *wait_syncobj = NULL, *signal_syncobj = NULL;
    int i;
    struct amdgpu_cs_fence *sem;

    user_fence = (request->fence_info.handle != NULL);
    size = request->number_of_ibs + (user_fence ? 2 : 1) + 3;

    chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

    size = request->number_of_ibs + (user_fence ? 1 : 0);

    chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

    num_chunks = request->number_of_ibs;
    for (i = 0; i < request->number_of_ibs; i++) {
        struct amdgpu_cs_ib_info *ib;
        chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

        ib = &request->ibs[i];

        chunk_data[i].ib_data._pad = 0;
        chunk_data[i].ib_data.va_start = ib->ib_mc_address;
        chunk_data[i].ib_data.ib_bytes = ib->size * 4;
        chunk_data[i].ib_data.ip_type = request->ip_type;
        chunk_data[i].ib_data.ip_instance = request->ip_instance;
        chunk_data[i].ib_data.ring = request->ring;
        chunk_data[i].ib_data.flags = ib->flags;
    }

    if (user_fence) {
        i = num_chunks++;

        chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

        amdgpu_cs_chunk_fence_info_to_data(&request->fence_info,
                                           &chunk_data[i]);
    }

    if (sem_info->wait.syncobj_count && sem_info->cs_emit_wait) {
        wait_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->wait,
                                                          &chunks[num_chunks],
                                                          AMDGPU_CHUNK_ID_SYNCOBJ_IN);
        if (!wait_syncobj) {
            r = -ENOMEM;
            goto error_out;
        }
        num_chunks++;

        if (sem_info->wait.sem_count == 0)
            sem_info->cs_emit_wait = false;
    }

    if (sem_info->wait.sem_count && sem_info->cs_emit_wait) {
        sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_info->wait.sem_count);
        if (!sem_dependencies) {
            r = -ENOMEM;
            goto error_out;
        }
        int sem_count = 0;
        for (unsigned j = 0; j < sem_info->wait.sem_count; j++) {
            sem = (struct amdgpu_cs_fence *)sem_info->wait.sem[j];
            if (!sem->context)
                continue;
            struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];

            amdgpu_cs_chunk_fence_to_dep(sem, dep);

            sem->context = NULL;
        }
        i = num_chunks++;

        /* dependencies chunk */
        chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
        chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
        chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;

        sem_info->cs_emit_wait = false;
    }

    if (sem_info->signal.syncobj_count && sem_info->cs_emit_signal) {
        signal_syncobj = radv_amdgpu_cs_alloc_syncobj_chunk(&sem_info->signal,
                                                            &chunks[num_chunks],
                                                            AMDGPU_CHUNK_ID_SYNCOBJ_OUT);
        if (!signal_syncobj) {
            r = -ENOMEM;
            goto error_out;
        }
        num_chunks++;
    }

    r = amdgpu_cs_submit_raw(ctx->ws->dev,
                             ctx->ctx,
                             request->resources,
                             num_chunks,
                             chunks,
                             &request->seq_no);
error_out:
    free(sem_dependencies);
    free(wait_syncobj);
    free(signal_syncobj);
    return r;
}

static int radv_amdgpu_create_syncobj(struct radeon_winsys *_ws,
                                      uint32_t *handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    return amdgpu_cs_create_syncobj(ws->dev, handle);
}

static void radv_amdgpu_destroy_syncobj(struct radeon_winsys *_ws,
                                        uint32_t handle)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
    amdgpu_cs_destroy_syncobj(ws->dev, handle);
}

static int radv_amdgpu_export_syncobj(struct radeon_winsys *_ws,
                                      uint32_t syncobj,
                                      int *fd)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_export_syncobj(ws->dev, syncobj, fd);
}

static int radv_amdgpu_import_syncobj(struct radeon_winsys *_ws,
                                      int fd,
                                      uint32_t *syncobj)
{
    struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);

    return amdgpu_cs_import_syncobj(ws->dev, fd, syncobj);
}

void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
    ws->base.ctx_create = radv_amdgpu_ctx_create;
    ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
    ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
    ws->base.cs_create = radv_amdgpu_cs_create;
    ws->base.cs_destroy = radv_amdgpu_cs_destroy;
    ws->base.cs_grow = radv_amdgpu_cs_grow;
    ws->base.cs_finalize = radv_amdgpu_cs_finalize;
    ws->base.cs_reset = radv_amdgpu_cs_reset;
    ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
    ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
    ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
    ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
    ws->base.create_fence = radv_amdgpu_create_fence;
    ws->base.destroy_fence = radv_amdgpu_destroy_fence;
    ws->base.create_sem = radv_amdgpu_create_sem;
    ws->base.destroy_sem = radv_amdgpu_destroy_sem;
    ws->base.create_syncobj = radv_amdgpu_create_syncobj;
    ws->base.destroy_syncobj = radv_amdgpu_destroy_syncobj;
    ws->base.export_syncobj = radv_amdgpu_export_syncobj;
    ws->base.import_syncobj = radv_amdgpu_import_syncobj;
    ws->base.fence_wait = radv_amdgpu_fence_wait;
}