radv: introduce some wrappers in the CS code to make porting off libdrm_amdgpu easier.
src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdlib.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>
#include <assert.h>

#include "ac_debug.h"
#include "amdgpu_id.h"
#include "radv_radeon_winsys.h"
#include "radv_amdgpu_cs.h"
#include "radv_amdgpu_bo.h"
#include "sid.h"

enum {
	VIRTUAL_BUFFER_HASH_TABLE_SIZE = 1024
};

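/* A command stream: the growing array of packets plus the set of buffers
 * referenced by them. With use_ib_bos the packets live in GPU-visible IB
 * buffers that can be chained together; otherwise they live in a plain
 * malloc'ed buffer that gets copied into a GPU buffer at submit time. */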
struct radv_amdgpu_cs {
	struct radeon_winsys_cs base;
	struct radv_amdgpu_winsys *ws;

	struct amdgpu_cs_ib_info ib;

	struct radeon_winsys_bo *ib_buffer;
	uint8_t *ib_mapped;
	unsigned max_num_buffers;
	unsigned num_buffers;
	amdgpu_bo_handle *handles;
	uint8_t *priorities;

	struct radeon_winsys_bo **old_ib_buffers;
	unsigned num_old_ib_buffers;
	unsigned max_num_old_ib_buffers;
	unsigned *ib_size_ptr;
	bool failed;
	bool is_chained;

	int buffer_hash_table[1024];
	unsigned hw_ip;

	unsigned num_virtual_buffers;
	unsigned max_num_virtual_buffers;
	struct radeon_winsys_bo **virtual_buffers;
	uint8_t *virtual_buffer_priorities;
	int *virtual_buffer_hash_table;
};

static inline struct radv_amdgpu_cs *
radv_amdgpu_cs(struct radeon_winsys_cs *base)
{
	return (struct radv_amdgpu_cs*)base;
}

struct radv_amdgpu_sem_info {
	int wait_sem_count;
	struct radeon_winsys_sem **wait_sems;
	int signal_sem_count;
	struct radeon_winsys_sem **signal_sems;
};

static int ring_to_hw_ip(enum ring_type ring)
{
	switch (ring) {
	case RING_GFX:
		return AMDGPU_HW_IP_GFX;
	case RING_DMA:
		return AMDGPU_HW_IP_DMA;
	case RING_COMPUTE:
		return AMDGPU_HW_IP_COMPUTE;
	default:
		unreachable("unsupported ring");
	}
}

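/* The helpers below are thin wrappers around the libdrm_amdgpu semaphore
 * and submit entry points; routing all submission work through them is what
 * the commit message above refers to as making a port off libdrm_amdgpu
 * easier. */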
static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
				  uint32_t ip_type,
				  uint32_t ring,
				  struct radv_amdgpu_sem_info *sem_info);
static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_amdgpu_sem_info *sem_info);
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct amdgpu_cs_request *request,
				 struct radv_amdgpu_sem_info *sem_info);

static void radv_amdgpu_request_to_fence(struct radv_amdgpu_ctx *ctx,
					 struct radv_amdgpu_fence *fence,
					 struct amdgpu_cs_request *req)
{
	fence->fence.context = ctx->ctx;
	fence->fence.ip_type = req->ip_type;
	fence->fence.ip_instance = req->ip_instance;
	fence->fence.ring = req->ring;
	fence->fence.fence = req->seq_no;
	/* fence_map is an array of uint64_t, so index in elements here; this
	 * matches the byte offset radv_set_cs_fence() hands to the kernel. */
	fence->user_ptr = (volatile uint64_t*)(ctx->fence_map + (req->ip_type * MAX_RINGS_PER_TYPE + req->ring));
}

static struct radeon_winsys_fence *radv_amdgpu_create_fence(void)
{
	struct radv_amdgpu_fence *fence = calloc(1, sizeof(struct radv_amdgpu_fence));
	return (struct radeon_winsys_fence*)fence;
}

static void radv_amdgpu_destroy_fence(struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	free(fence);
}

static bool radv_amdgpu_fence_wait(struct radeon_winsys *_ws,
				   struct radeon_winsys_fence *_fence,
				   bool absolute,
				   uint64_t timeout)
{
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	unsigned flags = absolute ? AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE : 0;
	int r;
	uint32_t expired = 0;

	if (fence->user_ptr) {
		if (*fence->user_ptr >= fence->fence.fence)
			return true;
		if (!absolute && !timeout)
			return false;
	}

	/* Now use the libdrm query. */
	r = amdgpu_cs_query_fence_status(&fence->fence,
					 timeout,
					 flags,
					 &expired);

	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
		return false;
	}

	if (expired)
		return true;

	return false;
}

static void radv_amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(rcs);

	if (cs->ib_buffer)
		cs->ws->base.buffer_destroy(cs->ib_buffer);
	else
		free(cs->base.buf);

	for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
		cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

	free(cs->old_ib_buffers);
	free(cs->virtual_buffers);
	free(cs->virtual_buffer_priorities);
	free(cs->virtual_buffer_hash_table);
	free(cs->handles);
	free(cs->priorities);
	free(cs);
}

static bool radv_amdgpu_init_cs(struct radv_amdgpu_cs *cs,
				enum ring_type ring_type)
{
	for (int i = 0; i < ARRAY_SIZE(cs->buffer_hash_table); ++i)
		cs->buffer_hash_table[i] = -1;

	cs->hw_ip = ring_to_hw_ip(ring_type);
	return true;
}

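/* Note the 4 dwords of slack below max_dw: radv_amdgpu_cs_grow() and
 * radv_amdgpu_winsys_cs_submit_chained() rely on that room to append an
 * INDIRECT_BUFFER chaining packet without a further resize. */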
static struct radeon_winsys_cs *
radv_amdgpu_cs_create(struct radeon_winsys *ws,
		      enum ring_type ring_type)
{
	struct radv_amdgpu_cs *cs;
	uint32_t ib_size = 20 * 1024 * 4;
	cs = calloc(1, sizeof(struct radv_amdgpu_cs));
	if (!cs)
		return NULL;

	cs->ws = radv_amdgpu_winsys(ws);
	radv_amdgpu_init_cs(cs, ring_type);

	if (cs->ws->use_ib_bos) {
		cs->ib_buffer = ws->buffer_create(ws, ib_size, 0,
						  RADEON_DOMAIN_GTT,
						  RADEON_FLAG_CPU_ACCESS);
		if (!cs->ib_buffer) {
			free(cs);
			return NULL;
		}

		cs->ib_mapped = ws->buffer_map(cs->ib_buffer);
		if (!cs->ib_mapped) {
			ws->buffer_destroy(cs->ib_buffer);
			free(cs);
			return NULL;
		}

		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->base.buf = (uint32_t *)cs->ib_mapped;
		cs->base.max_dw = ib_size / 4 - 4;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;

		ws->cs_add_buffer(&cs->base, cs->ib_buffer, 8);
	} else {
		cs->base.buf = malloc(16384);
		cs->base.max_dw = 4096;
		if (!cs->base.buf) {
			free(cs);
			return NULL;
		}
	}

	return &cs->base;
}

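/* Ensure at least min_size more dwords fit. Without IB BOs this is a plain
 * realloc, capped at the kernel's per-IB limit. With IB BOs the current IB
 * is padded with NOPs so that, including the 4-dword INDIRECT_BUFFER packet
 * emitted below, its size is 8-dword aligned, and a fresh IB is chained in. */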
static void radv_amdgpu_cs_grow(struct radeon_winsys_cs *_cs, size_t min_size)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->failed) {
		cs->base.cdw = 0;
		return;
	}

	if (!cs->ws->use_ib_bos) {
		const uint64_t limit_dws = 0xffff8;
		uint64_t ib_dws = MAX2(cs->base.cdw + min_size,
				       MIN2(cs->base.max_dw * 2, limit_dws));

		/* The total ib size cannot exceed limit_dws dwords. */
		if (ib_dws > limit_dws) {
			cs->failed = true;
			cs->base.cdw = 0;
			return;
		}

		uint32_t *new_buf = realloc(cs->base.buf, ib_dws * 4);
		if (new_buf) {
			cs->base.buf = new_buf;
			cs->base.max_dw = ib_dws;
		} else {
			cs->failed = true;
			cs->base.cdw = 0;
		}
		return;
	}

	uint64_t ib_size = MAX2(min_size * 4 + 16, cs->base.max_dw * 4 * 2);

	/* max that fits in the chain size field. */
	ib_size = MIN2(ib_size, 0xfffff);

	while (!cs->base.cdw || (cs->base.cdw & 7) != 4)
		cs->base.buf[cs->base.cdw++] = 0xffff1000;

	*cs->ib_size_ptr |= cs->base.cdw + 4;

	if (cs->num_old_ib_buffers == cs->max_num_old_ib_buffers) {
		cs->max_num_old_ib_buffers = MAX2(1, cs->max_num_old_ib_buffers * 2);
		cs->old_ib_buffers = realloc(cs->old_ib_buffers,
					     cs->max_num_old_ib_buffers * sizeof(void*));
	}

	cs->old_ib_buffers[cs->num_old_ib_buffers++] = cs->ib_buffer;

	cs->ib_buffer = cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0,
						   RADEON_DOMAIN_GTT,
						   RADEON_FLAG_CPU_ACCESS);

	if (!cs->ib_buffer) {
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ib_mapped = cs->ws->base.buffer_map(cs->ib_buffer);
	if (!cs->ib_mapped) {
		cs->ws->base.buffer_destroy(cs->ib_buffer);
		cs->base.cdw = 0;
		cs->failed = true;
		cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers];
	}

	cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

	cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
	cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
	cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
	cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);

	cs->base.buf = (uint32_t *)cs->ib_mapped;
	cs->base.cdw = 0;
	cs->base.max_dw = ib_size / 4 - 4;
}

static bool radv_amdgpu_cs_finalize(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);

	if (cs->ws->use_ib_bos) {
		while (!cs->base.cdw || (cs->base.cdw & 7) != 0)
			cs->base.buf[cs->base.cdw++] = 0xffff1000;

		*cs->ib_size_ptr |= cs->base.cdw;

		cs->is_chained = false;
	}

	return !cs->failed;
}

static void radv_amdgpu_cs_reset(struct radeon_winsys_cs *_cs)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	cs->base.cdw = 0;
	cs->failed = false;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->handles[i] >> 6) &
		                (ARRAY_SIZE(cs->buffer_hash_table) - 1);
		cs->buffer_hash_table[hash] = -1;
	}

	for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
		unsigned hash = ((uintptr_t)cs->virtual_buffers[i] >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);
		cs->virtual_buffer_hash_table[hash] = -1;
	}

	cs->num_buffers = 0;
	cs->num_virtual_buffers = 0;

	if (cs->ws->use_ib_bos) {
		cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);

		for (unsigned i = 0; i < cs->num_old_ib_buffers; ++i)
			cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);

		cs->num_old_ib_buffers = 0;
		cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
		cs->ib_size_ptr = &cs->ib.size;
		cs->ib.size = 0;
	}
}

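/* Return the index of bo in cs->handles, or -1 if absent: first probe the
 * hash table, then fall back to a linear scan that refreshes the hash slot. */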
static int radv_amdgpu_cs_find_buffer(struct radv_amdgpu_cs *cs,
				      amdgpu_bo_handle bo)
{
	unsigned hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	int index = cs->buffer_hash_table[hash];

	if (index == -1)
		return -1;

	if (cs->handles[index] == bo)
		return index;

	for (unsigned i = 0; i < cs->num_buffers; ++i) {
		if (cs->handles[i] == bo) {
			cs->buffer_hash_table[hash] = i;
			return i;
		}
	}

	return -1;
}

static void radv_amdgpu_cs_add_buffer_internal(struct radv_amdgpu_cs *cs,
					       amdgpu_bo_handle bo,
					       uint8_t priority)
{
	unsigned hash;
	int index = radv_amdgpu_cs_find_buffer(cs, bo);

	if (index != -1) {
		cs->priorities[index] = MAX2(cs->priorities[index], priority);
		return;
	}

	if (cs->num_buffers == cs->max_num_buffers) {
		unsigned new_count = MAX2(1, cs->max_num_buffers * 2);
		cs->handles = realloc(cs->handles, new_count * sizeof(amdgpu_bo_handle));
		cs->priorities = realloc(cs->priorities, new_count * sizeof(uint8_t));
		cs->max_num_buffers = new_count;
	}

	cs->handles[cs->num_buffers] = bo;
	cs->priorities[cs->num_buffers] = priority;

	hash = ((uintptr_t)bo >> 6) & (ARRAY_SIZE(cs->buffer_hash_table) - 1);
	cs->buffer_hash_table[hash] = cs->num_buffers;

	++cs->num_buffers;
}

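/* Record a virtual (sparse) buffer on the CS. Its backing BOs are only
 * expanded into the BO list at submission time in radv_amdgpu_create_bo_list();
 * here we just dedupe by pointer, lazily allocating the hash table. */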
static void radv_amdgpu_cs_add_virtual_buffer(struct radeon_winsys_cs *_cs,
					      struct radeon_winsys_bo *bo,
					      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	unsigned hash = ((uintptr_t)bo >> 6) & (VIRTUAL_BUFFER_HASH_TABLE_SIZE - 1);

	if (!cs->virtual_buffer_hash_table) {
		cs->virtual_buffer_hash_table = malloc(VIRTUAL_BUFFER_HASH_TABLE_SIZE * sizeof(int));
		for (int i = 0; i < VIRTUAL_BUFFER_HASH_TABLE_SIZE; ++i)
			cs->virtual_buffer_hash_table[i] = -1;
	}

	if (cs->virtual_buffer_hash_table[hash] >= 0) {
		int idx = cs->virtual_buffer_hash_table[hash];
		if (cs->virtual_buffers[idx] == bo) {
			cs->virtual_buffer_priorities[idx] = MAX2(cs->virtual_buffer_priorities[idx], priority);
			return;
		}
		for (unsigned i = 0; i < cs->num_virtual_buffers; ++i) {
			if (cs->virtual_buffers[i] == bo) {
				cs->virtual_buffer_priorities[i] = MAX2(cs->virtual_buffer_priorities[i], priority);
				cs->virtual_buffer_hash_table[hash] = i;
				return;
			}
		}
	}

	if (cs->max_num_virtual_buffers <= cs->num_virtual_buffers) {
		cs->max_num_virtual_buffers = MAX2(2, cs->max_num_virtual_buffers * 2);
		cs->virtual_buffers = realloc(cs->virtual_buffers, sizeof(struct radeon_winsys_bo*) * cs->max_num_virtual_buffers);
		cs->virtual_buffer_priorities = realloc(cs->virtual_buffer_priorities, sizeof(uint8_t) * cs->max_num_virtual_buffers);
	}

	cs->virtual_buffers[cs->num_virtual_buffers] = bo;
	cs->virtual_buffer_priorities[cs->num_virtual_buffers] = priority;

	cs->virtual_buffer_hash_table[hash] = cs->num_virtual_buffers;
	++cs->num_virtual_buffers;
}

static void radv_amdgpu_cs_add_buffer(struct radeon_winsys_cs *_cs,
				      struct radeon_winsys_bo *_bo,
				      uint8_t priority)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(_cs);
	struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);

	if (bo->is_virtual) {
		radv_amdgpu_cs_add_virtual_buffer(_cs, _bo, priority);
		return;
	}

	radv_amdgpu_cs_add_buffer_internal(cs, bo->bo, priority);
}

static void radv_amdgpu_cs_execute_secondary(struct radeon_winsys_cs *_parent,
					     struct radeon_winsys_cs *_child)
{
	struct radv_amdgpu_cs *parent = radv_amdgpu_cs(_parent);
	struct radv_amdgpu_cs *child = radv_amdgpu_cs(_child);

	for (unsigned i = 0; i < child->num_buffers; ++i) {
		radv_amdgpu_cs_add_buffer_internal(parent, child->handles[i],
						   child->priorities[i]);
	}

	for (unsigned i = 0; i < child->num_virtual_buffers; ++i) {
		radv_amdgpu_cs_add_buffer(&parent->base, child->virtual_buffers[i],
					  child->virtual_buffer_priorities[i]);
	}

	if (parent->ws->use_ib_bos) {
		if (parent->base.cdw + 4 > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, 4);

		parent->base.buf[parent->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address;
		parent->base.buf[parent->base.cdw++] = child->ib.ib_mc_address >> 32;
		parent->base.buf[parent->base.cdw++] = child->ib.size;
	} else {
		if (parent->base.cdw + child->base.cdw > parent->base.max_dw)
			radv_amdgpu_cs_grow(&parent->base, child->base.cdw);

		memcpy(parent->base.buf + parent->base.cdw, child->base.buf, 4 * child->base.cdw);
		parent->base.cdw += child->base.cdw;
	}
}

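/* Build the BO list for one kernel submission. Three cases:
 * - debug_all_bos: list every buffer the winsys knows about,
 * - a single CS without extras or virtual buffers: reuse its arrays directly,
 * - otherwise merge all CSes (plus any extra BO/CS and the backing BOs of
 *   virtual buffers) into one deduplicated list, keeping the highest
 *   priority seen for each buffer. */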
static int radv_amdgpu_create_bo_list(struct radv_amdgpu_winsys *ws,
				      struct radeon_winsys_cs **cs_array,
				      unsigned count,
				      struct radv_amdgpu_winsys_bo *extra_bo,
				      struct radeon_winsys_cs *extra_cs,
				      amdgpu_bo_list_handle *bo_list)
{
	int r;
	if (ws->debug_all_bos) {
		struct radv_amdgpu_winsys_bo *bo;
		amdgpu_bo_handle *handles;
		unsigned num = 0;

		pthread_mutex_lock(&ws->global_bo_list_lock);

		handles = malloc(sizeof(handles[0]) * ws->num_buffers);
		if (!handles) {
			pthread_mutex_unlock(&ws->global_bo_list_lock);
			return -ENOMEM;
		}

		LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
			assert(num < ws->num_buffers);
			handles[num++] = bo->bo;
		}

		r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
					  handles, NULL,
					  bo_list);
		free(handles);
		pthread_mutex_unlock(&ws->global_bo_list_lock);
	} else if (count == 1 && !extra_bo && !extra_cs &&
	           !radv_amdgpu_cs(cs_array[0])->num_virtual_buffers) {
		struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[0];
		r = amdgpu_bo_list_create(ws->dev, cs->num_buffers, cs->handles,
					  cs->priorities, bo_list);
	} else {
		unsigned total_buffer_count = !!extra_bo;
		unsigned unique_bo_count = !!extra_bo;
		for (unsigned i = 0; i < count; ++i) {
			struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs*)cs_array[i];
			total_buffer_count += cs->num_buffers;
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j)
				total_buffer_count += radv_amdgpu_winsys_bo(cs->virtual_buffers[j])->bo_count;
		}

		if (extra_cs) {
			total_buffer_count += ((struct radv_amdgpu_cs*)extra_cs)->num_buffers;
		}

		amdgpu_bo_handle *handles = malloc(sizeof(amdgpu_bo_handle) * total_buffer_count);
		uint8_t *priorities = malloc(sizeof(uint8_t) * total_buffer_count);
		if (!handles || !priorities) {
			free(handles);
			free(priorities);
			return -ENOMEM;
		}

		if (extra_bo) {
			handles[0] = extra_bo->bo;
			priorities[0] = 8;
		}

		for (unsigned i = 0; i < count + !!extra_cs; ++i) {
			struct radv_amdgpu_cs *cs;

			if (i == count)
				cs = (struct radv_amdgpu_cs*)extra_cs;
			else
				cs = (struct radv_amdgpu_cs*)cs_array[i];

			if (!cs->num_buffers)
				continue;

			if (unique_bo_count == 0) {
				memcpy(handles, cs->handles, cs->num_buffers * sizeof(amdgpu_bo_handle));
				memcpy(priorities, cs->priorities, cs->num_buffers * sizeof(uint8_t));
				unique_bo_count = cs->num_buffers;
				continue;
			}
			int unique_bo_so_far = unique_bo_count;
			for (unsigned j = 0; j < cs->num_buffers; ++j) {
				bool found = false;
				for (unsigned k = 0; k < unique_bo_so_far; ++k) {
					if (handles[k] == cs->handles[j]) {
						found = true;
						priorities[k] = MAX2(priorities[k],
								     cs->priorities[j]);
						break;
					}
				}
				if (!found) {
					handles[unique_bo_count] = cs->handles[j];
					priorities[unique_bo_count] = cs->priorities[j];
					++unique_bo_count;
				}
			}
			for (unsigned j = 0; j < cs->num_virtual_buffers; ++j) {
				struct radv_amdgpu_winsys_bo *virtual_bo = radv_amdgpu_winsys_bo(cs->virtual_buffers[j]);
				for (unsigned k = 0; k < virtual_bo->bo_count; ++k) {
					struct radv_amdgpu_winsys_bo *bo = virtual_bo->bos[k];
					bool found = false;
					for (unsigned m = 0; m < unique_bo_count; ++m) {
						if (handles[m] == bo->bo) {
							found = true;
							priorities[m] = MAX2(priorities[m],
									     cs->virtual_buffer_priorities[j]);
							break;
						}
					}
					if (!found) {
						handles[unique_bo_count] = bo->bo;
						priorities[unique_bo_count] = cs->virtual_buffer_priorities[j];
						++unique_bo_count;
					}
				}
			}
		}
		r = amdgpu_bo_list_create(ws->dev, unique_bo_count, handles,
					  priorities, bo_list);

		free(handles);
		free(priorities);
	}

	return r;
}

static struct amdgpu_cs_fence_info radv_set_cs_fence(struct radv_amdgpu_ctx *ctx, int ip_type, int ring)
{
	struct amdgpu_cs_fence_info ret = {0};
	if (ctx->fence_map) {
		ret.handle = radv_amdgpu_winsys_bo(ctx->fence_bo)->bo;
		ret.offset = (ip_type * MAX_RINGS_PER_TYPE + ring) * sizeof(uint64_t);
	}
	return ret;
}

static void radv_assign_last_submit(struct radv_amdgpu_ctx *ctx,
				    struct amdgpu_cs_request *request)
{
	radv_amdgpu_request_to_fence(ctx,
				     &ctx->last_submission[request->ip_type][request->ring],
				     request);
}

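/* Submit all CSes as one request by patching an INDIRECT_BUFFER chain packet
 * into the 4 reserved dwords at the end of each IB, so the kernel only sees
 * the first IB plus an optional preamble. */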
static int radv_amdgpu_winsys_cs_submit_chained(struct radeon_winsys_ctx *_ctx,
						int queue_idx,
						struct radv_amdgpu_sem_info *sem_info,
						struct radeon_winsys_cs **cs_array,
						unsigned cs_count,
						struct radeon_winsys_cs *initial_preamble_cs,
						struct radeon_winsys_cs *continue_preamble_cs,
						struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request = {0};
	struct amdgpu_cs_ib_info ibs[2];

	for (unsigned i = cs_count; i--;) {
		struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i]);

		if (cs->is_chained) {
			*cs->ib_size_ptr -= 4;
			cs->is_chained = false;
		}

		if (i + 1 < cs_count) {
			struct radv_amdgpu_cs *next = radv_amdgpu_cs(cs_array[i + 1]);
			assert(cs->base.cdw + 4 <= cs->base.max_dw);

			cs->is_chained = true;
			*cs->ib_size_ptr += 4;

			cs->base.buf[cs->base.cdw + 0] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
			cs->base.buf[cs->base.cdw + 1] = next->ib.ib_mc_address;
			cs->base.buf[cs->base.cdw + 2] = next->ib.ib_mc_address >> 32;
			cs->base.buf[cs->base.cdw + 3] = S_3F2_CHAIN(1) | S_3F2_VALID(1) | next->ib.size;
		}
	}

	r = radv_amdgpu_create_bo_list(cs0->ws, cs_array, cs_count, NULL, initial_preamble_cs, &bo_list);
	if (r) {
		fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
		return r;
	}

	request.ip_type = cs0->hw_ip;
	request.ring = queue_idx;
	request.number_of_ibs = 1;
	request.ibs = &cs0->ib;
	request.resources = bo_list;
	request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

	if (initial_preamble_cs) {
		request.ibs = ibs;
		request.number_of_ibs = 2;
		ibs[1] = cs0->ib;
		ibs[0] = ((struct radv_amdgpu_cs*)initial_preamble_cs)->ib;
	}

	r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
	if (r) {
		if (r == -ENOMEM)
			fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
		else
			fprintf(stderr, "amdgpu: The CS has been rejected, "
			                "see dmesg for more information.\n");
	}

	amdgpu_bo_list_destroy(bo_list);

	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return r;
}

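/* Non-chained submission: hand the IBs to the kernel directly, at most
 * AMDGPU_CS_MAX_IBS_PER_SUBMIT per request, looping as often as needed. */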
static int radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
						 int queue_idx,
						 struct radv_amdgpu_sem_info *sem_info,
						 struct radeon_winsys_cs **cs_array,
						 unsigned cs_count,
						 struct radeon_winsys_cs *initial_preamble_cs,
						 struct radeon_winsys_cs *continue_preamble_cs,
						 struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[i]);
		struct amdgpu_cs_ib_info ibs[AMDGPU_CS_MAX_IBS_PER_SUBMIT];
		struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
		unsigned cnt = MIN2(AMDGPU_CS_MAX_IBS_PER_SUBMIT - !!preamble_cs,
				    cs_count - i);

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt, NULL,
					       preamble_cs, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = cnt + !!preamble_cs;
		request.ibs = ibs;
		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

		if (preamble_cs) {
			ibs[0] = radv_amdgpu_cs(preamble_cs)->ib;
		}

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			ibs[j + !!preamble_cs] = cs->ib;

			if (cs->is_chained) {
				*cs->ib_size_ptr -= 4;
				cs->is_chained = false;
			}
		}

		r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
				                "see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

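/* Submission path for CSes recorded in system memory (no IB BOs): copy as
 * many CSes as fit under the per-IB dword limit into a freshly allocated
 * GTT buffer, pad to an 8-dword boundary with the chip-appropriate NOP and
 * submit that buffer as a single IB. */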
static int radv_amdgpu_winsys_cs_submit_sysmem(struct radeon_winsys_ctx *_ctx,
					       int queue_idx,
					       struct radv_amdgpu_sem_info *sem_info,
					       struct radeon_winsys_cs **cs_array,
					       unsigned cs_count,
					       struct radeon_winsys_cs *initial_preamble_cs,
					       struct radeon_winsys_cs *continue_preamble_cs,
					       struct radeon_winsys_fence *_fence)
{
	int r;
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
	struct radv_amdgpu_cs *cs0 = radv_amdgpu_cs(cs_array[0]);
	struct radeon_winsys *ws = (struct radeon_winsys*)cs0->ws;
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_request request;
	uint32_t pad_word = 0xffff1000U;

	if (radv_amdgpu_winsys(ws)->info.chip_class == SI)
		pad_word = 0x80000000;

	assert(cs_count);

	for (unsigned i = 0; i < cs_count;) {
		struct amdgpu_cs_ib_info ib = {0};
		struct radeon_winsys_bo *bo = NULL;
		struct radeon_winsys_cs *preamble_cs = i ? continue_preamble_cs : initial_preamble_cs;
		uint32_t *ptr;
		unsigned cnt = 0;
		unsigned size = 0;

		if (preamble_cs)
			size += preamble_cs->cdw;

		while (i + cnt < cs_count && 0xffff8 - size >= radv_amdgpu_cs(cs_array[i + cnt])->base.cdw) {
			size += radv_amdgpu_cs(cs_array[i + cnt])->base.cdw;
			++cnt;
		}

		assert(cnt);

		/* Leave room for the worst-case 8 dwords of NOP padding below. */
		bo = ws->buffer_create(ws, 4 * size + 32, 4096, RADEON_DOMAIN_GTT, RADEON_FLAG_CPU_ACCESS);
		ptr = ws->buffer_map(bo);

		if (preamble_cs) {
			memcpy(ptr, preamble_cs->buf, preamble_cs->cdw * 4);
			ptr += preamble_cs->cdw;
		}

		for (unsigned j = 0; j < cnt; ++j) {
			struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[i + j]);
			memcpy(ptr, cs->base.buf, 4 * cs->base.cdw);
			ptr += cs->base.cdw;
		}

		while (!size || (size & 7)) {
			*ptr++ = pad_word;
			++size;
		}

		memset(&request, 0, sizeof(request));

		r = radv_amdgpu_create_bo_list(cs0->ws, &cs_array[i], cnt,
					       (struct radv_amdgpu_winsys_bo*)bo,
					       preamble_cs, &bo_list);
		if (r) {
			fprintf(stderr, "amdgpu: Failed to create the BO list for submission\n");
			return r;
		}

		ib.size = size;
		ib.ib_mc_address = ws->buffer_get_va(bo);

		request.ip_type = cs0->hw_ip;
		request.ring = queue_idx;
		request.resources = bo_list;
		request.number_of_ibs = 1;
		request.ibs = &ib;
		request.fence_info = radv_set_cs_fence(ctx, cs0->hw_ip, queue_idx);

		r = radv_amdgpu_cs_submit(ctx, &request, sem_info);
		if (r) {
			if (r == -ENOMEM)
				fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
			else
				fprintf(stderr, "amdgpu: The CS has been rejected, "
				                "see dmesg for more information.\n");
		}

		amdgpu_bo_list_destroy(bo_list);

		ws->buffer_destroy(bo);
		if (r)
			return r;

		i += cnt;
	}
	if (fence)
		radv_amdgpu_request_to_fence(ctx, fence, &request);

	radv_assign_last_submit(ctx, &request);

	return 0;
}

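/* Common submit entry point: set up semaphore bookkeeping, then dispatch to
 * the sysmem, chained or fallback path depending on the winsys settings and
 * the number of CSes. */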
static int radv_amdgpu_winsys_cs_submit(struct radeon_winsys_ctx *_ctx,
					int queue_idx,
					struct radeon_winsys_cs **cs_array,
					unsigned cs_count,
					struct radeon_winsys_cs *initial_preamble_cs,
					struct radeon_winsys_cs *continue_preamble_cs,
					struct radeon_winsys_sem **wait_sem,
					unsigned wait_sem_count,
					struct radeon_winsys_sem **signal_sem,
					unsigned signal_sem_count,
					bool can_patch,
					struct radeon_winsys_fence *_fence)
{
	struct radv_amdgpu_cs *cs = radv_amdgpu_cs(cs_array[0]);
	struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
	int ret;
	struct radv_amdgpu_sem_info sem_info = {0};

	sem_info.wait_sems = wait_sem;
	sem_info.wait_sem_count = wait_sem_count;
	sem_info.signal_sems = signal_sem;
	sem_info.signal_sem_count = signal_sem_count;

	radv_amdgpu_wait_sems(ctx, cs->hw_ip, queue_idx, &sem_info);

	if (!cs->ws->use_ib_bos) {
		ret = radv_amdgpu_winsys_cs_submit_sysmem(_ctx, queue_idx, &sem_info, cs_array,
							  cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else if (can_patch && cs_count > AMDGPU_CS_MAX_IBS_PER_SUBMIT && cs->ws->batchchain) {
		ret = radv_amdgpu_winsys_cs_submit_chained(_ctx, queue_idx, &sem_info, cs_array,
							   cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	} else {
		ret = radv_amdgpu_winsys_cs_submit_fallback(_ctx, queue_idx, &sem_info, cs_array,
							    cs_count, initial_preamble_cs, continue_preamble_cs, _fence);
	}

	radv_amdgpu_signal_sems(ctx, cs->hw_ip, queue_idx, &sem_info);
	return ret;
}

static void *radv_amdgpu_winsys_get_cpu_addr(void *_cs, uint64_t addr)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ret = NULL;

	if (!cs->ib_buffer)
		return NULL;
	for (unsigned i = 0; i <= cs->num_old_ib_buffers; ++i) {
		struct radv_amdgpu_winsys_bo *bo;

		bo = (struct radv_amdgpu_winsys_bo*)
		     (i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
		if (addr >= bo->va && addr - bo->va < bo->size) {
			if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
				return (char *)ret + (addr - bo->va);
		}
	}
	return ret;
}

static void radv_amdgpu_winsys_cs_dump(struct radeon_winsys_cs *_cs,
				       FILE* file,
				       uint32_t trace_id)
{
	struct radv_amdgpu_cs *cs = (struct radv_amdgpu_cs *)_cs;
	void *ib = cs->base.buf;
	int num_dw = cs->base.cdw;

	if (cs->ws->use_ib_bos) {
		ib = radv_amdgpu_winsys_get_cpu_addr(cs, cs->ib.ib_mc_address);
		num_dw = cs->ib.size;
	}
	assert(ib);
	ac_parse_ib(file, ib, num_dw, trace_id, "main IB", cs->ws->info.chip_class,
		    radv_amdgpu_winsys_get_cpu_addr, cs);
}

static struct radeon_winsys_ctx *radv_amdgpu_ctx_create(struct radeon_winsys *_ws)
{
	struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
	struct radv_amdgpu_ctx *ctx = CALLOC_STRUCT(radv_amdgpu_ctx);
	int r;

	if (!ctx)
		return NULL;
	r = amdgpu_cs_ctx_create(ws->dev, &ctx->ctx);
	if (r) {
		fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
		goto error_create;
	}
	ctx->ws = ws;

	assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
	ctx->fence_bo = ws->base.buffer_create(&ws->base, 4096, 8,
					       RADEON_DOMAIN_GTT,
					       RADEON_FLAG_CPU_ACCESS);
	if (ctx->fence_bo)
		ctx->fence_map = (uint64_t*)ws->base.buffer_map(ctx->fence_bo);
	if (ctx->fence_map)
		memset(ctx->fence_map, 0, 4096);
	return (struct radeon_winsys_ctx *)ctx;
error_create:
	FREE(ctx);
	return NULL;
}

static void radv_amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	ctx->ws->base.buffer_destroy(ctx->fence_bo);
	amdgpu_cs_ctx_free(ctx->ctx);
	FREE(ctx);
}

static bool radv_amdgpu_ctx_wait_idle(struct radeon_winsys_ctx *rwctx,
				      enum ring_type ring_type, int ring_index)
{
	struct radv_amdgpu_ctx *ctx = (struct radv_amdgpu_ctx *)rwctx;
	int ip_type = ring_to_hw_ip(ring_type);

	if (ctx->last_submission[ip_type][ring_index].fence.fence) {
		uint32_t expired;
		int ret = amdgpu_cs_query_fence_status(&ctx->last_submission[ip_type][ring_index].fence,
						       1000000000ull, 0, &expired);

		if (ret || !expired)
			return false;
	}

	return true;
}

static struct radeon_winsys_sem *radv_amdgpu_create_sem(struct radeon_winsys *_ws)
{
	int ret;
	amdgpu_semaphore_handle sem;

	ret = amdgpu_cs_create_semaphore(&sem);
	if (ret)
		return NULL;
	return (struct radeon_winsys_sem *)sem;
}

static void radv_amdgpu_destroy_sem(struct radeon_winsys_sem *_sem)
{
	amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)_sem;
	amdgpu_cs_destroy_semaphore(sem);
}

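/* Semaphore wrappers: like radv_amdgpu_cs_submit() below, these funnel the
 * libdrm_amdgpu calls through a single spot so they are easy to replace. */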
static void radv_amdgpu_wait_sems(struct radv_amdgpu_ctx *ctx,
				  uint32_t ip_type,
				  uint32_t ring,
				  struct radv_amdgpu_sem_info *sem_info)
{
	for (unsigned i = 0; i < sem_info->wait_sem_count; i++) {
		amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->wait_sems[i];
		amdgpu_cs_wait_semaphore(ctx->ctx, ip_type, 0, ring,
					 sem);
	}
}

static int radv_amdgpu_signal_sems(struct radv_amdgpu_ctx *ctx,
				   uint32_t ip_type,
				   uint32_t ring,
				   struct radv_amdgpu_sem_info *sem_info)
{
	for (unsigned i = 0; i < sem_info->signal_sem_count; i++) {
		amdgpu_semaphore_handle sem = (amdgpu_semaphore_handle)sem_info->signal_sems[i];
		amdgpu_cs_signal_semaphore(ctx->ctx, ip_type, 0, ring,
					   sem);
	}
	return 0;
}

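/* All submissions end up here. sem_info is currently unused, but it is
 * threaded through so an alternative submission backend could consume it
 * without touching the callers. */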
static int radv_amdgpu_cs_submit(struct radv_amdgpu_ctx *ctx,
				 struct amdgpu_cs_request *request,
				 struct radv_amdgpu_sem_info *sem_info)
{
	return amdgpu_cs_submit(ctx->ctx, 0, request, 1);
}

void radv_amdgpu_cs_init_functions(struct radv_amdgpu_winsys *ws)
{
	ws->base.ctx_create = radv_amdgpu_ctx_create;
	ws->base.ctx_destroy = radv_amdgpu_ctx_destroy;
	ws->base.ctx_wait_idle = radv_amdgpu_ctx_wait_idle;
	ws->base.cs_create = radv_amdgpu_cs_create;
	ws->base.cs_destroy = radv_amdgpu_cs_destroy;
	ws->base.cs_grow = radv_amdgpu_cs_grow;
	ws->base.cs_finalize = radv_amdgpu_cs_finalize;
	ws->base.cs_reset = radv_amdgpu_cs_reset;
	ws->base.cs_add_buffer = radv_amdgpu_cs_add_buffer;
	ws->base.cs_execute_secondary = radv_amdgpu_cs_execute_secondary;
	ws->base.cs_submit = radv_amdgpu_winsys_cs_submit;
	ws->base.cs_dump = radv_amdgpu_winsys_cs_dump;
	ws->base.create_fence = radv_amdgpu_create_fence;
	ws->base.destroy_fence = radv_amdgpu_destroy_fence;
	ws->base.create_sem = radv_amdgpu_create_sem;
	ws->base.destroy_sem = radv_amdgpu_destroy_sem;
	ws->base.fence_wait = radv_amdgpu_fence_wait;
}