Revert "winsys/amdgpu: Add R600_DEBUG flag to reserve VMID per ctx."
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_cs.c
1 /*
2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright © 2015 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
19 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 */
28 /*
29 * Authors:
30 * Marek Olšák <maraeo@gmail.com>
31 */
32
33 #include "amdgpu_cs.h"
34 #include "os/os_time.h"
35 #include <stdio.h>
36
37 #include "amd/common/sid.h"
38
39 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
40
41 /* FENCES */
42
43 static struct pipe_fence_handle *
44 amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
45 unsigned ip_instance, unsigned ring)
46 {
47 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
48
49 fence->reference.count = 1;
50 fence->ws = ctx->ws;
51 fence->ctx = ctx;
52 fence->fence.context = ctx->ctx;
53 fence->fence.ip_type = ip_type;
54 fence->fence.ip_instance = ip_instance;
55 fence->fence.ring = ring;
56 fence->submission_in_progress = true;
57 p_atomic_inc(&ctx->refcount);
58 return (struct pipe_fence_handle *)fence;
59 }
60
61 static struct pipe_fence_handle *
62 amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
63 {
64 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
65 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
66
67 if (!fence)
68 return NULL;
69
70 pipe_reference_init(&fence->reference, 1);
71 fence->ws = ws;
72 /* fence->ctx == NULL means that the fence is syncobj-based. */
73
74 /* Convert sync_file into syncobj. */
75 int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
76 if (r) {
77 FREE(fence);
78 return NULL;
79 }
80
81 r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
82 if (r) {
83 amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
84 FREE(fence);
85 return NULL;
86 }
87 return (struct pipe_fence_handle*)fence;
88 }
89
90 static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
91 struct pipe_fence_handle *pfence)
92 {
93 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
94 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
95
96 if (amdgpu_fence_is_syncobj(fence)) {
97 int fd, r;
98
99 /* Convert syncobj into sync_file. */
100 r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
101 return r ? -1 : fd;
102 }
103
104 os_wait_until_zero(&fence->submission_in_progress, PIPE_TIMEOUT_INFINITE);
105
106 /* Convert the amdgpu fence into a fence FD. */
107 int fd;
108 if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
109 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
110 (uint32_t*)&fd))
111 return -1;
112
113 return fd;
114 }
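/* Illustrative sketch of how a driver could shuttle a fence through the two
 * hooks above, assuming "ws" is the struct radeon_winsys and "fence" came
 * from a previous cs_flush:
 *
 *    int fd = ws->fence_export_sync_file(ws, fence);       // -1 on failure
 *    // ... hand fd to another process or API ...
 *    struct pipe_fence_handle *imported =
 *       ws->fence_import_sync_file(ws, fd);                // NULL on failure
 *
 * The imported fence has fence->ctx == NULL, i.e. it is syncobj-based, so it
 * can only be waited on with a relative timeout (see amdgpu_fence_wait).
 */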
115
116 static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
117 uint64_t seq_no,
118 uint64_t *user_fence_cpu_address)
119 {
120 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
121
122 rfence->fence.fence = seq_no;
123 rfence->user_fence_cpu_address = user_fence_cpu_address;
124 rfence->submission_in_progress = false;
125 }
126
127 static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
128 {
129 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
130
131 rfence->signalled = true;
132 rfence->submission_in_progress = false;
133 }
134
135 bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
136 bool absolute)
137 {
138 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
139 uint32_t expired;
140 int64_t abs_timeout;
141 uint64_t *user_fence_cpu;
142 int r;
143
144 if (rfence->signalled)
145 return true;
146
147 /* Handle syncobjs. */
148 if (amdgpu_fence_is_syncobj(rfence)) {
149 /* Absolute timeouts are only used by BO fences, which aren't
150 * backed by syncobjs.
151 */
152 assert(!absolute);
153
154 if (amdgpu_cs_syncobj_wait(rfence->ws->dev, &rfence->syncobj, 1,
155 timeout, 0, NULL))
156 return false;
157
158 rfence->signalled = true;
159 return true;
160 }
161
162 if (absolute)
163 abs_timeout = timeout;
164 else
165 abs_timeout = os_time_get_absolute_timeout(timeout);
166
167 /* The fence might not have a number assigned if its IB is being
168 * submitted in the other thread right now. Wait until the submission
169 * is done. */
170 if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
171 abs_timeout))
172 return false;
173
174 user_fence_cpu = rfence->user_fence_cpu_address;
175 if (user_fence_cpu) {
176 if (*user_fence_cpu >= rfence->fence.fence) {
177 rfence->signalled = true;
178 return true;
179 }
180
181 /* No timeout, just query: no need for the ioctl. */
182 if (!absolute && !timeout)
183 return false;
184 }
185
186 /* Now use the libdrm query. */
187 r = amdgpu_cs_query_fence_status(&rfence->fence,
188 abs_timeout,
189 AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
190 &expired);
191 if (r) {
192 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
193 return false;
194 }
195
196 if (expired) {
197 /* This variable can only transition from false to true, so it doesn't
198 * matter if threads race for it. */
199 rfence->signalled = true;
200 return true;
201 }
202 return false;
203 }
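/* Usage sketch (illustrative), showing the two timeout modes handled above:
 *
 *    // Poll without blocking:
 *    bool done = amdgpu_fence_wait(fence, 0, false);
 *
 *    // Block for up to 1 ms (relative timeout in nanoseconds):
 *    done = amdgpu_fence_wait(fence, 1000000, false);
 *
 *    // Block until an absolute deadline (BO fences only, not syncobjs):
 *    int64_t deadline = os_time_get_absolute_timeout(1000000);
 *    done = amdgpu_fence_wait(fence, deadline, true);
 *
 * The public entry point, amdgpu_fence_wait_rel_timeout below, always passes
 * absolute = false.
 */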
204
205 static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
206 struct pipe_fence_handle *fence,
207 uint64_t timeout)
208 {
209 return amdgpu_fence_wait(fence, timeout, false);
210 }
211
212 static struct pipe_fence_handle *
213 amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
214 {
215 struct amdgpu_cs *cs = amdgpu_cs(rcs);
216 struct pipe_fence_handle *fence = NULL;
217
218 if (debug_get_option_noop())
219 return NULL;
220
221 if (cs->next_fence) {
222 amdgpu_fence_reference(&fence, cs->next_fence);
223 return fence;
224 }
225
226 fence = amdgpu_fence_create(cs->ctx,
227 cs->csc->ib[IB_MAIN].ip_type,
228 cs->csc->ib[IB_MAIN].ip_instance,
229 cs->csc->ib[IB_MAIN].ring);
230 if (!fence)
231 return NULL;
232
233 amdgpu_fence_reference(&cs->next_fence, fence);
234 return fence;
235 }
236
237 /* CONTEXTS */
238
239 static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
240 {
241 struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
242 int r;
243 struct amdgpu_bo_alloc_request alloc_buffer = {};
244 amdgpu_bo_handle buf_handle;
245
246 if (!ctx)
247 return NULL;
248
249 ctx->ws = amdgpu_winsys(ws);
250 ctx->refcount = 1;
251 ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
252
253 r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
254 if (r) {
255 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
256 goto error_create;
257 }
258
259 alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
260 alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
261 alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
262
263 r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
264 if (r) {
265 fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
266 goto error_user_fence_alloc;
267 }
268
269 r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
270 if (r) {
271 fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
272 goto error_user_fence_map;
273 }
274
275 memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
276 ctx->user_fence_bo = buf_handle;
277
278 return (struct radeon_winsys_ctx*)ctx;
279
280 error_user_fence_map:
281 amdgpu_bo_free(buf_handle);
282 error_user_fence_alloc:
283 amdgpu_cs_ctx_free(ctx->ctx);
284 error_create:
285 FREE(ctx);
286 return NULL;
287 }
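/* Note (illustrative): the context allocates one GTT page whose CPU mapping
 * (user_fence_cpu_address_base) holds a 64-bit fence slot per ring type.
 * The CS code later selects the slot with the ring type, e.g.:
 *
 *    fence_info.offset = cs->ring_type;                                    // amdgpu_cs_create
 *    user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;  // amdgpu_cs_submit_ib
 *
 * which lets amdgpu_fence_wait compare *user_fence_cpu_address against the
 * fence's sequence number without entering the kernel.
 */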
288
289 static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
290 {
291 amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
292 }
293
294 static enum pipe_reset_status
295 amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
296 {
297 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
298 uint32_t result, hangs;
299 int r;
300
301 /* Return a failure due to a rejected command submission. */
302 if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
303 return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
304 PIPE_INNOCENT_CONTEXT_RESET;
305 }
306
307 /* Return a failure due to a GPU hang. */
308 r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
309 if (r) {
310 fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
311 return PIPE_NO_RESET;
312 }
313
314 switch (result) {
315 case AMDGPU_CTX_GUILTY_RESET:
316 return PIPE_GUILTY_CONTEXT_RESET;
317 case AMDGPU_CTX_INNOCENT_RESET:
318 return PIPE_INNOCENT_CONTEXT_RESET;
319 case AMDGPU_CTX_UNKNOWN_RESET:
320 return PIPE_UNKNOWN_CONTEXT_RESET;
321 case AMDGPU_CTX_NO_RESET:
322 default:
323 return PIPE_NO_RESET;
324 }
325 }
326
327 /* COMMAND SUBMISSION */
328
329 static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
330 {
331 return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
332 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
333 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC;
334 }
335
336 static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
337 {
338 return cs->ctx->ws->info.chip_class >= CIK &&
339 cs->ring_type == RING_GFX;
340 }
341
342 static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
343 {
344 if (ring_type == RING_GFX)
345 return 4; /* for chaining */
346
347 return 0;
348 }
349
350 int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
351 {
352 unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
353 int i = cs->buffer_indices_hashlist[hash];
354 struct amdgpu_cs_buffer *buffers;
355 int num_buffers;
356
357 if (bo->bo) {
358 buffers = cs->real_buffers;
359 num_buffers = cs->num_real_buffers;
360 } else if (!bo->sparse) {
361 buffers = cs->slab_buffers;
362 num_buffers = cs->num_slab_buffers;
363 } else {
364 buffers = cs->sparse_buffers;
365 num_buffers = cs->num_sparse_buffers;
366 }
367
368 /* Either the hash slot is empty (not found) or it already points at this BO (found). */
369 if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
370 return i;
371
372 /* Hash collision, look for the BO in the list of buffers linearly. */
373 for (i = num_buffers - 1; i >= 0; i--) {
374 if (buffers[i].bo == bo) {
375 /* Put this buffer in the hash list.
376 * This will prevent additional hash collisions if there are
377 * several consecutive lookup_buffer calls for the same buffer.
378 *
379 * Example: Assuming buffers A,B,C collide in the hash list,
380 * the following sequence of buffers:
381 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
382 * will collide here: ^ and here: ^,
383 * meaning that we should get very few collisions in the end. */
384 cs->buffer_indices_hashlist[hash] = i;
385 return i;
386 }
387 }
388 return -1;
389 }
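/* Illustrative restatement of the cache above: buffer_indices_hashlist is a
 * small direct-mapped cache from bo->unique_id to the index where that BO
 * was last stored, shared by the real, slab and sparse lists.  The fast path
 * boils down to:
 *
 *    unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist) - 1);
 *    int i = cs->buffer_indices_hashlist[hash];       // -1 if the slot is empty
 *    if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
 *       return i;                                     // miss (-1) or hit, no scan
 *
 * Only a collision (the slot points at a different BO) triggers the reverse
 * linear scan, which then refreshes the cache entry for the next lookup.
 */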
390
391 static int
392 amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
393 {
394 struct amdgpu_cs_buffer *buffer;
395 int idx;
396
397 /* New buffer, check if the backing array is large enough. */
398 if (cs->num_real_buffers >= cs->max_real_buffers) {
399 unsigned new_max =
400 MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
401 struct amdgpu_cs_buffer *new_buffers;
402
403 new_buffers = MALLOC(new_max * sizeof(*new_buffers));
404
405 if (!new_buffers) {
406 fprintf(stderr, "amdgpu_do_add_buffer: allocation failed\n");
407 FREE(new_buffers);
408 return -1;
409 }
410
411 memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
412
413 FREE(cs->real_buffers);
414
415 cs->max_real_buffers = new_max;
416 cs->real_buffers = new_buffers;
417 }
418
419 idx = cs->num_real_buffers;
420 buffer = &cs->real_buffers[idx];
421
422 memset(buffer, 0, sizeof(*buffer));
423 amdgpu_winsys_bo_reference(&buffer->bo, bo);
424 p_atomic_inc(&bo->num_cs_references);
425 cs->num_real_buffers++;
426
427 return idx;
428 }
429
430 static int
431 amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
432 {
433 struct amdgpu_cs_context *cs = acs->csc;
434 unsigned hash;
435 int idx = amdgpu_lookup_buffer(cs, bo);
436
437 if (idx >= 0)
438 return idx;
439
440 idx = amdgpu_do_add_real_buffer(cs, bo);
441
442 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
443 cs->buffer_indices_hashlist[hash] = idx;
444
445 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
446 acs->main.base.used_vram += bo->base.size;
447 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
448 acs->main.base.used_gart += bo->base.size;
449
450 return idx;
451 }
452
453 static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
454 struct amdgpu_winsys_bo *bo)
455 {
456 struct amdgpu_cs_context *cs = acs->csc;
457 struct amdgpu_cs_buffer *buffer;
458 unsigned hash;
459 int idx = amdgpu_lookup_buffer(cs, bo);
460 int real_idx;
461
462 if (idx >= 0)
463 return idx;
464
465 real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
466 if (real_idx < 0)
467 return -1;
468
469 /* New buffer, check if the backing array is large enough. */
470 if (cs->num_slab_buffers >= cs->max_slab_buffers) {
471 unsigned new_max =
472 MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
473 struct amdgpu_cs_buffer *new_buffers;
474
475 new_buffers = REALLOC(cs->slab_buffers,
476 cs->max_slab_buffers * sizeof(*new_buffers),
477 new_max * sizeof(*new_buffers));
478 if (!new_buffers) {
479 fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
480 return -1;
481 }
482
483 cs->max_slab_buffers = new_max;
484 cs->slab_buffers = new_buffers;
485 }
486
487 idx = cs->num_slab_buffers;
488 buffer = &cs->slab_buffers[idx];
489
490 memset(buffer, 0, sizeof(*buffer));
491 amdgpu_winsys_bo_reference(&buffer->bo, bo);
492 buffer->u.slab.real_idx = real_idx;
493 p_atomic_inc(&bo->num_cs_references);
494 cs->num_slab_buffers++;
495
496 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
497 cs->buffer_indices_hashlist[hash] = idx;
498
499 return idx;
500 }
501
502 static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
503 struct amdgpu_winsys_bo *bo)
504 {
505 struct amdgpu_cs_context *cs = acs->csc;
506 struct amdgpu_cs_buffer *buffer;
507 unsigned hash;
508 int idx = amdgpu_lookup_buffer(cs, bo);
509
510 if (idx >= 0)
511 return idx;
512
513 /* New buffer, check if the backing array is large enough. */
514 if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
515 unsigned new_max =
516 MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
517 struct amdgpu_cs_buffer *new_buffers;
518
519 new_buffers = REALLOC(cs->sparse_buffers,
520 cs->max_sparse_buffers * sizeof(*new_buffers),
521 new_max * sizeof(*new_buffers));
522 if (!new_buffers) {
523 fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
524 return -1;
525 }
526
527 cs->max_sparse_buffers = new_max;
528 cs->sparse_buffers = new_buffers;
529 }
530
531 idx = cs->num_sparse_buffers;
532 buffer = &cs->sparse_buffers[idx];
533
534 memset(buffer, 0, sizeof(*buffer));
535 amdgpu_winsys_bo_reference(&buffer->bo, bo);
536 p_atomic_inc(&bo->num_cs_references);
537 cs->num_sparse_buffers++;
538
539 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
540 cs->buffer_indices_hashlist[hash] = idx;
541
542 /* We delay adding the backing buffers until we really have to. However,
543 * we cannot delay accounting for memory use.
544 */
545 mtx_lock(&bo->u.sparse.commit_lock);
546
547 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
548 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
549 acs->main.base.used_vram += backing->bo->base.size;
550 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
551 acs->main.base.used_gart += backing->bo->base.size;
552 }
553
554 mtx_unlock(&bo->u.sparse.commit_lock);
555
556 return idx;
557 }
558
559 static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
560 struct pb_buffer *buf,
561 enum radeon_bo_usage usage,
562 enum radeon_bo_domain domains,
563 enum radeon_bo_priority priority)
564 {
565 /* Don't use the "domains" parameter. Amdgpu doesn't support changing
566 * the buffer placement during command submission.
567 */
568 struct amdgpu_cs *acs = amdgpu_cs(rcs);
569 struct amdgpu_cs_context *cs = acs->csc;
570 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
571 struct amdgpu_cs_buffer *buffer;
572 int index;
573
574 /* Fast exit for no-op calls.
575 * This is very effective with suballocators and linear uploaders that
576 * are outside of the winsys.
577 */
578 if (bo == cs->last_added_bo &&
579 (usage & cs->last_added_bo_usage) == usage &&
580 (1ull << priority) & cs->last_added_bo_priority_usage)
581 return cs->last_added_bo_index;
582
583 if (!bo->sparse) {
584 if (!bo->bo) {
585 index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
586 if (index < 0)
587 return 0;
588
589 buffer = &cs->slab_buffers[index];
590 buffer->usage |= usage;
591
592 usage &= ~RADEON_USAGE_SYNCHRONIZED;
593 index = buffer->u.slab.real_idx;
594 } else {
595 index = amdgpu_lookup_or_add_real_buffer(acs, bo);
596 if (index < 0)
597 return 0;
598 }
599
600 buffer = &cs->real_buffers[index];
601 } else {
602 index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
603 if (index < 0)
604 return 0;
605
606 buffer = &cs->sparse_buffers[index];
607 }
608
609 buffer->u.real.priority_usage |= 1ull << priority;
610 buffer->usage |= usage;
611
612 cs->last_added_bo = bo;
613 cs->last_added_bo_index = index;
614 cs->last_added_bo_usage = buffer->usage;
615 cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
616 return index;
617 }
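/* Usage sketch (illustrative): a driver registers every BO referenced by its
 * packets and keeps the returned index; "some_bo" and the priority are just
 * placeholders here:
 *
 *    unsigned idx = ws->cs_add_buffer(cs, some_bo, RADEON_USAGE_READ,
 *                                     0, RADEON_PRIO_IB1);
 *    // the domains argument is ignored, see the comment above
 *
 * Calling it again for the same BO with a subset of the already-recorded
 * usage and an already-seen priority hits the last_added_bo fast path and
 * returns the cached index without a hash lookup.
 */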
618
619 static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
620 enum ring_type ring_type)
621 {
622 struct pb_buffer *pb;
623 uint8_t *mapped;
624 unsigned buffer_size;
625
626 /* Always create a buffer that is at least as large as the maximum seen IB
627 * size, aligned to a power of two (and multiplied by 4 to reduce internal
628 * fragmentation if chaining is not available). Limit to 512k dwords, which
629 * is the largest power of two that fits into the size field of the
630 * INDIRECT_BUFFER packet.
631 */
632 if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
633 buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
634 else
635 buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
636
637 buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
638
639 switch (ib->ib_type) {
640 case IB_MAIN:
641 buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
642 break;
643 default:
644 unreachable("unhandled IB type");
645 }
646
647 pb = ws->base.buffer_create(&ws->base, buffer_size,
648 ws->info.gart_page_size,
649 RADEON_DOMAIN_GTT,
650 RADEON_FLAG_NO_INTERPROCESS_SHARING |
651 (ring_type == RING_GFX ||
652 ring_type == RING_COMPUTE ||
653 ring_type == RING_DMA ?
654 RADEON_FLAG_GTT_WC : 0));
655 if (!pb)
656 return false;
657
658 mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
659 if (!mapped) {
660 pb_reference(&pb, NULL);
661 return false;
662 }
663
664 pb_reference(&ib->big_ib_buffer, pb);
665 pb_reference(&pb, NULL);
666
667 ib->ib_mapped = mapped;
668 ib->used_ib_space = 0;
669
670 return true;
671 }
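/* Worked example (illustrative) of the sizing above, assuming chaining is
 * available and ib->max_ib_size == 20000 dwords:
 *
 *    util_next_power_of_two(20000)      -> 32768 dwords
 *    buffer_size = 4 * 32768            -> 131072 bytes
 *    MIN2(131072, 4 * 512 * 1024)       -> 131072 bytes (2 MiB cap not hit)
 *    MAX2(131072, 8 * 1024 * 4)         -> 131072 bytes (32 KiB floor not hit)
 *
 * So the backing buffer always holds a power-of-two number of dwords,
 * between 32 KiB and 2 MiB for the main IB.
 */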
672
673 static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
674 {
675 switch (ib_type) {
676 case IB_MAIN:
677 /* Smaller submits mean the GPU gets busy sooner and there is less
678 * waiting for buffers and fences. Proof:
679 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
680 */
681 return 20 * 1024;
682 default:
683 unreachable("bad ib_type");
684 }
685 }
686
687 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
688 enum ib_type ib_type)
689 {
690 struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
691 /* Small IBs are better than big IBs, because the GPU goes idle quicker
692 * and there is less waiting for buffers and fences. Proof:
693 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
694 */
695 struct amdgpu_ib *ib = NULL;
696 struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
697 unsigned ib_size = 0;
698
699 switch (ib_type) {
700 case IB_MAIN:
701 ib = &cs->main;
702 ib_size = 4 * 1024 * 4;
703 break;
704 default:
705 unreachable("unhandled IB type");
706 }
707
708 if (!amdgpu_cs_has_chaining(cs)) {
709 ib_size = MAX2(ib_size,
710 4 * MIN2(util_next_power_of_two(ib->max_ib_size),
711 amdgpu_ib_max_submit_dwords(ib_type)));
712 }
713
714 ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;
715
716 ib->base.prev_dw = 0;
717 ib->base.num_prev = 0;
718 ib->base.current.cdw = 0;
719 ib->base.current.buf = NULL;
720
721 /* Allocate a new buffer for IBs if the current buffer is all used. */
722 if (!ib->big_ib_buffer ||
723 ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
724 if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
725 return false;
726 }
727
728 info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
729 info->ib_bytes = 0;
730 /* ib_bytes is in dwords and the conversion to bytes will be done before
731 * the CS ioctl. */
732 ib->ptr_ib_size = &info->ib_bytes;
733 ib->ptr_ib_size_inside_ib = false;
734
735 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
736 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
737
738 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
739
740 ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
741 ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
742 return true;
743 }
744
745 static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
746 {
747 if (ib->ptr_ib_size_inside_ib) {
748 *ib->ptr_ib_size = ib->base.current.cdw |
749 S_3F2_CHAIN(1) | S_3F2_VALID(1);
750 } else {
751 *ib->ptr_ib_size = ib->base.current.cdw;
752 }
753 }
754
755 static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
756 {
757 amdgpu_set_ib_size(ib);
758 ib->used_ib_space += ib->base.current.cdw * 4;
759 ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
760 }
761
762 static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
763 enum ring_type ring_type)
764 {
765 switch (ring_type) {
766 case RING_DMA:
767 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
768 break;
769
770 case RING_UVD:
771 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
772 break;
773
774 case RING_VCE:
775 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
776 break;
777
778 case RING_COMPUTE:
779 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_COMPUTE;
780 break;
781
782 case RING_VCN_DEC:
783 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
784 break;
785
786 default:
787 case RING_GFX:
788 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_GFX;
789 break;
790 }
791
792 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
793 cs->last_added_bo = NULL;
794 return true;
795 }
796
797 static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
798 {
799 unsigned i;
800
801 for (i = 0; i < cs->num_real_buffers; i++) {
802 p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
803 amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
804 }
805 for (i = 0; i < cs->num_slab_buffers; i++) {
806 p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
807 amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
808 }
809 for (i = 0; i < cs->num_sparse_buffers; i++) {
810 p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
811 amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
812 }
813 for (i = 0; i < cs->num_fence_dependencies; i++)
814 amdgpu_fence_reference(&cs->fence_dependencies[i], NULL);
815
816 cs->num_real_buffers = 0;
817 cs->num_slab_buffers = 0;
818 cs->num_sparse_buffers = 0;
819 cs->num_fence_dependencies = 0;
820 amdgpu_fence_reference(&cs->fence, NULL);
821
822 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
823 cs->last_added_bo = NULL;
824 }
825
826 static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
827 {
828 amdgpu_cs_context_cleanup(cs);
829 FREE(cs->flags);
830 FREE(cs->real_buffers);
831 FREE(cs->handles);
832 FREE(cs->slab_buffers);
833 FREE(cs->sparse_buffers);
834 FREE(cs->fence_dependencies);
835 }
836
837
838 static struct radeon_winsys_cs *
839 amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
840 enum ring_type ring_type,
841 void (*flush)(void *ctx, unsigned flags,
842 struct pipe_fence_handle **fence),
843 void *flush_ctx)
844 {
845 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
846 struct amdgpu_cs *cs;
847
848 cs = CALLOC_STRUCT(amdgpu_cs);
849 if (!cs) {
850 return NULL;
851 }
852
853 util_queue_fence_init(&cs->flush_completed);
854
855 cs->ctx = ctx;
856 cs->flush_cs = flush;
857 cs->flush_data = flush_ctx;
858 cs->ring_type = ring_type;
859
860 struct amdgpu_cs_fence_info fence_info;
861 fence_info.handle = cs->ctx->user_fence_bo;
862 fence_info.offset = cs->ring_type;
863 amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);
864
865 cs->main.ib_type = IB_MAIN;
866
867 if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
868 FREE(cs);
869 return NULL;
870 }
871
872 if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
873 amdgpu_destroy_cs_context(&cs->csc1);
874 FREE(cs);
875 return NULL;
876 }
877
878 /* Set the first submission context as current. */
879 cs->csc = &cs->csc1;
880 cs->cst = &cs->csc2;
881
882 if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
883 amdgpu_destroy_cs_context(&cs->csc2);
884 amdgpu_destroy_cs_context(&cs->csc1);
885 FREE(cs);
886 return NULL;
887 }
888
889 p_atomic_inc(&ctx->ws->num_cs);
890 return &cs->main.base;
891 }
892
893 static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
894 {
895 return true;
896 }
897
898 static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
899 {
900 struct amdgpu_ib *ib = amdgpu_ib(rcs);
901 struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
902 unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
903 uint64_t va;
904 uint32_t *new_ptr_ib_size;
905
906 assert(rcs->current.cdw <= rcs->current.max_dw);
907
908 if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
909 return false;
910
911 ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
912
913 if (rcs->current.max_dw - rcs->current.cdw >= dw)
914 return true;
915
916 if (!amdgpu_cs_has_chaining(cs))
917 return false;
918
919 /* Allocate a new chunk */
920 if (rcs->num_prev >= rcs->max_prev) {
921 unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
922 struct radeon_winsys_cs_chunk *new_prev;
923
924 new_prev = REALLOC(rcs->prev,
925 sizeof(*new_prev) * rcs->max_prev,
926 sizeof(*new_prev) * new_max_prev);
927 if (!new_prev)
928 return false;
929
930 rcs->prev = new_prev;
931 rcs->max_prev = new_max_prev;
932 }
933
934 if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
935 return false;
936
937 assert(ib->used_ib_space == 0);
938 va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
939
940 /* This space was originally reserved. */
941 rcs->current.max_dw += 4;
942 assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);
943
944 /* Pad with NOPs and add INDIRECT_BUFFER packet */
945 while ((rcs->current.cdw & 7) != 4)
946 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
947
948 radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
949 : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
950 radeon_emit(rcs, va);
951 radeon_emit(rcs, va >> 32);
952 new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];
953
954 assert((rcs->current.cdw & 7) == 0);
955 assert(rcs->current.cdw <= rcs->current.max_dw);
956
957 amdgpu_set_ib_size(ib);
958 ib->ptr_ib_size = new_ptr_ib_size;
959 ib->ptr_ib_size_inside_ib = true;
960
961 /* Hook up the new chunk */
962 rcs->prev[rcs->num_prev].buf = rcs->current.buf;
963 rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
964 rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
965 rcs->num_prev++;
966
967 ib->base.prev_dw += ib->base.current.cdw;
968 ib->base.current.cdw = 0;
969
970 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
971 ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
972
973 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
974 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
975
976 return true;
977 }
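/* Illustrative picture of the chain built above.  When the current chunk is
 * full, it is padded with type-3 NOPs until (cdw & 7) == 4 and then closed
 * with a 4-dword INDIRECT_BUFFER packet pointing at the new buffer:
 *
 *    ... existing commands ...
 *    0xffff1000                            type-3 NOP padding
 *    PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0)  chain to the next buffer
 *    va                                    low 32 bits of the new IB address
 *    va >> 32                              high 32 bits
 *    <size dword>                          patched later by amdgpu_set_ib_size()
 *                                          with cdw | S_3F2_CHAIN(1) | S_3F2_VALID(1)
 *
 * The 4 dwords returned by amdgpu_cs_epilog_dws() for RING_GFX reserve room
 * for exactly this packet at the end of every chunk.
 */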
978
979 static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
980 struct radeon_bo_list_item *list)
981 {
982 struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
983 int i;
984
985 if (list) {
986 for (i = 0; i < cs->num_real_buffers; i++) {
987 list[i].bo_size = cs->real_buffers[i].bo->base.size;
988 list[i].vm_address = cs->real_buffers[i].bo->va;
989 list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
990 }
991 }
992 return cs->num_real_buffers;
993 }
994
995 static unsigned add_fence_dependency_entry(struct amdgpu_cs_context *cs)
996 {
997 unsigned idx = cs->num_fence_dependencies++;
998
999 if (idx >= cs->max_fence_dependencies) {
1000 unsigned size;
1001 const unsigned increment = 8;
1002
1003 cs->max_fence_dependencies = idx + increment;
1004 size = cs->max_fence_dependencies * sizeof(cs->fence_dependencies[0]);
1005 cs->fence_dependencies = realloc(cs->fence_dependencies, size);
1006 /* Clear the newly-allocated elements. */
1007 memset(cs->fence_dependencies + idx, 0,
1008 increment * sizeof(cs->fence_dependencies[0]));
1009 }
1010 return idx;
1011 }
1012
1013 static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
1014 struct amdgpu_fence *fence)
1015 {
1016 struct amdgpu_cs_context *cs = acs->csc;
1017
1018 if (!amdgpu_fence_is_syncobj(fence) &&
1019 fence->ctx == acs->ctx &&
1020 fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
1021 fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
1022 fence->fence.ring == cs->ib[IB_MAIN].ring)
1023 return true;
1024
1025 return amdgpu_fence_wait((void *)fence, 0, false);
1026 }
1027
1028 static void amdgpu_cs_add_fence_dependency(struct radeon_winsys_cs *rws,
1029 struct pipe_fence_handle *pfence)
1030 {
1031 struct amdgpu_cs *acs = amdgpu_cs(rws);
1032 struct amdgpu_cs_context *cs = acs->csc;
1033 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
1034
1035 if (is_noop_fence_dependency(acs, fence))
1036 return;
1037
1038 unsigned idx = add_fence_dependency_entry(cs);
1039 amdgpu_fence_reference(&cs->fence_dependencies[idx],
1040 (struct pipe_fence_handle*)fence);
1041 }
1042
1043 static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
1044 struct amdgpu_cs_buffer *buffer)
1045 {
1046 struct amdgpu_cs_context *cs = acs->csc;
1047 struct amdgpu_winsys_bo *bo = buffer->bo;
1048 unsigned new_num_fences = 0;
1049
1050 for (unsigned j = 0; j < bo->num_fences; ++j) {
1051 struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
1052
1053 if (is_noop_fence_dependency(acs, bo_fence))
1054 continue;
1055
1056 amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
1057 new_num_fences++;
1058
1059 if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
1060 continue;
1061
1062 unsigned idx = add_fence_dependency_entry(cs);
1063 amdgpu_fence_reference(&cs->fence_dependencies[idx],
1064 (struct pipe_fence_handle*)bo_fence);
1065 }
1066
1067 for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
1068 amdgpu_fence_reference(&bo->fences[j], NULL);
1069
1070 bo->num_fences = new_num_fences;
1071 }
1072
1073 /* Add the given list of fences to the buffer's fence list.
1074 *
1075 * Must be called with the winsys bo_fence_lock held.
1076 */
1077 void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
1078 unsigned num_fences,
1079 struct pipe_fence_handle **fences)
1080 {
1081 if (bo->num_fences + num_fences > bo->max_fences) {
1082 unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
1083 struct pipe_fence_handle **new_fences =
1084 REALLOC(bo->fences,
1085 bo->num_fences * sizeof(*new_fences),
1086 new_max_fences * sizeof(*new_fences));
1087 if (likely(new_fences)) {
1088 bo->fences = new_fences;
1089 bo->max_fences = new_max_fences;
1090 } else {
1091 unsigned drop;
1092
1093 fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
1094 if (!bo->num_fences)
1095 return;
1096
1097 bo->num_fences--; /* prefer to keep the most recent fence if possible */
1098 amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
1099
1100 drop = bo->num_fences + num_fences - bo->max_fences;
1101 num_fences -= drop;
1102 fences += drop;
1103 }
1104 }
1105
1106 for (unsigned i = 0; i < num_fences; ++i) {
1107 bo->fences[bo->num_fences] = NULL;
1108 amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
1109 bo->num_fences++;
1110 }
1111 }
1112
1113 static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
1114 struct pipe_fence_handle *fence,
1115 unsigned num_buffers,
1116 struct amdgpu_cs_buffer *buffers)
1117 {
1118 for (unsigned i = 0; i < num_buffers; i++) {
1119 struct amdgpu_cs_buffer *buffer = &buffers[i];
1120 struct amdgpu_winsys_bo *bo = buffer->bo;
1121
1122 amdgpu_add_bo_fence_dependencies(acs, buffer);
1123 p_atomic_inc(&bo->num_active_ioctls);
1124 amdgpu_add_fences(bo, 1, &fence);
1125 }
1126 }
1127
1128 /* Since the kernel driver doesn't synchronize execution between different
1129 * rings automatically, we have to add fence dependencies manually.
1130 */
1131 static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
1132 {
1133 struct amdgpu_cs_context *cs = acs->csc;
1134
1135 cs->num_fence_dependencies = 0;
1136
1137 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
1138 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
1139 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
1140 }
1141
1142 /* Add backing of sparse buffers to the buffer list.
1143 *
1144 * This is done late, during submission, to keep the buffer list short before
1145 * submit, and to avoid managing fences for the backing buffers.
1146 */
1147 static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
1148 {
1149 for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
1150 struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
1151 struct amdgpu_winsys_bo *bo = buffer->bo;
1152
1153 mtx_lock(&bo->u.sparse.commit_lock);
1154
1155 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
1156 /* We can directly add the buffer here, because we know that each
1157 * backing buffer occurs only once.
1158 */
1159 int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
1160 if (idx < 0) {
1161 fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
1162 mtx_unlock(&bo->u.sparse.commit_lock);
1163 return false;
1164 }
1165
1166 cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
1167 cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
1168 p_atomic_inc(&backing->bo->num_active_ioctls);
1169 }
1170
1171 mtx_unlock(&bo->u.sparse.commit_lock);
1172 }
1173
1174 return true;
1175 }
1176
1177 void amdgpu_cs_submit_ib(void *job, int thread_index)
1178 {
1179 struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
1180 struct amdgpu_winsys *ws = acs->ctx->ws;
1181 struct amdgpu_cs_context *cs = acs->cst;
1182 int i, r;
1183 amdgpu_bo_list_handle bo_list = NULL;
1184 uint64_t seq_no = 0;
1185 bool has_user_fence = amdgpu_cs_has_user_fence(cs);
1186
1187 /* Create the buffer list.
1188 * Use a buffer list containing all allocated buffers if requested.
1189 */
1190 if (ws->debug_all_bos) {
1191 struct amdgpu_winsys_bo *bo;
1192 amdgpu_bo_handle *handles;
1193 unsigned num = 0;
1194
1195 mtx_lock(&ws->global_bo_list_lock);
1196
1197 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
1198 if (!handles) {
1199 mtx_unlock(&ws->global_bo_list_lock);
1200 amdgpu_cs_context_cleanup(cs);
1201 cs->error_code = -ENOMEM;
1202 return;
1203 }
1204
1205 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
1206 assert(num < ws->num_buffers);
1207 handles[num++] = bo->bo;
1208 }
1209
1210 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
1211 handles, NULL, &bo_list);
1212 free(handles);
1213 mtx_unlock(&ws->global_bo_list_lock);
1214 } else {
1215 unsigned num_handles;
1216
1217 if (!amdgpu_add_sparse_backing_buffers(cs)) {
1218 r = -ENOMEM;
1219 goto bo_list_error;
1220 }
1221
1222 if (cs->max_real_submit < cs->num_real_buffers) {
1223 FREE(cs->handles);
1224 FREE(cs->flags);
1225
1226 cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
1227 cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);
1228
1229 if (!cs->handles || !cs->flags) {
1230 cs->max_real_submit = 0;
1231 r = -ENOMEM;
1232 goto bo_list_error;
1233 }
1234 }
1235
1236 num_handles = 0;
1237 for (i = 0; i < cs->num_real_buffers; ++i) {
1238 struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
1239
1240 if (buffer->bo->is_local)
1241 continue;
1242
1243 assert(buffer->u.real.priority_usage != 0);
1244
1245 cs->handles[num_handles] = buffer->bo->bo;
1246 cs->flags[num_handles] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
1247 ++num_handles;
1248 }
1249
1250 if (acs->ring_type == RING_GFX)
1251 ws->gfx_bo_list_counter += cs->num_real_buffers;
1252
1253 if (num_handles) {
1254 r = amdgpu_bo_list_create(ws->dev, num_handles,
1255 cs->handles, cs->flags, &bo_list);
1256 } else {
1257 r = 0;
1258 }
1259 }
1260 bo_list_error:
1261
1262 if (r) {
1263 fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
1264 amdgpu_fence_signalled(cs->fence);
1265 cs->error_code = r;
1266 goto cleanup;
1267 }
1268
1269 if (acs->ctx->num_rejected_cs) {
1270 r = -ECANCELED;
1271 } else {
1272 struct drm_amdgpu_cs_chunk chunks[4];
1273 unsigned num_chunks = 0;
1274
1275 /* Convert from dwords to bytes. */
1276 cs->ib[IB_MAIN].ib_bytes *= 4;
1277
1278 /* IB */
1279 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
1280 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1281 chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
1282 num_chunks++;
1283
1284 /* Fence */
1285 if (has_user_fence) {
1286 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1287 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1288 chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
1289 num_chunks++;
1290 }
1291
1292 /* Dependencies */
1293 unsigned num_dependencies = cs->num_fence_dependencies;
1294 unsigned num_syncobj_dependencies = 0;
1295
1296 if (num_dependencies) {
1297 struct drm_amdgpu_cs_chunk_dep *dep_chunk =
1298 alloca(num_dependencies * sizeof(*dep_chunk));
1299 unsigned num = 0;
1300
1301 for (unsigned i = 0; i < num_dependencies; i++) {
1302 struct amdgpu_fence *fence =
1303 (struct amdgpu_fence*)cs->fence_dependencies[i];
1304
1305 if (amdgpu_fence_is_syncobj(fence)) {
1306 num_syncobj_dependencies++;
1307 continue;
1308 }
1309
1310 assert(!fence->submission_in_progress);
1311 amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[num++]);
1312 }
1313
1314 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1315 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num;
1316 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
1317 num_chunks++;
1318 }
1319
1320 /* Syncobj dependencies. */
1321 if (num_syncobj_dependencies) {
1322 struct drm_amdgpu_cs_chunk_sem *sem_chunk =
1323 alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));
1324 unsigned num = 0;
1325
1326 for (unsigned i = 0; i < num_dependencies; i++) {
1327 struct amdgpu_fence *fence =
1328 (struct amdgpu_fence*)cs->fence_dependencies[i];
1329
1330 if (!amdgpu_fence_is_syncobj(fence))
1331 continue;
1332
1333 assert(!fence->submission_in_progress);
1334 sem_chunk[num++].handle = fence->syncobj;
1335 }
1336
1337 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
1338 chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num;
1339 chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
1340 num_chunks++;
1341 }
1342
1343 assert(num_chunks <= ARRAY_SIZE(chunks));
1344
1345 r = amdgpu_cs_submit_raw(ws->dev, acs->ctx->ctx, bo_list,
1346 num_chunks, chunks, &seq_no);
1347 }
1348
1349 cs->error_code = r;
1350 if (r) {
1351 if (r == -ENOMEM)
1352 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1353 else if (r == -ECANCELED)
1354 fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1355 else
1356 fprintf(stderr, "amdgpu: The CS has been rejected, "
1357 "see dmesg for more information (%i).\n", r);
1358
1359 amdgpu_fence_signalled(cs->fence);
1360
1361 acs->ctx->num_rejected_cs++;
1362 ws->num_total_rejected_cs++;
1363 } else {
1364 /* Success. */
1365 uint64_t *user_fence = NULL;
1366
1367 if (has_user_fence)
1368 user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
1369 amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
1370 }
1371
1372 /* Cleanup. */
1373 if (bo_list)
1374 amdgpu_bo_list_destroy(bo_list);
1375
1376 cleanup:
1377 for (i = 0; i < cs->num_real_buffers; i++)
1378 p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
1379 for (i = 0; i < cs->num_slab_buffers; i++)
1380 p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
1381 for (i = 0; i < cs->num_sparse_buffers; i++)
1382 p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
1383
1384 amdgpu_cs_context_cleanup(cs);
1385 }
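/* Summary (illustrative) of the ioctl payload assembled above.  At most four
 * chunks are passed to amdgpu_cs_submit_raw():
 *
 *    AMDGPU_CHUNK_ID_IB             the main IB (va_start / ib_bytes)     always
 *    AMDGPU_CHUNK_ID_FENCE          user fence BO + offset                only if amdgpu_cs_has_user_fence()
 *    AMDGPU_CHUNK_ID_DEPENDENCIES   amdgpu fence dependencies             if any were recorded
 *    AMDGPU_CHUNK_ID_SYNCOBJ_IN     syncobj-backed (sync_file) waits      if any were recorded
 *
 * so num_chunks is between 1 and 4 depending on the ring type and the
 * recorded dependencies.
 */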
1386
1387 /* Make sure the previous submission is completed. */
1388 void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
1389 {
1390 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1391
1392 /* Wait for any pending ioctl of this CS to complete. */
1393 util_queue_fence_wait(&cs->flush_completed);
1394 }
1395
1396 static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
1397 unsigned flags,
1398 struct pipe_fence_handle **fence)
1399 {
1400 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1401 struct amdgpu_winsys *ws = cs->ctx->ws;
1402 int error_code = 0;
1403
1404 rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);
1405
1406 switch (cs->ring_type) {
1407 case RING_DMA:
1408 /* pad DMA ring to 8 DWs */
1409 if (ws->info.chip_class <= SI) {
1410 while (rcs->current.cdw & 7)
1411 radeon_emit(rcs, 0xf0000000); /* NOP packet */
1412 } else {
1413 while (rcs->current.cdw & 7)
1414 radeon_emit(rcs, 0x00000000); /* NOP packet */
1415 }
1416 break;
1417 case RING_GFX:
1418 /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
1419 if (ws->info.gfx_ib_pad_with_type2) {
1420 while (rcs->current.cdw & 7)
1421 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1422 } else {
1423 while (rcs->current.cdw & 7)
1424 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
1425 }
1426 ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
1427 break;
1428 case RING_UVD:
1429 while (rcs->current.cdw & 15)
1430 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1431 break;
1432 case RING_VCN_DEC:
1433 while (rcs->current.cdw & 15)
1434 radeon_emit(rcs, 0x81ff); /* nop packet */
1435 break;
1436 default:
1437 break;
1438 }
1439
1440 if (rcs->current.cdw > rcs->current.max_dw) {
1441 fprintf(stderr, "amdgpu: command stream overflowed\n");
1442 }
1443
1444 /* Submit only if the CS is not empty and has not overflowed. */
1445 if (likely(radeon_emitted(&cs->main.base, 0) &&
1446 cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
1447 !debug_get_option_noop())) {
1448 struct amdgpu_cs_context *cur = cs->csc;
1449
1450 /* Set IB sizes. */
1451 amdgpu_ib_finalize(&cs->main);
1452
1453 /* Create a fence. */
1454 amdgpu_fence_reference(&cur->fence, NULL);
1455 if (cs->next_fence) {
1456 /* just move the reference */
1457 cur->fence = cs->next_fence;
1458 cs->next_fence = NULL;
1459 } else {
1460 cur->fence = amdgpu_fence_create(cs->ctx,
1461 cur->ib[IB_MAIN].ip_type,
1462 cur->ib[IB_MAIN].ip_instance,
1463 cur->ib[IB_MAIN].ring);
1464 }
1465 if (fence)
1466 amdgpu_fence_reference(fence, cur->fence);
1467
1468 amdgpu_cs_sync_flush(rcs);
1469
1470 /* Prepare buffers.
1471 *
1472 * The bo_fence_lock must be held until the submission is queued to ensure
1473 * that the order of fence dependency updates matches the order of
1474 * submissions.
1475 */
1476 mtx_lock(&ws->bo_fence_lock);
1477 amdgpu_add_fence_dependencies_bo_lists(cs);
1478
1479 /* Swap command streams. "cst" is going to be submitted. */
1480 cs->csc = cs->cst;
1481 cs->cst = cur;
1482
1483 /* Submit. */
1484 util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
1485 amdgpu_cs_submit_ib, NULL);
1486 /* The submission has been queued, unlock the fence now. */
1487 mtx_unlock(&ws->bo_fence_lock);
1488
1489 if (!(flags & RADEON_FLUSH_ASYNC)) {
1490 amdgpu_cs_sync_flush(rcs);
1491 error_code = cur->error_code;
1492 }
1493 } else {
1494 amdgpu_cs_context_cleanup(cs->csc);
1495 }
1496
1497 amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
1498
1499 cs->main.base.used_gart = 0;
1500 cs->main.base.used_vram = 0;
1501
1502 if (cs->ring_type == RING_GFX)
1503 ws->num_gfx_IBs++;
1504 else if (cs->ring_type == RING_DMA)
1505 ws->num_sdma_IBs++;
1506
1507 return error_code;
1508 }
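/* Note (illustrative): amdgpu_cs_flush double-buffers the submission state.
 * cs->csc (being recorded) and cs->cst (being submitted) are swapped, and the
 * CS ioctl itself runs on ws->cs_queue via amdgpu_cs_submit_ib, so the driver
 * can start recording into a fresh IB immediately.  For example:
 *
 *    ws->cs_flush(cs, RADEON_FLUSH_ASYNC, &fence);  // queue and return at once
 *    int err = ws->cs_flush(cs, 0, &fence);         // queue, wait, report error_code
 *
 * A caller that flushed asynchronously can later use cs_sync_flush (or wait
 * on the returned fence) before touching resources that depend on the
 * submission having reached the kernel.
 */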
1509
1510 static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
1511 {
1512 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1513
1514 amdgpu_cs_sync_flush(rcs);
1515 util_queue_fence_destroy(&cs->flush_completed);
1516 p_atomic_dec(&cs->ctx->ws->num_cs);
1517 pb_reference(&cs->main.big_ib_buffer, NULL);
1518 FREE(cs->main.base.prev);
1519 amdgpu_destroy_cs_context(&cs->csc1);
1520 amdgpu_destroy_cs_context(&cs->csc2);
1521 amdgpu_fence_reference(&cs->next_fence, NULL);
1522 FREE(cs);
1523 }
1524
1525 static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
1526 struct pb_buffer *_buf,
1527 enum radeon_bo_usage usage)
1528 {
1529 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1530 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
1531
1532 return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
1533 }
1534
1535 void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
1536 {
1537 ws->base.ctx_create = amdgpu_ctx_create;
1538 ws->base.ctx_destroy = amdgpu_ctx_destroy;
1539 ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
1540 ws->base.cs_create = amdgpu_cs_create;
1541 ws->base.cs_destroy = amdgpu_cs_destroy;
1542 ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
1543 ws->base.cs_validate = amdgpu_cs_validate;
1544 ws->base.cs_check_space = amdgpu_cs_check_space;
1545 ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
1546 ws->base.cs_flush = amdgpu_cs_flush;
1547 ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
1548 ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
1549 ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
1550 ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
1551 ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
1552 ws->base.fence_reference = amdgpu_fence_reference;
1553 ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
1554 ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
1555 }