amd,radeonsi: rename radeon_winsys_cs -> radeon_cmdbuf
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
1 /*
2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright © 2015 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
19 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 */
28
29 #include "amdgpu_cs.h"
30 #include "util/os_time.h"
31 #include <inttypes.h>
32 #include <stdio.h>
33
34 #include "amd/common/sid.h"
35
36 #ifndef AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE
37 #define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
38 #endif
39
40 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
41
42 /* FENCES */
43
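/* Create a fence for a not-yet-submitted IB on the given ring. The sequence
 * number is filled in by amdgpu_fence_submitted() once the IB reaches the
 * kernel; until then, fence->submitted stays unsignalled. */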
44 static struct pipe_fence_handle *
45 amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
46 unsigned ip_instance, unsigned ring)
47 {
48 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
49
50 fence->reference.count = 1;
51 fence->ws = ctx->ws;
52 fence->ctx = ctx;
53 fence->fence.context = ctx->ctx;
54 fence->fence.ip_type = ip_type;
55 fence->fence.ip_instance = ip_instance;
56 fence->fence.ring = ring;
57 util_queue_fence_init(&fence->submitted);
58 util_queue_fence_reset(&fence->submitted);
59 p_atomic_inc(&ctx->refcount);
60 return (struct pipe_fence_handle *)fence;
61 }
62
63 static struct pipe_fence_handle *
64 amdgpu_fence_import_syncobj(struct radeon_winsys *rws, int fd)
65 {
66 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
67 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
68 int r;
69
70 if (!fence)
71 return NULL;
72
73 pipe_reference_init(&fence->reference, 1);
74 fence->ws = ws;
75
76 r = amdgpu_cs_import_syncobj(ws->dev, fd, &fence->syncobj);
77 if (r) {
78 FREE(fence);
79 return NULL;
80 }
81
82 util_queue_fence_init(&fence->submitted);
83
84 assert(amdgpu_fence_is_syncobj(fence));
85 return (struct pipe_fence_handle*)fence;
86 }
87
88 static struct pipe_fence_handle *
89 amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
90 {
91 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
92 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
93
94 if (!fence)
95 return NULL;
96
97 pipe_reference_init(&fence->reference, 1);
98 fence->ws = ws;
99 /* fence->ctx == NULL means that the fence is syncobj-based. */
100
101 /* Convert sync_file into syncobj. */
102 int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
103 if (r) {
104 FREE(fence);
105 return NULL;
106 }
107
108 r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
109 if (r) {
110 amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
111 FREE(fence);
112 return NULL;
113 }
114
115 util_queue_fence_init(&fence->submitted);
116
117 return (struct pipe_fence_handle*)fence;
118 }
119
120 static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
121 struct pipe_fence_handle *pfence)
122 {
123 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
124 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
125
126 if (amdgpu_fence_is_syncobj(fence)) {
127 int fd, r;
128
129 /* Convert syncobj into sync_file. */
130 r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
131 return r ? -1 : fd;
132 }
133
134 util_queue_fence_wait(&fence->submitted);
135
136 /* Convert the amdgpu fence into a fence FD. */
137 int fd;
138 if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
139 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
140 (uint32_t*)&fd))
141 return -1;
142
143 return fd;
144 }
145
146 static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
147 {
148 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
149 uint32_t syncobj;
150 int fd = -1;
151
152 int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,
153 &syncobj);
154 if (r) {
155 return -1;
156 }
157
158 r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);
159 if (r) {
160 fd = -1;
161 }
162
163 amdgpu_cs_destroy_syncobj(ws->dev, syncobj);
164 return fd;
165 }
166
167 static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
168 uint64_t seq_no,
169 uint64_t *user_fence_cpu_address)
170 {
171 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
172
173 rfence->fence.fence = seq_no;
174 rfence->user_fence_cpu_address = user_fence_cpu_address;
175 util_queue_fence_signal(&rfence->submitted);
176 }
177
178 static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
179 {
180 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
181
182 rfence->signalled = true;
183 util_queue_fence_signal(&rfence->submitted);
184 }
185
186 bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
187 bool absolute)
188 {
189 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
190 uint32_t expired;
191 int64_t abs_timeout;
192 uint64_t *user_fence_cpu;
193 int r;
194
195 if (rfence->signalled)
196 return true;
197
198 /* Handle syncobjs. */
199 if (amdgpu_fence_is_syncobj(rfence)) {
200          /* Absolute timeouts are only used by BO fences, which aren't
201 * backed by syncobjs.
202 */
203 assert(!absolute);
204
205 if (amdgpu_cs_syncobj_wait(rfence->ws->dev, &rfence->syncobj, 1,
206 timeout, 0, NULL))
207 return false;
208
209 rfence->signalled = true;
210 return true;
211 }
212
213 if (absolute)
214 abs_timeout = timeout;
215 else
216 abs_timeout = os_time_get_absolute_timeout(timeout);
217
218 /* The fence might not have a number assigned if its IB is being
219 * submitted in the other thread right now. Wait until the submission
220 * is done. */
221 if (!util_queue_fence_wait_timeout(&rfence->submitted, abs_timeout))
222 return false;
223
224 user_fence_cpu = rfence->user_fence_cpu_address;
225 if (user_fence_cpu) {
226 if (*user_fence_cpu >= rfence->fence.fence) {
227 rfence->signalled = true;
228 return true;
229 }
230
231 /* No timeout, just query: no need for the ioctl. */
232 if (!absolute && !timeout)
233 return false;
234 }
235
236 /* Now use the libdrm query. */
237 r = amdgpu_cs_query_fence_status(&rfence->fence,
238 abs_timeout,
239 AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
240 &expired);
241 if (r) {
242 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
243 return false;
244 }
245
246 if (expired) {
247 /* This variable can only transition from false to true, so it doesn't
248 * matter if threads race for it. */
249 rfence->signalled = true;
250 return true;
251 }
252 return false;
253 }
254
255 static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
256 struct pipe_fence_handle *fence,
257 uint64_t timeout)
258 {
259 return amdgpu_fence_wait(fence, timeout, false);
260 }
261
262 static struct pipe_fence_handle *
263 amdgpu_cs_get_next_fence(struct radeon_cmdbuf *rcs)
264 {
265 struct amdgpu_cs *cs = amdgpu_cs(rcs);
266 struct pipe_fence_handle *fence = NULL;
267
268 if (debug_get_option_noop())
269 return NULL;
270
271 if (cs->next_fence) {
272 amdgpu_fence_reference(&fence, cs->next_fence);
273 return fence;
274 }
275
276 fence = amdgpu_fence_create(cs->ctx,
277 cs->csc->ib[IB_MAIN].ip_type,
278 cs->csc->ib[IB_MAIN].ip_instance,
279 cs->csc->ib[IB_MAIN].ring);
280 if (!fence)
281 return NULL;
282
283 amdgpu_fence_reference(&cs->next_fence, fence);
284 return fence;
285 }
286
287 /* CONTEXTS */
288
289 static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
290 {
291 struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
292 int r;
293 struct amdgpu_bo_alloc_request alloc_buffer = {};
294 amdgpu_bo_handle buf_handle;
295
296 if (!ctx)
297 return NULL;
298
299 ctx->ws = amdgpu_winsys(ws);
300 ctx->refcount = 1;
301 ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
302
303 r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
304 if (r) {
305 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
306 goto error_create;
307 }
308
309 alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
310 alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
311 alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
312
313 r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
314 if (r) {
315 fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
316 goto error_user_fence_alloc;
317 }
318
319 r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
320 if (r) {
321 fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
322 goto error_user_fence_map;
323 }
324
325 memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
326 ctx->user_fence_bo = buf_handle;
327
328 return (struct radeon_winsys_ctx*)ctx;
329
330 error_user_fence_map:
331 amdgpu_bo_free(buf_handle);
332 error_user_fence_alloc:
333 amdgpu_cs_ctx_free(ctx->ctx);
334 error_create:
335 FREE(ctx);
336 return NULL;
337 }
338
339 static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
340 {
341 amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
342 }
343
344 static enum pipe_reset_status
345 amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
346 {
347 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
348 uint32_t result, hangs;
349 int r;
350
351 /* Return a failure due to a rejected command submission. */
352 if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
353 return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
354 PIPE_INNOCENT_CONTEXT_RESET;
355 }
356
357 /* Return a failure due to a GPU hang. */
358 r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
359 if (r) {
360 fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
361 return PIPE_NO_RESET;
362 }
363
364 switch (result) {
365 case AMDGPU_CTX_GUILTY_RESET:
366 return PIPE_GUILTY_CONTEXT_RESET;
367 case AMDGPU_CTX_INNOCENT_RESET:
368 return PIPE_INNOCENT_CONTEXT_RESET;
369 case AMDGPU_CTX_UNKNOWN_RESET:
370 return PIPE_UNKNOWN_CONTEXT_RESET;
371 case AMDGPU_CTX_NO_RESET:
372 default:
373 return PIPE_NO_RESET;
374 }
375 }
376
377 /* COMMAND SUBMISSION */
378
379 static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
380 {
381 return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
382 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
383 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD_ENC &&
384 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&
385 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC;
386 }
387
388 static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
389 {
390 return cs->ctx->ws->info.chip_class >= CIK &&
391 cs->ring_type == RING_GFX;
392 }
393
394 static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
395 {
396 if (ring_type == RING_GFX)
397 return 4; /* for chaining */
398
399 return 0;
400 }
401
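/* Return the index of the buffer in this CS's real/slab/sparse buffer list
 * (chosen according to the BO type), or -1 if it hasn't been added to the CS
 * yet. A small hash over bo->unique_id avoids the linear search in the common
 * case. */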
402 int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
403 {
404 unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
405 int i = cs->buffer_indices_hashlist[hash];
406 struct amdgpu_cs_buffer *buffers;
407 int num_buffers;
408
409 if (bo->bo) {
410 buffers = cs->real_buffers;
411 num_buffers = cs->num_real_buffers;
412 } else if (!bo->sparse) {
413 buffers = cs->slab_buffers;
414 num_buffers = cs->num_slab_buffers;
415 } else {
416 buffers = cs->sparse_buffers;
417 num_buffers = cs->num_sparse_buffers;
418 }
419
420       /* Fast path: either not found (i == -1) or the hash entry already points at this buffer. */
421 if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
422 return i;
423
424 /* Hash collision, look for the BO in the list of buffers linearly. */
425 for (i = num_buffers - 1; i >= 0; i--) {
426 if (buffers[i].bo == bo) {
427 /* Put this buffer in the hash list.
428 * This will prevent additional hash collisions if there are
429 * several consecutive lookup_buffer calls for the same buffer.
430 *
431 * Example: Assuming buffers A,B,C collide in the hash list,
432 * the following sequence of buffers:
433 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
434 * will collide here: ^ and here: ^,
435 * meaning that we should get very few collisions in the end. */
436 cs->buffer_indices_hashlist[hash] = i;
437 return i;
438 }
439 }
440 return -1;
441 }
442
443 static int
444 amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
445 {
446 struct amdgpu_cs_buffer *buffer;
447 int idx;
448
449 /* New buffer, check if the backing array is large enough. */
450 if (cs->num_real_buffers >= cs->max_real_buffers) {
451 unsigned new_max =
452 MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
453 struct amdgpu_cs_buffer *new_buffers;
454
455 new_buffers = MALLOC(new_max * sizeof(*new_buffers));
456
457 if (!new_buffers) {
458             fprintf(stderr, "amdgpu_do_add_real_buffer: allocation failed\n");
459 FREE(new_buffers);
460 return -1;
461 }
462
463 memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
464
465 FREE(cs->real_buffers);
466
467 cs->max_real_buffers = new_max;
468 cs->real_buffers = new_buffers;
469 }
470
471 idx = cs->num_real_buffers;
472 buffer = &cs->real_buffers[idx];
473
474 memset(buffer, 0, sizeof(*buffer));
475 amdgpu_winsys_bo_reference(&buffer->bo, bo);
476 p_atomic_inc(&bo->num_cs_references);
477 cs->num_real_buffers++;
478
479 return idx;
480 }
481
482 static int
483 amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
484 {
485 struct amdgpu_cs_context *cs = acs->csc;
486 unsigned hash;
487 int idx = amdgpu_lookup_buffer(cs, bo);
488
489 if (idx >= 0)
490 return idx;
491
492 idx = amdgpu_do_add_real_buffer(cs, bo);
493
494 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
495 cs->buffer_indices_hashlist[hash] = idx;
496
497 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
498 acs->main.base.used_vram += bo->base.size;
499 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
500 acs->main.base.used_gart += bo->base.size;
501
502 return idx;
503 }
504
505 static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
506 struct amdgpu_winsys_bo *bo)
507 {
508 struct amdgpu_cs_context *cs = acs->csc;
509 struct amdgpu_cs_buffer *buffer;
510 unsigned hash;
511 int idx = amdgpu_lookup_buffer(cs, bo);
512 int real_idx;
513
514 if (idx >= 0)
515 return idx;
516
517 real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
518 if (real_idx < 0)
519 return -1;
520
521 /* New buffer, check if the backing array is large enough. */
522 if (cs->num_slab_buffers >= cs->max_slab_buffers) {
523 unsigned new_max =
524 MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
525 struct amdgpu_cs_buffer *new_buffers;
526
527 new_buffers = REALLOC(cs->slab_buffers,
528 cs->max_slab_buffers * sizeof(*new_buffers),
529 new_max * sizeof(*new_buffers));
530 if (!new_buffers) {
531 fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
532 return -1;
533 }
534
535 cs->max_slab_buffers = new_max;
536 cs->slab_buffers = new_buffers;
537 }
538
539 idx = cs->num_slab_buffers;
540 buffer = &cs->slab_buffers[idx];
541
542 memset(buffer, 0, sizeof(*buffer));
543 amdgpu_winsys_bo_reference(&buffer->bo, bo);
544 buffer->u.slab.real_idx = real_idx;
545 p_atomic_inc(&bo->num_cs_references);
546 cs->num_slab_buffers++;
547
548 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
549 cs->buffer_indices_hashlist[hash] = idx;
550
551 return idx;
552 }
553
554 static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
555 struct amdgpu_winsys_bo *bo)
556 {
557 struct amdgpu_cs_context *cs = acs->csc;
558 struct amdgpu_cs_buffer *buffer;
559 unsigned hash;
560 int idx = amdgpu_lookup_buffer(cs, bo);
561
562 if (idx >= 0)
563 return idx;
564
565 /* New buffer, check if the backing array is large enough. */
566 if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
567 unsigned new_max =
568 MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
569 struct amdgpu_cs_buffer *new_buffers;
570
571 new_buffers = REALLOC(cs->sparse_buffers,
572 cs->max_sparse_buffers * sizeof(*new_buffers),
573 new_max * sizeof(*new_buffers));
574 if (!new_buffers) {
575 fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
576 return -1;
577 }
578
579 cs->max_sparse_buffers = new_max;
580 cs->sparse_buffers = new_buffers;
581 }
582
583 idx = cs->num_sparse_buffers;
584 buffer = &cs->sparse_buffers[idx];
585
586 memset(buffer, 0, sizeof(*buffer));
587 amdgpu_winsys_bo_reference(&buffer->bo, bo);
588 p_atomic_inc(&bo->num_cs_references);
589 cs->num_sparse_buffers++;
590
591 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
592 cs->buffer_indices_hashlist[hash] = idx;
593
594 /* We delay adding the backing buffers until we really have to. However,
595 * we cannot delay accounting for memory use.
596 */
597 simple_mtx_lock(&bo->u.sparse.commit_lock);
598
599 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
600 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
601 acs->main.base.used_vram += backing->bo->base.size;
602 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
603 acs->main.base.used_gart += backing->bo->base.size;
604 }
605
606 simple_mtx_unlock(&bo->u.sparse.commit_lock);
607
608 return idx;
609 }
610
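/* Add a buffer with the given usage and priority to the CS buffer list
 * (real, slab, or sparse). Repeated calls for the same buffer take the
 * "last added" fast path below. */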
611 static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
612 struct pb_buffer *buf,
613 enum radeon_bo_usage usage,
614 enum radeon_bo_domain domains,
615 enum radeon_bo_priority priority)
616 {
617 /* Don't use the "domains" parameter. Amdgpu doesn't support changing
618 * the buffer placement during command submission.
619 */
620 struct amdgpu_cs *acs = amdgpu_cs(rcs);
621 struct amdgpu_cs_context *cs = acs->csc;
622 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
623 struct amdgpu_cs_buffer *buffer;
624 int index;
625
626 /* Fast exit for no-op calls.
627 * This is very effective with suballocators and linear uploaders that
628 * are outside of the winsys.
629 */
630 if (bo == cs->last_added_bo &&
631 (usage & cs->last_added_bo_usage) == usage &&
632 (1ull << priority) & cs->last_added_bo_priority_usage)
633 return cs->last_added_bo_index;
634
635 if (!bo->sparse) {
636 if (!bo->bo) {
637 index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
638 if (index < 0)
639 return 0;
640
641 buffer = &cs->slab_buffers[index];
642 buffer->usage |= usage;
643
644 usage &= ~RADEON_USAGE_SYNCHRONIZED;
645 index = buffer->u.slab.real_idx;
646 } else {
647 index = amdgpu_lookup_or_add_real_buffer(acs, bo);
648 if (index < 0)
649 return 0;
650 }
651
652 buffer = &cs->real_buffers[index];
653 } else {
654 index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
655 if (index < 0)
656 return 0;
657
658 buffer = &cs->sparse_buffers[index];
659 }
660
661 buffer->u.real.priority_usage |= 1ull << priority;
662 buffer->usage |= usage;
663
664 cs->last_added_bo = bo;
665 cs->last_added_bo_index = index;
666 cs->last_added_bo_usage = buffer->usage;
667 cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
668 return index;
669 }
670
671 static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
672 enum ring_type ring_type)
673 {
674 struct pb_buffer *pb;
675 uint8_t *mapped;
676 unsigned buffer_size;
677
678 /* Always create a buffer that is at least as large as the maximum seen IB
679 * size, aligned to a power of two (and multiplied by 4 to reduce internal
680 * fragmentation if chaining is not available). Limit to 512k dwords, which
681 * is the largest power of two that fits into the size field of the
682 * INDIRECT_BUFFER packet.
683 */
684 if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
685       buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
686 else
687       buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
688
689 buffer_size = MIN2(buffer_size, 4 * 512 * 1024);
690
691 switch (ib->ib_type) {
692 case IB_MAIN:
693 buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
694 break;
695 default:
696 unreachable("unhandled IB type");
697 }
698
699 pb = ws->base.buffer_create(&ws->base, buffer_size,
700 ws->info.gart_page_size,
701 RADEON_DOMAIN_GTT,
702 RADEON_FLAG_NO_INTERPROCESS_SHARING |
703 (ring_type == RING_GFX ||
704 ring_type == RING_COMPUTE ||
705 ring_type == RING_DMA ?
706 RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC : 0));
707 if (!pb)
708 return false;
709
710 mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
711 if (!mapped) {
712 pb_reference(&pb, NULL);
713 return false;
714 }
715
716 pb_reference(&ib->big_ib_buffer, pb);
717 pb_reference(&pb, NULL);
718
719 ib->ib_mapped = mapped;
720 ib->used_ib_space = 0;
721
722 return true;
723 }
724
725 static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
726 {
727 switch (ib_type) {
728 case IB_MAIN:
729          /* Smaller submits mean the GPU gets busy sooner and there is less
730 * waiting for buffers and fences. Proof:
731 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
732 */
733 return 20 * 1024;
734 default:
735 unreachable("bad ib_type");
736 }
737 }
738
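/* Start recording a new IB: reset the command buffer state, allocate a new
 * backing buffer if the current one doesn't have enough room left, and point
 * the kernel IB info at the new GPU address. */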
739 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
740 enum ib_type ib_type)
741 {
742 struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
743 /* Small IBs are better than big IBs, because the GPU goes idle quicker
744 * and there is less waiting for buffers and fences. Proof:
745 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
746 */
747 struct amdgpu_ib *ib = NULL;
748 struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
749 unsigned ib_size = 0;
750
751 switch (ib_type) {
752 case IB_MAIN:
753 ib = &cs->main;
754 ib_size = 4 * 1024 * 4;
755 break;
756 default:
757 unreachable("unhandled IB type");
758 }
759
760 if (!amdgpu_cs_has_chaining(cs)) {
761 ib_size = MAX2(ib_size,
762 4 * MIN2(util_next_power_of_two(ib->max_ib_size),
763 amdgpu_ib_max_submit_dwords(ib_type)));
764 }
765
766 ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;
767
768 ib->base.prev_dw = 0;
769 ib->base.num_prev = 0;
770 ib->base.current.cdw = 0;
771 ib->base.current.buf = NULL;
772
773 /* Allocate a new buffer for IBs if the current buffer is all used. */
774 if (!ib->big_ib_buffer ||
775 ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
776 if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
777 return false;
778 }
779
780 info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
781 info->ib_bytes = 0;
782 /* ib_bytes is in dwords and the conversion to bytes will be done before
783 * the CS ioctl. */
784 ib->ptr_ib_size = &info->ib_bytes;
785 ib->ptr_ib_size_inside_ib = false;
786
787 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
788 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
789
790 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
791
792 ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
793 ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
794 return true;
795 }
796
797 static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
798 {
799 if (ib->ptr_ib_size_inside_ib) {
800 *ib->ptr_ib_size = ib->base.current.cdw |
801 S_3F2_CHAIN(1) | S_3F2_VALID(1);
802 } else {
803 *ib->ptr_ib_size = ib->base.current.cdw;
804 }
805 }
806
807 static void amdgpu_ib_finalize(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
808 {
809 amdgpu_set_ib_size(ib);
810 ib->used_ib_space += ib->base.current.cdw * 4;
811 ib->used_ib_space = align(ib->used_ib_space, ws->info.ib_start_alignment);
812 ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
813 }
814
815 static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
816 struct amdgpu_cs_context *cs,
817 enum ring_type ring_type)
818 {
819 switch (ring_type) {
820 case RING_DMA:
821 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
822 break;
823
824 case RING_UVD:
825 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
826 break;
827
828 case RING_UVD_ENC:
829 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
830 break;
831
832 case RING_VCE:
833 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
834 break;
835
836 case RING_VCN_DEC:
837 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
838 break;
839
840 case RING_VCN_ENC:
841 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
842 break;
843
844 case RING_COMPUTE:
845 case RING_GFX:
846 cs->ib[IB_MAIN].ip_type = ring_type == RING_GFX ? AMDGPU_HW_IP_GFX :
847 AMDGPU_HW_IP_COMPUTE;
848
849 /* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
850 * invalidation is the beginning of IBs (the previous commit does that),
851 * because completion of an IB doesn't care about the state of GPU caches,
852 * but the beginning of an IB does. Draw calls from multiple IBs can be
853 * executed in parallel, so draw calls from the current IB can finish after
854 * the next IB starts drawing, and so the cache flush at the end of IB
855       * the next IB starts drawing, and so the cache flush at the end of an IB
856 */
857 if (ws->info.drm_minor >= 26)
858 cs->ib[IB_MAIN].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
859 break;
860
861 default:
862 assert(0);
863 }
864
865 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
866 cs->last_added_bo = NULL;
867 return true;
868 }
869
870 static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
871 {
872 unsigned i;
873
874 for (i = 0; i < cs->num_real_buffers; i++) {
875 p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
876 amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
877 }
878 for (i = 0; i < cs->num_slab_buffers; i++) {
879 p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
880 amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
881 }
882 for (i = 0; i < cs->num_sparse_buffers; i++) {
883 p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
884 amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
885 }
886 for (i = 0; i < cs->num_fence_dependencies; i++)
887 amdgpu_fence_reference(&cs->fence_dependencies[i], NULL);
888 for (i = 0; i < cs->num_syncobj_to_signal; i++)
889 amdgpu_fence_reference(&cs->syncobj_to_signal[i], NULL);
890
891 cs->num_real_buffers = 0;
892 cs->num_slab_buffers = 0;
893 cs->num_sparse_buffers = 0;
894 cs->num_fence_dependencies = 0;
895 cs->num_syncobj_to_signal = 0;
896 amdgpu_fence_reference(&cs->fence, NULL);
897
898 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
899 cs->last_added_bo = NULL;
900 }
901
902 static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
903 {
904 amdgpu_cs_context_cleanup(cs);
905 FREE(cs->flags);
906 FREE(cs->real_buffers);
907 FREE(cs->handles);
908 FREE(cs->slab_buffers);
909 FREE(cs->sparse_buffers);
910 FREE(cs->fence_dependencies);
911 FREE(cs->syncobj_to_signal);
912 }
913
914
915 static struct radeon_cmdbuf *
916 amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
917 enum ring_type ring_type,
918 void (*flush)(void *ctx, unsigned flags,
919 struct pipe_fence_handle **fence),
920 void *flush_ctx)
921 {
922 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
923 struct amdgpu_cs *cs;
924
925 cs = CALLOC_STRUCT(amdgpu_cs);
926 if (!cs) {
927 return NULL;
928 }
929
930 util_queue_fence_init(&cs->flush_completed);
931
932 cs->ctx = ctx;
933 cs->flush_cs = flush;
934 cs->flush_data = flush_ctx;
935 cs->ring_type = ring_type;
936
937 struct amdgpu_cs_fence_info fence_info;
938 fence_info.handle = cs->ctx->user_fence_bo;
939 fence_info.offset = cs->ring_type;
940 amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);
941
942 cs->main.ib_type = IB_MAIN;
943
944 if (!amdgpu_init_cs_context(ctx->ws, &cs->csc1, ring_type)) {
945 FREE(cs);
946 return NULL;
947 }
948
949 if (!amdgpu_init_cs_context(ctx->ws, &cs->csc2, ring_type)) {
950 amdgpu_destroy_cs_context(&cs->csc1);
951 FREE(cs);
952 return NULL;
953 }
954
955 /* Set the first submission context as current. */
956 cs->csc = &cs->csc1;
957 cs->cst = &cs->csc2;
958
959 if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
960 amdgpu_destroy_cs_context(&cs->csc2);
961 amdgpu_destroy_cs_context(&cs->csc1);
962 FREE(cs);
963 return NULL;
964 }
965
966 p_atomic_inc(&ctx->ws->num_cs);
967 return &cs->main.base;
968 }
969
970 static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
971 {
972 return true;
973 }
974
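/* Ensure there is room for "dw" more dwords. If the current chunk is full and
 * the ring supports chaining, pad to alignment, emit an INDIRECT_BUFFER packet
 * pointing at a freshly allocated IB buffer, and continue recording there.
 * Returning false tells the caller it must flush instead. */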
975 static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw)
976 {
977 struct amdgpu_ib *ib = amdgpu_ib(rcs);
978 struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
979 unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
980 uint64_t va;
981 uint32_t *new_ptr_ib_size;
982
983 assert(rcs->current.cdw <= rcs->current.max_dw);
984
985 if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
986 return false;
987
988 ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
989
990 if (rcs->current.max_dw - rcs->current.cdw >= dw)
991 return true;
992
993 if (!amdgpu_cs_has_chaining(cs))
994 return false;
995
996 /* Allocate a new chunk */
997 if (rcs->num_prev >= rcs->max_prev) {
998 unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
999 struct radeon_cmdbuf_chunk *new_prev;
1000
1001 new_prev = REALLOC(rcs->prev,
1002 sizeof(*new_prev) * rcs->max_prev,
1003 sizeof(*new_prev) * new_max_prev);
1004 if (!new_prev)
1005 return false;
1006
1007 rcs->prev = new_prev;
1008 rcs->max_prev = new_max_prev;
1009 }
1010
1011 if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
1012 return false;
1013
1014 assert(ib->used_ib_space == 0);
1015 va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
1016
1017 /* This space was originally reserved. */
1018 rcs->current.max_dw += 4;
1019 assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);
1020
1021 /* Pad with NOPs and add INDIRECT_BUFFER packet */
1022 while ((rcs->current.cdw & 7) != 4)
1023 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
1024
1025 radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
1026 : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
1027 radeon_emit(rcs, va);
1028 radeon_emit(rcs, va >> 32);
1029 new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];
1030
1031 assert((rcs->current.cdw & 7) == 0);
1032 assert(rcs->current.cdw <= rcs->current.max_dw);
1033
1034 amdgpu_set_ib_size(ib);
1035 ib->ptr_ib_size = new_ptr_ib_size;
1036 ib->ptr_ib_size_inside_ib = true;
1037
1038 /* Hook up the new chunk */
1039 rcs->prev[rcs->num_prev].buf = rcs->current.buf;
1040 rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
1041 rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
1042 rcs->num_prev++;
1043
1044 ib->base.prev_dw += ib->base.current.cdw;
1045 ib->base.current.cdw = 0;
1046
1047 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
1048 ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
1049
1050 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
1051 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
1052
1053 return true;
1054 }
1055
1056 static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
1057 struct radeon_bo_list_item *list)
1058 {
1059 struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
1060 int i;
1061
1062 if (list) {
1063 for (i = 0; i < cs->num_real_buffers; i++) {
1064 list[i].bo_size = cs->real_buffers[i].bo->base.size;
1065 list[i].vm_address = cs->real_buffers[i].bo->va;
1066 list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
1067 }
1068 }
1069 return cs->num_real_buffers;
1070 }
1071
1072 static unsigned add_fence_dependency_entry(struct amdgpu_cs_context *cs)
1073 {
1074 unsigned idx = cs->num_fence_dependencies++;
1075
1076 if (idx >= cs->max_fence_dependencies) {
1077 unsigned size;
1078 const unsigned increment = 8;
1079
1080 cs->max_fence_dependencies = idx + increment;
1081 size = cs->max_fence_dependencies * sizeof(cs->fence_dependencies[0]);
1082 cs->fence_dependencies = realloc(cs->fence_dependencies, size);
1083 /* Clear the newly-allocated elements. */
1084 memset(cs->fence_dependencies + idx, 0,
1085 increment * sizeof(cs->fence_dependencies[0]));
1086 }
1087 return idx;
1088 }
1089
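/* A dependency can be skipped if it is a BO fence from this CS's own context
 * and ring (those submissions are already ordered) or if it has already
 * signalled. */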
1090 static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
1091 struct amdgpu_fence *fence)
1092 {
1093 struct amdgpu_cs_context *cs = acs->csc;
1094
1095 if (!amdgpu_fence_is_syncobj(fence) &&
1096 fence->ctx == acs->ctx &&
1097 fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
1098 fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
1099 fence->fence.ring == cs->ib[IB_MAIN].ring)
1100 return true;
1101
1102 return amdgpu_fence_wait((void *)fence, 0, false);
1103 }
1104
1105 static void amdgpu_cs_add_fence_dependency(struct radeon_cmdbuf *rws,
1106 struct pipe_fence_handle *pfence)
1107 {
1108 struct amdgpu_cs *acs = amdgpu_cs(rws);
1109 struct amdgpu_cs_context *cs = acs->csc;
1110 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
1111
1112 util_queue_fence_wait(&fence->submitted);
1113
1114 if (is_noop_fence_dependency(acs, fence))
1115 return;
1116
1117 unsigned idx = add_fence_dependency_entry(cs);
1118 amdgpu_fence_reference(&cs->fence_dependencies[idx],
1119 (struct pipe_fence_handle*)fence);
1120 }
1121
1122 static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
1123 struct amdgpu_cs_buffer *buffer)
1124 {
1125 struct amdgpu_cs_context *cs = acs->csc;
1126 struct amdgpu_winsys_bo *bo = buffer->bo;
1127 unsigned new_num_fences = 0;
1128
1129 for (unsigned j = 0; j < bo->num_fences; ++j) {
1130 struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
1131
1132 if (is_noop_fence_dependency(acs, bo_fence))
1133 continue;
1134
1135 amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
1136 new_num_fences++;
1137
1138 if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
1139 continue;
1140
1141 unsigned idx = add_fence_dependency_entry(cs);
1142 amdgpu_fence_reference(&cs->fence_dependencies[idx],
1143 (struct pipe_fence_handle*)bo_fence);
1144 }
1145
1146 for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
1147 amdgpu_fence_reference(&bo->fences[j], NULL);
1148
1149 bo->num_fences = new_num_fences;
1150 }
1151
1152 /* Add the given list of fences to the buffer's fence list.
1153 *
1154 * Must be called with the winsys bo_fence_lock held.
1155 */
1156 void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
1157 unsigned num_fences,
1158 struct pipe_fence_handle **fences)
1159 {
1160 if (bo->num_fences + num_fences > bo->max_fences) {
1161 unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
1162 struct pipe_fence_handle **new_fences =
1163 REALLOC(bo->fences,
1164 bo->num_fences * sizeof(*new_fences),
1165 new_max_fences * sizeof(*new_fences));
1166 if (likely(new_fences)) {
1167 bo->fences = new_fences;
1168 bo->max_fences = new_max_fences;
1169 } else {
1170 unsigned drop;
1171
1172 fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
1173 if (!bo->num_fences)
1174 return;
1175
1176 bo->num_fences--; /* prefer to keep the most recent fence if possible */
1177 amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
1178
1179 drop = bo->num_fences + num_fences - bo->max_fences;
1180 num_fences -= drop;
1181 fences += drop;
1182 }
1183 }
1184
1185 for (unsigned i = 0; i < num_fences; ++i) {
1186 bo->fences[bo->num_fences] = NULL;
1187 amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
1188 bo->num_fences++;
1189 }
1190 }
1191
1192 static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
1193 struct pipe_fence_handle *fence,
1194 unsigned num_buffers,
1195 struct amdgpu_cs_buffer *buffers)
1196 {
1197 for (unsigned i = 0; i < num_buffers; i++) {
1198 struct amdgpu_cs_buffer *buffer = &buffers[i];
1199 struct amdgpu_winsys_bo *bo = buffer->bo;
1200
1201 amdgpu_add_bo_fence_dependencies(acs, buffer);
1202 p_atomic_inc(&bo->num_active_ioctls);
1203 amdgpu_add_fences(bo, 1, &fence);
1204 }
1205 }
1206
1207 /* Since the kernel driver doesn't synchronize execution between different
1208 * rings automatically, we have to add fence dependencies manually.
1209 */
1210 static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
1211 {
1212 struct amdgpu_cs_context *cs = acs->csc;
1213
1214 cs->num_fence_dependencies = 0;
1215
1216 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
1217 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
1218 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
1219 }
1220
1221 static unsigned add_syncobj_to_signal_entry(struct amdgpu_cs_context *cs)
1222 {
1223 unsigned idx = cs->num_syncobj_to_signal++;
1224
1225 if (idx >= cs->max_syncobj_to_signal) {
1226 unsigned size;
1227 const unsigned increment = 8;
1228
1229 cs->max_syncobj_to_signal = idx + increment;
1230 size = cs->max_syncobj_to_signal * sizeof(cs->syncobj_to_signal[0]);
1231 cs->syncobj_to_signal = realloc(cs->syncobj_to_signal, size);
1232 /* Clear the newly-allocated elements. */
1233 memset(cs->syncobj_to_signal + idx, 0,
1234 increment * sizeof(cs->syncobj_to_signal[0]));
1235 }
1236 return idx;
1237 }
1238
1239 static void amdgpu_cs_add_syncobj_signal(struct radeon_cmdbuf *rws,
1240 struct pipe_fence_handle *fence)
1241 {
1242 struct amdgpu_cs *acs = amdgpu_cs(rws);
1243 struct amdgpu_cs_context *cs = acs->csc;
1244
1245 assert(amdgpu_fence_is_syncobj((struct amdgpu_fence *)fence));
1246
1247 unsigned idx = add_syncobj_to_signal_entry(cs);
1248 amdgpu_fence_reference(&cs->syncobj_to_signal[idx], fence);
1249 }
1250
1251 /* Add backing of sparse buffers to the buffer list.
1252 *
1253 * This is done late, during submission, to keep the buffer list short before
1254 * submit, and to avoid managing fences for the backing buffers.
1255 */
1256 static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
1257 {
1258 for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
1259 struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
1260 struct amdgpu_winsys_bo *bo = buffer->bo;
1261
1262 simple_mtx_lock(&bo->u.sparse.commit_lock);
1263
1264 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
1265 /* We can directly add the buffer here, because we know that each
1266 * backing buffer occurs only once.
1267 */
1268 int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
1269 if (idx < 0) {
1270 fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
1271 simple_mtx_unlock(&bo->u.sparse.commit_lock);
1272 return false;
1273 }
1274
1275 cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
1276 cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
1277 p_atomic_inc(&backing->bo->num_active_ioctls);
1278 }
1279
1280 simple_mtx_unlock(&bo->u.sparse.commit_lock);
1281 }
1282
1283 return true;
1284 }
1285
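/* Executed as a util_queue job on the submission thread: build the BO list
 * and CS chunks (IB, user fence, dependencies, syncobjs), call
 * amdgpu_cs_submit_raw, and either record the returned sequence number in the
 * fence or mark it signalled on error. */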
1286 void amdgpu_cs_submit_ib(void *job, int thread_index)
1287 {
1288 struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
1289 struct amdgpu_winsys *ws = acs->ctx->ws;
1290 struct amdgpu_cs_context *cs = acs->cst;
1291 int i, r;
1292 amdgpu_bo_list_handle bo_list = NULL;
1293 uint64_t seq_no = 0;
1294 bool has_user_fence = amdgpu_cs_has_user_fence(cs);
1295
1296 /* Create the buffer list.
1297 * Use a buffer list containing all allocated buffers if requested.
1298 */
1299 if (ws->debug_all_bos) {
1300 struct amdgpu_winsys_bo *bo;
1301 amdgpu_bo_handle *handles;
1302 unsigned num = 0;
1303
1304 simple_mtx_lock(&ws->global_bo_list_lock);
1305
1306 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
1307 if (!handles) {
1308 simple_mtx_unlock(&ws->global_bo_list_lock);
1309 amdgpu_cs_context_cleanup(cs);
1310 cs->error_code = -ENOMEM;
1311 return;
1312 }
1313
1314 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
1315 assert(num < ws->num_buffers);
1316 handles[num++] = bo->bo;
1317 }
1318
1319 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
1320 handles, NULL, &bo_list);
1321 free(handles);
1322 simple_mtx_unlock(&ws->global_bo_list_lock);
1323 } else {
1324 unsigned num_handles;
1325
1326 if (!amdgpu_add_sparse_backing_buffers(cs)) {
1327 r = -ENOMEM;
1328 goto bo_list_error;
1329 }
1330
1331 if (cs->max_real_submit < cs->num_real_buffers) {
1332 FREE(cs->handles);
1333 FREE(cs->flags);
1334
1335 cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
1336 cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);
1337
1338 if (!cs->handles || !cs->flags) {
1339 cs->max_real_submit = 0;
1340 r = -ENOMEM;
1341 goto bo_list_error;
1342 }
1343 }
1344
1345 num_handles = 0;
1346 for (i = 0; i < cs->num_real_buffers; ++i) {
1347 struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
1348
1349 if (buffer->bo->is_local)
1350 continue;
1351
1352 assert(buffer->u.real.priority_usage != 0);
1353
1354 cs->handles[num_handles] = buffer->bo->bo;
1355 cs->flags[num_handles] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
1356 ++num_handles;
1357 }
1358
1359 if (acs->ring_type == RING_GFX)
1360 ws->gfx_bo_list_counter += cs->num_real_buffers;
1361
1362 if (num_handles) {
1363 r = amdgpu_bo_list_create(ws->dev, num_handles,
1364 cs->handles, cs->flags, &bo_list);
1365 } else {
1366 r = 0;
1367 }
1368 }
1369 bo_list_error:
1370
1371 if (r) {
1372 fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
1373 amdgpu_fence_signalled(cs->fence);
1374 cs->error_code = r;
1375 goto cleanup;
1376 }
1377
1378 if (acs->ctx->num_rejected_cs) {
1379 r = -ECANCELED;
1380 } else {
1381 struct drm_amdgpu_cs_chunk chunks[5];
1382 unsigned num_chunks = 0;
1383
1384 /* Convert from dwords to bytes. */
1385 cs->ib[IB_MAIN].ib_bytes *= 4;
1386
1387 /* IB */
1388 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
1389 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1390 chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
1391 num_chunks++;
1392
1393 /* Fence */
1394 if (has_user_fence) {
1395 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1396 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1397 chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
1398 num_chunks++;
1399 }
1400
1401 /* Dependencies */
1402 unsigned num_dependencies = cs->num_fence_dependencies;
1403 unsigned num_syncobj_dependencies = 0;
1404
1405 if (num_dependencies) {
1406 struct drm_amdgpu_cs_chunk_dep *dep_chunk =
1407 alloca(num_dependencies * sizeof(*dep_chunk));
1408 unsigned num = 0;
1409
1410 for (unsigned i = 0; i < num_dependencies; i++) {
1411 struct amdgpu_fence *fence =
1412 (struct amdgpu_fence*)cs->fence_dependencies[i];
1413
1414 if (amdgpu_fence_is_syncobj(fence)) {
1415 num_syncobj_dependencies++;
1416 continue;
1417 }
1418
1419 assert(util_queue_fence_is_signalled(&fence->submitted));
1420 amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[num++]);
1421 }
1422
1423 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1424 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num;
1425 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
1426 num_chunks++;
1427 }
1428
1429 /* Syncobj dependencies. */
1430 if (num_syncobj_dependencies) {
1431 struct drm_amdgpu_cs_chunk_sem *sem_chunk =
1432 alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));
1433 unsigned num = 0;
1434
1435 for (unsigned i = 0; i < num_dependencies; i++) {
1436 struct amdgpu_fence *fence =
1437 (struct amdgpu_fence*)cs->fence_dependencies[i];
1438
1439 if (!amdgpu_fence_is_syncobj(fence))
1440 continue;
1441
1442 assert(util_queue_fence_is_signalled(&fence->submitted));
1443 sem_chunk[num++].handle = fence->syncobj;
1444 }
1445
1446 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
1447 chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num;
1448 chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
1449 num_chunks++;
1450 }
1451
1452          /* Syncobj signals. */
1453 if (cs->num_syncobj_to_signal) {
1454 struct drm_amdgpu_cs_chunk_sem *sem_chunk =
1455 alloca(cs->num_syncobj_to_signal * sizeof(sem_chunk[0]));
1456
1457 for (unsigned i = 0; i < cs->num_syncobj_to_signal; i++) {
1458 struct amdgpu_fence *fence =
1459 (struct amdgpu_fence*)cs->syncobj_to_signal[i];
1460
1461 assert(amdgpu_fence_is_syncobj(fence));
1462 sem_chunk[i].handle = fence->syncobj;
1463 }
1464
1465 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
1466 chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4
1467 * cs->num_syncobj_to_signal;
1468 chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
1469 num_chunks++;
1470 }
1471
1472 assert(num_chunks <= ARRAY_SIZE(chunks));
1473
1474 r = amdgpu_cs_submit_raw(ws->dev, acs->ctx->ctx, bo_list,
1475 num_chunks, chunks, &seq_no);
1476 }
1477
1478 cs->error_code = r;
1479 if (r) {
1480 if (r == -ENOMEM)
1481 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1482 else if (r == -ECANCELED)
1483 fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1484 else
1485 fprintf(stderr, "amdgpu: The CS has been rejected, "
1486 "see dmesg for more information (%i).\n", r);
1487
1488 amdgpu_fence_signalled(cs->fence);
1489
1490 acs->ctx->num_rejected_cs++;
1491 ws->num_total_rejected_cs++;
1492 } else {
1493 /* Success. */
1494 uint64_t *user_fence = NULL;
1495
1496 if (has_user_fence)
1497 user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
1498 amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
1499 }
1500
1501 /* Cleanup. */
1502 if (bo_list)
1503 amdgpu_bo_list_destroy(bo_list);
1504
1505 cleanup:
1506 for (i = 0; i < cs->num_real_buffers; i++)
1507 p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
1508 for (i = 0; i < cs->num_slab_buffers; i++)
1509 p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
1510 for (i = 0; i < cs->num_sparse_buffers; i++)
1511 p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
1512
1513 amdgpu_cs_context_cleanup(cs);
1514 }
1515
1516 /* Make sure the previous submission is completed. */
1517 void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs)
1518 {
1519 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1520
1521 /* Wait for any pending ioctl of this CS to complete. */
1522 util_queue_fence_wait(&cs->flush_completed);
1523 }
1524
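/* Pad the IB to the ring's required alignment, attach a fence, swap the
 * recording and submission contexts, and queue amdgpu_cs_submit_ib on the
 * winsys thread. Waits for completion unless PIPE_FLUSH_ASYNC is set. */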
1525 static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
1526 unsigned flags,
1527 struct pipe_fence_handle **fence)
1528 {
1529 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1530 struct amdgpu_winsys *ws = cs->ctx->ws;
1531 int error_code = 0;
1532
1533 rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);
1534
1535 switch (cs->ring_type) {
1536 case RING_DMA:
1537 /* pad DMA ring to 8 DWs */
1538 if (ws->info.chip_class <= SI) {
1539 while (rcs->current.cdw & 7)
1540 radeon_emit(rcs, 0xf0000000); /* NOP packet */
1541 } else {
1542 while (rcs->current.cdw & 7)
1543 radeon_emit(rcs, 0x00000000); /* NOP packet */
1544 }
1545 break;
1546 case RING_GFX:
1547 case RING_COMPUTE:
1548 /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
1549 if (ws->info.gfx_ib_pad_with_type2) {
1550 while (rcs->current.cdw & 7)
1551 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1552 } else {
1553 while (rcs->current.cdw & 7)
1554 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
1555 }
1556 if (cs->ring_type == RING_GFX)
1557 ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
1558 break;
1559 case RING_UVD:
1560 case RING_UVD_ENC:
1561 while (rcs->current.cdw & 15)
1562 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1563 break;
1564 case RING_VCN_DEC:
1565 while (rcs->current.cdw & 15)
1566 radeon_emit(rcs, 0x81ff); /* nop packet */
1567 break;
1568 default:
1569 break;
1570 }
1571
1572 if (rcs->current.cdw > rcs->current.max_dw) {
1573 fprintf(stderr, "amdgpu: command stream overflowed\n");
1574 }
1575
1576       /* Submit only if the CS is not empty, hasn't overflowed, and noop is off. */
1577 if (likely(radeon_emitted(&cs->main.base, 0) &&
1578 cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
1579 !debug_get_option_noop())) {
1580 struct amdgpu_cs_context *cur = cs->csc;
1581
1582 /* Set IB sizes. */
1583 amdgpu_ib_finalize(ws, &cs->main);
1584
1585 /* Create a fence. */
1586 amdgpu_fence_reference(&cur->fence, NULL);
1587 if (cs->next_fence) {
1588 /* just move the reference */
1589 cur->fence = cs->next_fence;
1590 cs->next_fence = NULL;
1591 } else {
1592 cur->fence = amdgpu_fence_create(cs->ctx,
1593 cur->ib[IB_MAIN].ip_type,
1594 cur->ib[IB_MAIN].ip_instance,
1595 cur->ib[IB_MAIN].ring);
1596 }
1597 if (fence)
1598 amdgpu_fence_reference(fence, cur->fence);
1599
1600 amdgpu_cs_sync_flush(rcs);
1601
1602 /* Prepare buffers.
1603 *
1604           * The bo_fence_lock must be held until the submission is queued to ensure
1605 * that the order of fence dependency updates matches the order of
1606 * submissions.
1607 */
1608 simple_mtx_lock(&ws->bo_fence_lock);
1609 amdgpu_add_fence_dependencies_bo_lists(cs);
1610
1611 /* Swap command streams. "cst" is going to be submitted. */
1612 cs->csc = cs->cst;
1613 cs->cst = cur;
1614
1615 /* Submit. */
1616 util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
1617 amdgpu_cs_submit_ib, NULL);
1618          /* The submission has been queued, unlock bo_fence_lock now. */
1619 simple_mtx_unlock(&ws->bo_fence_lock);
1620
1621 if (!(flags & PIPE_FLUSH_ASYNC)) {
1622 amdgpu_cs_sync_flush(rcs);
1623 error_code = cur->error_code;
1624 }
1625 } else {
1626 amdgpu_cs_context_cleanup(cs->csc);
1627 }
1628
1629 amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
1630
1631 cs->main.base.used_gart = 0;
1632 cs->main.base.used_vram = 0;
1633
1634 if (cs->ring_type == RING_GFX)
1635 ws->num_gfx_IBs++;
1636 else if (cs->ring_type == RING_DMA)
1637 ws->num_sdma_IBs++;
1638
1639 return error_code;
1640 }
1641
1642 static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
1643 {
1644 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1645
1646 amdgpu_cs_sync_flush(rcs);
1647 util_queue_fence_destroy(&cs->flush_completed);
1648 p_atomic_dec(&cs->ctx->ws->num_cs);
1649 pb_reference(&cs->main.big_ib_buffer, NULL);
1650 FREE(cs->main.base.prev);
1651 amdgpu_destroy_cs_context(&cs->csc1);
1652 amdgpu_destroy_cs_context(&cs->csc2);
1653 amdgpu_fence_reference(&cs->next_fence, NULL);
1654 FREE(cs);
1655 }
1656
1657 static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
1658 struct pb_buffer *_buf,
1659 enum radeon_bo_usage usage)
1660 {
1661 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1662 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
1663
1664 return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
1665 }
1666
1667 void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
1668 {
1669 ws->base.ctx_create = amdgpu_ctx_create;
1670 ws->base.ctx_destroy = amdgpu_ctx_destroy;
1671 ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
1672 ws->base.cs_create = amdgpu_cs_create;
1673 ws->base.cs_destroy = amdgpu_cs_destroy;
1674 ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
1675 ws->base.cs_validate = amdgpu_cs_validate;
1676 ws->base.cs_check_space = amdgpu_cs_check_space;
1677 ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
1678 ws->base.cs_flush = amdgpu_cs_flush;
1679 ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
1680 ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
1681 ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
1682 ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
1683 ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
1684 ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
1685 ws->base.fence_reference = amdgpu_fence_reference;
1686 ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
1687 ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
1688 ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
1689 ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
1690 }