winsys/amdgpu: add REWIND emulation via INDIRECT_BUFFER into cs_check_space
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_cs.c
1 /*
2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright © 2015 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
19 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 */
28
29 #include "amdgpu_cs.h"
30 #include "util/os_time.h"
31 #include <inttypes.h>
32 #include <stdio.h>
33
34 #include "amd/common/sid.h"
35
36 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)
37
38 #ifndef AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
39 #define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
40 #endif
41
42 #ifndef AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
43 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
44 #endif
45
46 /* FENCES */
47
48 static struct pipe_fence_handle *
49 amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
50 unsigned ip_instance, unsigned ring)
51 {
52 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
53
54 fence->reference.count = 1;
55 fence->ws = ctx->ws;
56 fence->ctx = ctx;
57 fence->fence.context = ctx->ctx;
58 fence->fence.ip_type = ip_type;
59 fence->fence.ip_instance = ip_instance;
60 fence->fence.ring = ring;
61 util_queue_fence_init(&fence->submitted);
62 util_queue_fence_reset(&fence->submitted);
63 p_atomic_inc(&ctx->refcount);
64 return (struct pipe_fence_handle *)fence;
65 }
66
67 static struct pipe_fence_handle *
68 amdgpu_fence_import_syncobj(struct radeon_winsys *rws, int fd)
69 {
70 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
71 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
72 int r;
73
74 if (!fence)
75 return NULL;
76
77 pipe_reference_init(&fence->reference, 1);
78 fence->ws = ws;
79
80 r = amdgpu_cs_import_syncobj(ws->dev, fd, &fence->syncobj);
81 if (r) {
82 FREE(fence);
83 return NULL;
84 }
85
86 util_queue_fence_init(&fence->submitted);
87
88 assert(amdgpu_fence_is_syncobj(fence));
89 return (struct pipe_fence_handle*)fence;
90 }
91
92 static struct pipe_fence_handle *
93 amdgpu_fence_import_sync_file(struct radeon_winsys *rws, int fd)
94 {
95 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
96 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
97
98 if (!fence)
99 return NULL;
100
101 pipe_reference_init(&fence->reference, 1);
102 fence->ws = ws;
103 /* fence->ctx == NULL means that the fence is syncobj-based. */
104
105 /* Convert sync_file into syncobj. */
106 int r = amdgpu_cs_create_syncobj(ws->dev, &fence->syncobj);
107 if (r) {
108 FREE(fence);
109 return NULL;
110 }
111
112 r = amdgpu_cs_syncobj_import_sync_file(ws->dev, fence->syncobj, fd);
113 if (r) {
114 amdgpu_cs_destroy_syncobj(ws->dev, fence->syncobj);
115 FREE(fence);
116 return NULL;
117 }
118
119 util_queue_fence_init(&fence->submitted);
120
121 return (struct pipe_fence_handle*)fence;
122 }
123
124 static int amdgpu_fence_export_sync_file(struct radeon_winsys *rws,
125 struct pipe_fence_handle *pfence)
126 {
127 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
128 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
129
130 if (amdgpu_fence_is_syncobj(fence)) {
131 int fd, r;
132
133 /* Convert syncobj into sync_file. */
134 r = amdgpu_cs_syncobj_export_sync_file(ws->dev, fence->syncobj, &fd);
135 return r ? -1 : fd;
136 }
137
138 util_queue_fence_wait(&fence->submitted);
139
140 /* Convert the amdgpu fence into a fence FD. */
141 int fd;
142 if (amdgpu_cs_fence_to_handle(ws->dev, &fence->fence,
143 AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
144 (uint32_t*)&fd))
145 return -1;
146
147 return fd;
148 }
149
150 static int amdgpu_export_signalled_sync_file(struct radeon_winsys *rws)
151 {
152 struct amdgpu_winsys *ws = amdgpu_winsys(rws);
153 uint32_t syncobj;
154 int fd = -1;
155
156 int r = amdgpu_cs_create_syncobj2(ws->dev, DRM_SYNCOBJ_CREATE_SIGNALED,
157 &syncobj);
158 if (r) {
159 return -1;
160 }
161
162 r = amdgpu_cs_syncobj_export_sync_file(ws->dev, syncobj, &fd);
163 if (r) {
164 fd = -1;
165 }
166
167 amdgpu_cs_destroy_syncobj(ws->dev, syncobj);
168 return fd;
169 }
170
171 static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
172 uint64_t seq_no,
173 uint64_t *user_fence_cpu_address)
174 {
175 struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
176
177 afence->fence.fence = seq_no;
178 afence->user_fence_cpu_address = user_fence_cpu_address;
179 util_queue_fence_signal(&afence->submitted);
180 }
181
182 static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
183 {
184 struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
185
186 afence->signalled = true;
187 util_queue_fence_signal(&afence->submitted);
188 }
189
190 bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
191 bool absolute)
192 {
193 struct amdgpu_fence *afence = (struct amdgpu_fence*)fence;
194 uint32_t expired;
195 int64_t abs_timeout;
196 uint64_t *user_fence_cpu;
197 int r;
198
199 if (afence->signalled)
200 return true;
201
202 /* Handle syncobjs. */
203 if (amdgpu_fence_is_syncobj(afence)) {
204          /* Absolute timeouts are only used by BO fences, which aren't
205 * backed by syncobjs.
206 */
207 assert(!absolute);
208
209 if (amdgpu_cs_syncobj_wait(afence->ws->dev, &afence->syncobj, 1,
210 timeout, 0, NULL))
211 return false;
212
213 afence->signalled = true;
214 return true;
215 }
216
217 if (absolute)
218 abs_timeout = timeout;
219 else
220 abs_timeout = os_time_get_absolute_timeout(timeout);
221
222 /* The fence might not have a number assigned if its IB is being
223 * submitted in the other thread right now. Wait until the submission
224 * is done. */
225 if (!util_queue_fence_wait_timeout(&afence->submitted, abs_timeout))
226 return false;
227
228 user_fence_cpu = afence->user_fence_cpu_address;
229 if (user_fence_cpu) {
230 if (*user_fence_cpu >= afence->fence.fence) {
231 afence->signalled = true;
232 return true;
233 }
234
235 /* No timeout, just query: no need for the ioctl. */
236 if (!absolute && !timeout)
237 return false;
238 }
239
240 /* Now use the libdrm query. */
241 r = amdgpu_cs_query_fence_status(&afence->fence,
242 abs_timeout,
243 AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
244 &expired);
245 if (r) {
246 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
247 return false;
248 }
249
250 if (expired) {
251 /* This variable can only transition from false to true, so it doesn't
252 * matter if threads race for it. */
253 afence->signalled = true;
254 return true;
255 }
256 return false;
257 }
258
259 static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
260 struct pipe_fence_handle *fence,
261 uint64_t timeout)
262 {
263 return amdgpu_fence_wait(fence, timeout, false);
264 }
265
266 static struct pipe_fence_handle *
267 amdgpu_cs_get_next_fence(struct radeon_cmdbuf *rcs)
268 {
269 struct amdgpu_cs *cs = amdgpu_cs(rcs);
270 struct pipe_fence_handle *fence = NULL;
271
272 if (debug_get_option_noop())
273 return NULL;
274
275 if (cs->next_fence) {
276 amdgpu_fence_reference(&fence, cs->next_fence);
277 return fence;
278 }
279
280 fence = amdgpu_fence_create(cs->ctx,
281 cs->csc->ib[IB_MAIN].ip_type,
282 cs->csc->ib[IB_MAIN].ip_instance,
283 cs->csc->ib[IB_MAIN].ring);
284 if (!fence)
285 return NULL;
286
287 amdgpu_fence_reference(&cs->next_fence, fence);
288 return fence;
289 }
290
291 /* CONTEXTS */
292
293 static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
294 {
295 struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
296 int r;
297 struct amdgpu_bo_alloc_request alloc_buffer = {};
298 amdgpu_bo_handle buf_handle;
299
300 if (!ctx)
301 return NULL;
302
303 ctx->ws = amdgpu_winsys(ws);
304 ctx->refcount = 1;
305 ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;
306
307 r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
308 if (r) {
309 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
310 goto error_create;
311 }
312
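   /* The per-context "user fence" BO: one GTT page that the GPU writes fence
    * sequence numbers into at the end of a submission, so amdgpu_fence_wait()
    * can poll completion from the CPU without an ioctl.
    */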
313 alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
314 alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
315 alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
316
317 r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
318 if (r) {
319 fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
320 goto error_user_fence_alloc;
321 }
322
323 r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
324 if (r) {
325 fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
326 goto error_user_fence_map;
327 }
328
329 memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
330 ctx->user_fence_bo = buf_handle;
331
332 return (struct radeon_winsys_ctx*)ctx;
333
334 error_user_fence_map:
335 amdgpu_bo_free(buf_handle);
336 error_user_fence_alloc:
337 amdgpu_cs_ctx_free(ctx->ctx);
338 error_create:
339 FREE(ctx);
340 return NULL;
341 }
342
343 static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
344 {
345 amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
346 }
347
348 static enum pipe_reset_status
349 amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
350 {
351 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
352 uint32_t result, hangs;
353 int r;
354
355 /* Return a failure due to a rejected command submission. */
356 if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
357 return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
358 PIPE_INNOCENT_CONTEXT_RESET;
359 }
360
361 /* Return a failure due to a GPU hang. */
362 r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
363 if (r) {
364 fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
365 return PIPE_NO_RESET;
366 }
367
368 switch (result) {
369 case AMDGPU_CTX_GUILTY_RESET:
370 return PIPE_GUILTY_CONTEXT_RESET;
371 case AMDGPU_CTX_INNOCENT_RESET:
372 return PIPE_INNOCENT_CONTEXT_RESET;
373 case AMDGPU_CTX_UNKNOWN_RESET:
374 return PIPE_UNKNOWN_CONTEXT_RESET;
375 case AMDGPU_CTX_NO_RESET:
376 default:
377 return PIPE_NO_RESET;
378 }
379 }
380
381 /* COMMAND SUBMISSION */
382
383 static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
384 {
385 return cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD &&
386 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCE &&
387 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_UVD_ENC &&
388 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_DEC &&
389 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_ENC &&
390 cs->ib[IB_MAIN].ip_type != AMDGPU_HW_IP_VCN_JPEG;
391 }
392
393 static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
394 {
395 return cs->ctx->ws->info.chip_class >= GFX7 &&
396 (cs->ring_type == RING_GFX || cs->ring_type == RING_COMPUTE);
397 }
398
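/* Dwords reserved at the end of each IB chunk. With chaining, 4 dwords are kept
 * for the INDIRECT_BUFFER packet that links to the next chunk: a PKT3 header,
 * two dwords of chained-IB address, and one dword for its size/flags
 * (emitted in amdgpu_cs_check_space).
 */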
399 static unsigned amdgpu_cs_epilog_dws(struct amdgpu_cs *cs)
400 {
401 if (amdgpu_cs_has_chaining(cs))
402 return 4; /* for chaining */
403
404 return 0;
405 }
406
407 int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
408 {
409 unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
410 int i = cs->buffer_indices_hashlist[hash];
411 struct amdgpu_cs_buffer *buffers;
412 int num_buffers;
413
414 if (bo->bo) {
415 buffers = cs->real_buffers;
416 num_buffers = cs->num_real_buffers;
417 } else if (!bo->sparse) {
418 buffers = cs->slab_buffers;
419 num_buffers = cs->num_slab_buffers;
420 } else {
421 buffers = cs->sparse_buffers;
422 num_buffers = cs->num_sparse_buffers;
423 }
424
425    /* Either not found (empty hash entry), or found right at the hashed index. */
426 if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
427 return i;
428
429 /* Hash collision, look for the BO in the list of buffers linearly. */
430 for (i = num_buffers - 1; i >= 0; i--) {
431 if (buffers[i].bo == bo) {
432 /* Put this buffer in the hash list.
433 * This will prevent additional hash collisions if there are
434 * several consecutive lookup_buffer calls for the same buffer.
435 *
436 * Example: Assuming buffers A,B,C collide in the hash list,
437 * the following sequence of buffers:
438 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
439 * will collide here: ^ and here: ^,
440 * meaning that we should get very few collisions in the end. */
441 cs->buffer_indices_hashlist[hash] = i;
442 return i;
443 }
444 }
445 return -1;
446 }
447
448 static int
449 amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
450 {
451 struct amdgpu_cs_buffer *buffer;
452 int idx;
453
454 /* New buffer, check if the backing array is large enough. */
455 if (cs->num_real_buffers >= cs->max_real_buffers) {
456 unsigned new_max =
457 MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
458 struct amdgpu_cs_buffer *new_buffers;
459
460 new_buffers = MALLOC(new_max * sizeof(*new_buffers));
461
462 if (!new_buffers) {
463          fprintf(stderr, "amdgpu_do_add_real_buffer: allocation failed\n");
464 FREE(new_buffers);
465 return -1;
466 }
467
468 memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));
469
470 FREE(cs->real_buffers);
471
472 cs->max_real_buffers = new_max;
473 cs->real_buffers = new_buffers;
474 }
475
476 idx = cs->num_real_buffers;
477 buffer = &cs->real_buffers[idx];
478
479 memset(buffer, 0, sizeof(*buffer));
480 amdgpu_winsys_bo_reference(&buffer->bo, bo);
481 p_atomic_inc(&bo->num_cs_references);
482 cs->num_real_buffers++;
483
484 return idx;
485 }
486
487 static int
488 amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
489 {
490 struct amdgpu_cs_context *cs = acs->csc;
491 unsigned hash;
492 int idx = amdgpu_lookup_buffer(cs, bo);
493
494 if (idx >= 0)
495 return idx;
496
497 idx = amdgpu_do_add_real_buffer(cs, bo);
498
499 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
500 cs->buffer_indices_hashlist[hash] = idx;
501
502 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
503 acs->main.base.used_vram += bo->base.size;
504 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
505 acs->main.base.used_gart += bo->base.size;
506
507 return idx;
508 }
509
510 static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
511 struct amdgpu_winsys_bo *bo)
512 {
513 struct amdgpu_cs_context *cs = acs->csc;
514 struct amdgpu_cs_buffer *buffer;
515 unsigned hash;
516 int idx = amdgpu_lookup_buffer(cs, bo);
517 int real_idx;
518
519 if (idx >= 0)
520 return idx;
521
522 real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
523 if (real_idx < 0)
524 return -1;
525
526 /* New buffer, check if the backing array is large enough. */
527 if (cs->num_slab_buffers >= cs->max_slab_buffers) {
528 unsigned new_max =
529 MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
530 struct amdgpu_cs_buffer *new_buffers;
531
532 new_buffers = REALLOC(cs->slab_buffers,
533 cs->max_slab_buffers * sizeof(*new_buffers),
534 new_max * sizeof(*new_buffers));
535 if (!new_buffers) {
536 fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
537 return -1;
538 }
539
540 cs->max_slab_buffers = new_max;
541 cs->slab_buffers = new_buffers;
542 }
543
544 idx = cs->num_slab_buffers;
545 buffer = &cs->slab_buffers[idx];
546
547 memset(buffer, 0, sizeof(*buffer));
548 amdgpu_winsys_bo_reference(&buffer->bo, bo);
549 buffer->u.slab.real_idx = real_idx;
550 p_atomic_inc(&bo->num_cs_references);
551 cs->num_slab_buffers++;
552
553 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
554 cs->buffer_indices_hashlist[hash] = idx;
555
556 return idx;
557 }
558
559 static int amdgpu_lookup_or_add_sparse_buffer(struct amdgpu_cs *acs,
560 struct amdgpu_winsys_bo *bo)
561 {
562 struct amdgpu_cs_context *cs = acs->csc;
563 struct amdgpu_cs_buffer *buffer;
564 unsigned hash;
565 int idx = amdgpu_lookup_buffer(cs, bo);
566
567 if (idx >= 0)
568 return idx;
569
570 /* New buffer, check if the backing array is large enough. */
571 if (cs->num_sparse_buffers >= cs->max_sparse_buffers) {
572 unsigned new_max =
573 MAX2(cs->max_sparse_buffers + 16, (unsigned)(cs->max_sparse_buffers * 1.3));
574 struct amdgpu_cs_buffer *new_buffers;
575
576 new_buffers = REALLOC(cs->sparse_buffers,
577 cs->max_sparse_buffers * sizeof(*new_buffers),
578 new_max * sizeof(*new_buffers));
579 if (!new_buffers) {
580 fprintf(stderr, "amdgpu_lookup_or_add_sparse_buffer: allocation failed\n");
581 return -1;
582 }
583
584 cs->max_sparse_buffers = new_max;
585 cs->sparse_buffers = new_buffers;
586 }
587
588 idx = cs->num_sparse_buffers;
589 buffer = &cs->sparse_buffers[idx];
590
591 memset(buffer, 0, sizeof(*buffer));
592 amdgpu_winsys_bo_reference(&buffer->bo, bo);
593 p_atomic_inc(&bo->num_cs_references);
594 cs->num_sparse_buffers++;
595
596 hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
597 cs->buffer_indices_hashlist[hash] = idx;
598
599 /* We delay adding the backing buffers until we really have to. However,
600 * we cannot delay accounting for memory use.
601 */
602 simple_mtx_lock(&bo->lock);
603
604 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
605 if (bo->initial_domain & RADEON_DOMAIN_VRAM)
606 acs->main.base.used_vram += backing->bo->base.size;
607 else if (bo->initial_domain & RADEON_DOMAIN_GTT)
608 acs->main.base.used_gart += backing->bo->base.size;
609 }
610
611 simple_mtx_unlock(&bo->lock);
612
613 return idx;
614 }
615
616 static unsigned amdgpu_cs_add_buffer(struct radeon_cmdbuf *rcs,
617 struct pb_buffer *buf,
618 enum radeon_bo_usage usage,
619 enum radeon_bo_domain domains,
620 enum radeon_bo_priority priority)
621 {
622 /* Don't use the "domains" parameter. Amdgpu doesn't support changing
623 * the buffer placement during command submission.
624 */
625 struct amdgpu_cs *acs = amdgpu_cs(rcs);
626 struct amdgpu_cs_context *cs = acs->csc;
627 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
628 struct amdgpu_cs_buffer *buffer;
629 int index;
630
631 /* Fast exit for no-op calls.
632 * This is very effective with suballocators and linear uploaders that
633 * are outside of the winsys.
634 */
635 if (bo == cs->last_added_bo &&
636 (usage & cs->last_added_bo_usage) == usage &&
637 (1u << priority) & cs->last_added_bo_priority_usage)
638 return cs->last_added_bo_index;
639
640 if (!bo->sparse) {
641 if (!bo->bo) {
642 index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
643 if (index < 0)
644 return 0;
645
646 buffer = &cs->slab_buffers[index];
647 buffer->usage |= usage;
648
649 usage &= ~RADEON_USAGE_SYNCHRONIZED;
650 index = buffer->u.slab.real_idx;
651 } else {
652 index = amdgpu_lookup_or_add_real_buffer(acs, bo);
653 if (index < 0)
654 return 0;
655 }
656
657 buffer = &cs->real_buffers[index];
658 } else {
659 index = amdgpu_lookup_or_add_sparse_buffer(acs, bo);
660 if (index < 0)
661 return 0;
662
663 buffer = &cs->sparse_buffers[index];
664 }
665
666 buffer->u.real.priority_usage |= 1u << priority;
667 buffer->usage |= usage;
668
669 cs->last_added_bo = bo;
670 cs->last_added_bo_index = index;
671 cs->last_added_bo_usage = buffer->usage;
672 cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
673 return index;
674 }
675
676 static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib,
677 enum ring_type ring_type)
678 {
679 struct pb_buffer *pb;
680 uint8_t *mapped;
681 unsigned buffer_size;
682
683 /* Always create a buffer that is at least as large as the maximum seen IB
684 * size, aligned to a power of two (and multiplied by 4 to reduce internal
685 * fragmentation if chaining is not available). Limit to 512k dwords, which
686 * is the largest power of two that fits into the size field of the
687 * INDIRECT_BUFFER packet.
688 */
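   /* Worked example (hypothetical numbers): with chaining and max_ib_size = 10000
    * dwords, util_next_power_of_two(10000) = 16384 dwords -> 64 KiB; without
    * chaining the size is quadrupled first: next_power_of_two(40000) = 65536
    * dwords -> 256 KiB, before the min/max clamps below are applied.
    */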
689 if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
690       buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
691    else
692       buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);
693
694 const unsigned min_size = MAX2(ib->max_check_space_size, 8 * 1024 * 4);
695 const unsigned max_size = 512 * 1024 * 4;
696
697 buffer_size = MIN2(buffer_size, max_size);
698 buffer_size = MAX2(buffer_size, min_size); /* min_size is more important */
699
700 pb = ws->base.buffer_create(&ws->base, buffer_size,
701 ws->info.gart_page_size,
702 RADEON_DOMAIN_GTT,
703 RADEON_FLAG_NO_INTERPROCESS_SHARING |
704 (ring_type == RING_GFX ||
705 ring_type == RING_COMPUTE ||
706 ring_type == RING_DMA ?
707 RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC : 0));
708 if (!pb)
709 return false;
710
711 mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
712 if (!mapped) {
713 pb_reference(&pb, NULL);
714 return false;
715 }
716
717 pb_reference(&ib->big_ib_buffer, pb);
718 pb_reference(&pb, NULL);
719
720 ib->ib_mapped = mapped;
721 ib->used_ib_space = 0;
722
723 return true;
724 }
725
726 static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
727 {
728 /* The maximum IB size including all chained IBs. */
729 switch (ib_type) {
730 case IB_MAIN:
731 /* Smaller submits means the GPU gets busy sooner and there is less
732 * waiting for buffers and fences. Proof:
733 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
734 */
735 return 20 * 1024;
736 case IB_PARALLEL_COMPUTE:
737 /* Always chain this IB. */
738 return UINT_MAX;
739 default:
740 unreachable("bad ib_type");
741 }
742 }
743
744 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
745 enum ib_type ib_type)
746 {
747 struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
748 /* Small IBs are better than big IBs, because the GPU goes idle quicker
749 * and there is less waiting for buffers and fences. Proof:
750 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
751 */
752 struct amdgpu_ib *ib = NULL;
753 struct drm_amdgpu_cs_chunk_ib *info = &cs->csc->ib[ib_type];
754 /* This is the minimum size of a contiguous IB. */
755 unsigned ib_size = 4 * 1024 * 4;
756
757 switch (ib_type) {
758 case IB_PARALLEL_COMPUTE:
759 ib = &cs->compute_ib;
760 break;
761 case IB_MAIN:
762 ib = &cs->main;
763 break;
764 default:
765 unreachable("unhandled IB type");
766 }
767
768 /* Always allocate at least the size of the biggest cs_check_space call,
769 * because precisely the last call might have requested this size.
770 */
771 ib_size = MAX2(ib_size, ib->max_check_space_size);
772
773 if (!amdgpu_cs_has_chaining(cs)) {
774 ib_size = MAX2(ib_size,
775 4 * MIN2(util_next_power_of_two(ib->max_ib_size),
776 amdgpu_ib_max_submit_dwords(ib_type)));
777 }
778
779 ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;
780
781 ib->base.prev_dw = 0;
782 ib->base.num_prev = 0;
783 ib->base.current.cdw = 0;
784 ib->base.current.buf = NULL;
785
786 /* Allocate a new buffer for IBs if the current buffer is all used. */
787 if (!ib->big_ib_buffer ||
788 ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
789 if (!amdgpu_ib_new_buffer(aws, ib, cs->ring_type))
790 return false;
791 }
792
793 info->va_start = amdgpu_winsys_bo(ib->big_ib_buffer)->va + ib->used_ib_space;
794 info->ib_bytes = 0;
795 /* ib_bytes is in dwords and the conversion to bytes will be done before
796 * the CS ioctl. */
797 ib->ptr_ib_size = &info->ib_bytes;
798 ib->ptr_ib_size_inside_ib = false;
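   /* At this point ptr_ib_size refers to the ib_bytes field of the CS chunk.
    * After chaining in amdgpu_cs_check_space it is redirected into the IB itself
    * (ptr_ib_size_inside_ib), where amdgpu_set_ib_size also sets the CHAIN and
    * VALID bits of the INDIRECT_BUFFER size dword.
    */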
799
800 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
801 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
802
803 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
804
805 ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
806 ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs);
807 assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
808 ib->base.gpu_address = info->va_start;
809 return true;
810 }
811
812 static void amdgpu_set_ib_size(struct amdgpu_ib *ib)
813 {
814 if (ib->ptr_ib_size_inside_ib) {
815 *ib->ptr_ib_size = ib->base.current.cdw |
816 S_3F2_CHAIN(1) | S_3F2_VALID(1);
817 } else {
818 *ib->ptr_ib_size = ib->base.current.cdw;
819 }
820 }
821
822 static void amdgpu_ib_finalize(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
823 {
824 amdgpu_set_ib_size(ib);
825 ib->used_ib_space += ib->base.current.cdw * 4;
826 ib->used_ib_space = align(ib->used_ib_space, ws->info.ib_start_alignment);
827 ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
828 }
829
830 static bool amdgpu_init_cs_context(struct amdgpu_winsys *ws,
831 struct amdgpu_cs_context *cs,
832 enum ring_type ring_type)
833 {
834 switch (ring_type) {
835 case RING_DMA:
836 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_DMA;
837 break;
838
839 case RING_UVD:
840 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD;
841 break;
842
843 case RING_UVD_ENC:
844 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_UVD_ENC;
845 break;
846
847 case RING_VCE:
848 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCE;
849 break;
850
851 case RING_VCN_DEC:
852 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_DEC;
853 break;
854
855 case RING_VCN_ENC:
856 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_ENC;
857 break;
858
859 case RING_VCN_JPEG:
860 cs->ib[IB_MAIN].ip_type = AMDGPU_HW_IP_VCN_JPEG;
861 break;
862
863 case RING_COMPUTE:
864 case RING_GFX:
865 cs->ib[IB_MAIN].ip_type = ring_type == RING_GFX ? AMDGPU_HW_IP_GFX :
866 AMDGPU_HW_IP_COMPUTE;
867
868 /* The kernel shouldn't invalidate L2 and vL1. The proper place for cache
869 * invalidation is the beginning of IBs (the previous commit does that),
870 * because completion of an IB doesn't care about the state of GPU caches,
871 * but the beginning of an IB does. Draw calls from multiple IBs can be
872 * executed in parallel, so draw calls from the current IB can finish after
873 * the next IB starts drawing, and so the cache flush at the end of IB
874 * is always late.
875 */
876 if (ws->info.drm_minor >= 26)
877 cs->ib[IB_MAIN].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
878 break;
879
880 default:
881 assert(0);
882 }
883
884 cs->ib[IB_PARALLEL_COMPUTE].ip_type = AMDGPU_HW_IP_COMPUTE;
885 cs->ib[IB_PARALLEL_COMPUTE].flags = AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE;
886
887 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
888 cs->last_added_bo = NULL;
889 return true;
890 }
891
892 static void cleanup_fence_list(struct amdgpu_fence_list *fences)
893 {
894 for (unsigned i = 0; i < fences->num; i++)
895 amdgpu_fence_reference(&fences->list[i], NULL);
896 fences->num = 0;
897 }
898
899 static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
900 {
901 unsigned i;
902
903 for (i = 0; i < cs->num_real_buffers; i++) {
904 p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
905 amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
906 }
907 for (i = 0; i < cs->num_slab_buffers; i++) {
908 p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
909 amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
910 }
911 for (i = 0; i < cs->num_sparse_buffers; i++) {
912 p_atomic_dec(&cs->sparse_buffers[i].bo->num_cs_references);
913 amdgpu_winsys_bo_reference(&cs->sparse_buffers[i].bo, NULL);
914 }
915 cleanup_fence_list(&cs->fence_dependencies);
916 cleanup_fence_list(&cs->syncobj_dependencies);
917 cleanup_fence_list(&cs->syncobj_to_signal);
918 cleanup_fence_list(&cs->compute_fence_dependencies);
919 cleanup_fence_list(&cs->compute_start_fence_dependencies);
920
921 cs->num_real_buffers = 0;
922 cs->num_slab_buffers = 0;
923 cs->num_sparse_buffers = 0;
924 amdgpu_fence_reference(&cs->fence, NULL);
925
926 memset(cs->buffer_indices_hashlist, -1, sizeof(cs->buffer_indices_hashlist));
927 cs->last_added_bo = NULL;
928 }
929
930 static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
931 {
932 amdgpu_cs_context_cleanup(cs);
933 FREE(cs->real_buffers);
934 FREE(cs->slab_buffers);
935 FREE(cs->sparse_buffers);
936 FREE(cs->fence_dependencies.list);
937 FREE(cs->syncobj_dependencies.list);
938 FREE(cs->syncobj_to_signal.list);
939 FREE(cs->compute_fence_dependencies.list);
940 FREE(cs->compute_start_fence_dependencies.list);
941 }
942
943
944 static struct radeon_cmdbuf *
945 amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
946 enum ring_type ring_type,
947 void (*flush)(void *ctx, unsigned flags,
948 struct pipe_fence_handle **fence),
949 void *flush_ctx,
950 bool stop_exec_on_failure)
951 {
952 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
953 struct amdgpu_cs *cs;
954
955 cs = CALLOC_STRUCT(amdgpu_cs);
956 if (!cs) {
957 return NULL;
958 }
959
960 util_queue_fence_init(&cs->flush_completed);
961
962 cs->ctx = ctx;
963 cs->flush_cs = flush;
964 cs->flush_data = flush_ctx;
965 cs->ring_type = ring_type;
966 cs->stop_exec_on_failure = stop_exec_on_failure;
967
968 struct amdgpu_cs_fence_info fence_info;
969 fence_info.handle = cs->ctx->user_fence_bo;
970 fence_info.offset = cs->ring_type;
971 amdgpu_cs_chunk_fence_info_to_data(&fence_info, (void*)&cs->fence_chunk);
972
973 cs->main.ib_type = IB_MAIN;
974 cs->compute_ib.ib_type = IB_PARALLEL_COMPUTE;
975
976 if (!amdgpu_init_cs_context(ctx->ws, &cs->csc1, ring_type)) {
977 FREE(cs);
978 return NULL;
979 }
980
981 if (!amdgpu_init_cs_context(ctx->ws, &cs->csc2, ring_type)) {
982 amdgpu_destroy_cs_context(&cs->csc1);
983 FREE(cs);
984 return NULL;
985 }
986
987 /* Set the first submission context as current. */
988 cs->csc = &cs->csc1;
989 cs->cst = &cs->csc2;
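   /* The two submission contexts are double-buffered: csc is filled by the
    * driver while cst may still be in flight in the submission queue;
    * amdgpu_cs_flush swaps them before queuing the next submit.
    */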
990
991 if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
992 amdgpu_destroy_cs_context(&cs->csc2);
993 amdgpu_destroy_cs_context(&cs->csc1);
994 FREE(cs);
995 return NULL;
996 }
997
998 p_atomic_inc(&ctx->ws->num_cs);
999 return &cs->main.base;
1000 }
1001
1002 static struct radeon_cmdbuf *
1003 amdgpu_cs_add_parallel_compute_ib(struct radeon_cmdbuf *ib,
1004 bool uses_gds_ordered_append)
1005 {
1006 struct amdgpu_cs *cs = (struct amdgpu_cs*)ib;
1007 struct amdgpu_winsys *ws = cs->ctx->ws;
1008
1009 if (cs->ring_type != RING_GFX)
1010 return NULL;
1011
1012 /* only one secondary IB can be added */
1013 if (cs->compute_ib.ib_mapped)
1014 return NULL;
1015
1016 /* Allocate the compute IB. */
1017 if (!amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE))
1018 return NULL;
1019
1020 if (uses_gds_ordered_append) {
1021 cs->csc1.ib[IB_PARALLEL_COMPUTE].flags |=
1022 AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
1023 cs->csc2.ib[IB_PARALLEL_COMPUTE].flags |=
1024 AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID;
1025 }
1026 return &cs->compute_ib.base;
1027 }
1028
1029 static bool amdgpu_cs_validate(struct radeon_cmdbuf *rcs)
1030 {
1031 return true;
1032 }
1033
1034 static bool amdgpu_cs_check_space(struct radeon_cmdbuf *rcs, unsigned dw,
1035 bool force_chaining)
1036 {
1037 struct amdgpu_ib *ib = amdgpu_ib(rcs);
1038 struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
1039 unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
1040 unsigned cs_epilog_dw = amdgpu_cs_epilog_dws(cs);
1041 unsigned need_byte_size = (dw + cs_epilog_dw) * 4;
1042 uint64_t va;
1043 uint32_t *new_ptr_ib_size;
1044
1045 assert(rcs->current.cdw <= rcs->current.max_dw);
1046
1047 /* 125% of the size for IB epilog. */
1048 unsigned safe_byte_size = need_byte_size + need_byte_size / 4;
1049 ib->max_check_space_size = MAX2(ib->max_check_space_size,
1050 safe_byte_size);
1051
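   /* Per the commit title, force_chaining is what the REWIND emulation uses:
    * the caller wants the next "dw" dwords to start in a fresh IB chunk reached
    * via INDIRECT_BUFFER, even if the current chunk still has space.
    */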
1052 /* If force_chaining is true, we can't return. We have to chain. */
1053 if (!force_chaining) {
1054 if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
1055 return false;
1056
1057 ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);
1058
1059 if (rcs->current.max_dw - rcs->current.cdw >= dw)
1060 return true;
1061 }
1062
1063 if (!amdgpu_cs_has_chaining(cs)) {
1064 assert(!force_chaining);
1065 return false;
1066 }
1067
1068 /* Allocate a new chunk */
1069 if (rcs->num_prev >= rcs->max_prev) {
1070 unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
1071 struct radeon_cmdbuf_chunk *new_prev;
1072
1073 new_prev = REALLOC(rcs->prev,
1074 sizeof(*new_prev) * rcs->max_prev,
1075 sizeof(*new_prev) * new_max_prev);
1076 if (!new_prev)
1077 return false;
1078
1079 rcs->prev = new_prev;
1080 rcs->max_prev = new_max_prev;
1081 }
1082
1083 if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib, cs->ring_type))
1084 return false;
1085
1086 assert(ib->used_ib_space == 0);
1087 va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;
1088
1089    /* Reclaim the epilog space that was reserved in max_dw; it now holds the chaining packet. */
1090 rcs->current.max_dw += cs_epilog_dw;
1091
1092 /* Pad with NOPs and add INDIRECT_BUFFER packet */
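   /* The NOP padding below aligns the chunk so that it ends on an 8-dword
    * boundary once the 4-dword INDIRECT_BUFFER packet has been written
    * (checked by the asserts that follow).
    */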
1093 while ((rcs->current.cdw & 7) != 4)
1094 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
1095
1096 radeon_emit(rcs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
1097 radeon_emit(rcs, va);
1098 radeon_emit(rcs, va >> 32);
1099 new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw++];
1100
1101 assert((rcs->current.cdw & 7) == 0);
1102 assert(rcs->current.cdw <= rcs->current.max_dw);
1103
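   /* Close out the current chunk (its final size is written through the old
    * ptr_ib_size), then make the size dword of the new INDIRECT_BUFFER packet
    * the one that gets patched once the next chunk's size is known.
    */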
1104 amdgpu_set_ib_size(ib);
1105 ib->ptr_ib_size = new_ptr_ib_size;
1106 ib->ptr_ib_size_inside_ib = true;
1107
1108 /* Hook up the new chunk */
1109 rcs->prev[rcs->num_prev].buf = rcs->current.buf;
1110 rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
1111 rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
1112 rcs->num_prev++;
1113
1114 ib->base.prev_dw += ib->base.current.cdw;
1115 ib->base.current.cdw = 0;
1116
1117 ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
1118 ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - cs_epilog_dw;
1119 assert(ib->base.current.max_dw >= ib->max_check_space_size / 4);
1120 ib->base.gpu_address = va;
1121
1122 amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
1123 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
1124
1125 return true;
1126 }
1127
1128 static unsigned amdgpu_cs_get_buffer_list(struct radeon_cmdbuf *rcs,
1129 struct radeon_bo_list_item *list)
1130 {
1131 struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
1132 int i;
1133
1134 if (list) {
1135 for (i = 0; i < cs->num_real_buffers; i++) {
1136 list[i].bo_size = cs->real_buffers[i].bo->base.size;
1137 list[i].vm_address = cs->real_buffers[i].bo->va;
1138 list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
1139 }
1140 }
1141 return cs->num_real_buffers;
1142 }
1143
1144 static void add_fence_to_list(struct amdgpu_fence_list *fences,
1145 struct amdgpu_fence *fence)
1146 {
1147 unsigned idx = fences->num++;
1148
1149 if (idx >= fences->max) {
1150 unsigned size;
1151 const unsigned increment = 8;
1152
1153 fences->max = idx + increment;
1154 size = fences->max * sizeof(fences->list[0]);
1155 fences->list = realloc(fences->list, size);
1156 /* Clear the newly-allocated elements. */
1157 memset(fences->list + idx, 0,
1158 increment * sizeof(fences->list[0]));
1159 }
1160 amdgpu_fence_reference(&fences->list[idx], (struct pipe_fence_handle*)fence);
1161 }
1162
1163 /* TODO: recognizing dependencies as no-ops doesn't take the parallel
1164 * compute IB into account. The compute IB won't wait for these.
1165 * Also, the scheduler can execute compute and SDMA IBs on any rings.
1166 * Should we always insert dependencies?
1167 */
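/* A dependency is a no-op when it comes from the same context and ring (the
 * kernel already orders those submissions), or when the fence has already
 * signalled (checked with the zero-timeout wait below).
 */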
1168 static bool is_noop_fence_dependency(struct amdgpu_cs *acs,
1169 struct amdgpu_fence *fence)
1170 {
1171 struct amdgpu_cs_context *cs = acs->csc;
1172
1173 if (!amdgpu_fence_is_syncobj(fence) &&
1174 fence->ctx == acs->ctx &&
1175 fence->fence.ip_type == cs->ib[IB_MAIN].ip_type &&
1176 fence->fence.ip_instance == cs->ib[IB_MAIN].ip_instance &&
1177 fence->fence.ring == cs->ib[IB_MAIN].ring)
1178 return true;
1179
1180 return amdgpu_fence_wait((void *)fence, 0, false);
1181 }
1182
1183 static void amdgpu_cs_add_fence_dependency(struct radeon_cmdbuf *rws,
1184 struct pipe_fence_handle *pfence,
1185 unsigned dependency_flags)
1186 {
1187 struct amdgpu_cs *acs = amdgpu_cs(rws);
1188 struct amdgpu_cs_context *cs = acs->csc;
1189 struct amdgpu_fence *fence = (struct amdgpu_fence*)pfence;
1190
1191 util_queue_fence_wait(&fence->submitted);
1192
1193 if (dependency_flags & RADEON_DEPENDENCY_PARALLEL_COMPUTE_ONLY) {
1194 /* Syncobjs are not needed here. */
1195 assert(!amdgpu_fence_is_syncobj(fence));
1196
1197 if (acs->ctx->ws->info.has_scheduled_fence_dependency &&
1198 dependency_flags & RADEON_DEPENDENCY_START_FENCE)
1199 add_fence_to_list(&cs->compute_start_fence_dependencies, fence);
1200 else
1201 add_fence_to_list(&cs->compute_fence_dependencies, fence);
1202 return;
1203 }
1204
1205 /* Start fences are not needed here. */
1206 assert(!(dependency_flags & RADEON_DEPENDENCY_START_FENCE));
1207
1208 if (is_noop_fence_dependency(acs, fence))
1209 return;
1210
1211 if (amdgpu_fence_is_syncobj(fence))
1212 add_fence_to_list(&cs->syncobj_dependencies, fence);
1213 else
1214 add_fence_to_list(&cs->fence_dependencies, fence);
1215 }
1216
1217 static void amdgpu_add_bo_fence_dependencies(struct amdgpu_cs *acs,
1218 struct amdgpu_cs_buffer *buffer)
1219 {
1220 struct amdgpu_cs_context *cs = acs->csc;
1221 struct amdgpu_winsys_bo *bo = buffer->bo;
1222 unsigned new_num_fences = 0;
1223
1224 for (unsigned j = 0; j < bo->num_fences; ++j) {
1225 struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
1226
1227 if (is_noop_fence_dependency(acs, bo_fence))
1228 continue;
1229
1230 amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
1231 new_num_fences++;
1232
1233 if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
1234 continue;
1235
1236 add_fence_to_list(&cs->fence_dependencies, bo_fence);
1237 }
1238
1239 for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
1240 amdgpu_fence_reference(&bo->fences[j], NULL);
1241
1242 bo->num_fences = new_num_fences;
1243 }
1244
1245 /* Add the given list of fences to the buffer's fence list.
1246 *
1247 * Must be called with the winsys bo_fence_lock held.
1248 */
1249 void amdgpu_add_fences(struct amdgpu_winsys_bo *bo,
1250 unsigned num_fences,
1251 struct pipe_fence_handle **fences)
1252 {
1253 if (bo->num_fences + num_fences > bo->max_fences) {
1254 unsigned new_max_fences = MAX2(bo->num_fences + num_fences, bo->max_fences * 2);
1255 struct pipe_fence_handle **new_fences =
1256 REALLOC(bo->fences,
1257 bo->num_fences * sizeof(*new_fences),
1258 new_max_fences * sizeof(*new_fences));
1259 if (likely(new_fences)) {
1260 bo->fences = new_fences;
1261 bo->max_fences = new_max_fences;
1262 } else {
1263 unsigned drop;
1264
1265 fprintf(stderr, "amdgpu_add_fences: allocation failure, dropping fence(s)\n");
1266 if (!bo->num_fences)
1267 return;
1268
1269 bo->num_fences--; /* prefer to keep the most recent fence if possible */
1270 amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
1271
1272 drop = bo->num_fences + num_fences - bo->max_fences;
1273 num_fences -= drop;
1274 fences += drop;
1275 }
1276 }
1277
1278 for (unsigned i = 0; i < num_fences; ++i) {
1279 bo->fences[bo->num_fences] = NULL;
1280 amdgpu_fence_reference(&bo->fences[bo->num_fences], fences[i]);
1281 bo->num_fences++;
1282 }
1283 }
1284
1285 static void amdgpu_add_fence_dependencies_bo_list(struct amdgpu_cs *acs,
1286 struct pipe_fence_handle *fence,
1287 unsigned num_buffers,
1288 struct amdgpu_cs_buffer *buffers)
1289 {
1290 for (unsigned i = 0; i < num_buffers; i++) {
1291 struct amdgpu_cs_buffer *buffer = &buffers[i];
1292 struct amdgpu_winsys_bo *bo = buffer->bo;
1293
1294 amdgpu_add_bo_fence_dependencies(acs, buffer);
1295 p_atomic_inc(&bo->num_active_ioctls);
1296 amdgpu_add_fences(bo, 1, &fence);
1297 }
1298 }
1299
1300 /* Since the kernel driver doesn't synchronize execution between different
1301 * rings automatically, we have to add fence dependencies manually.
1302 */
1303 static void amdgpu_add_fence_dependencies_bo_lists(struct amdgpu_cs *acs)
1304 {
1305 struct amdgpu_cs_context *cs = acs->csc;
1306
1307 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
1308 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
1309 amdgpu_add_fence_dependencies_bo_list(acs, cs->fence, cs->num_sparse_buffers, cs->sparse_buffers);
1310 }
1311
1312 static void amdgpu_cs_add_syncobj_signal(struct radeon_cmdbuf *rws,
1313 struct pipe_fence_handle *fence)
1314 {
1315 struct amdgpu_cs *acs = amdgpu_cs(rws);
1316 struct amdgpu_cs_context *cs = acs->csc;
1317
1318 assert(amdgpu_fence_is_syncobj((struct amdgpu_fence *)fence));
1319
1320 add_fence_to_list(&cs->syncobj_to_signal, (struct amdgpu_fence*)fence);
1321 }
1322
1323 /* Add backing of sparse buffers to the buffer list.
1324 *
1325 * This is done late, during submission, to keep the buffer list short before
1326 * submit, and to avoid managing fences for the backing buffers.
1327 */
1328 static bool amdgpu_add_sparse_backing_buffers(struct amdgpu_cs_context *cs)
1329 {
1330 for (unsigned i = 0; i < cs->num_sparse_buffers; ++i) {
1331 struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
1332 struct amdgpu_winsys_bo *bo = buffer->bo;
1333
1334 simple_mtx_lock(&bo->lock);
1335
1336 list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
1337 /* We can directly add the buffer here, because we know that each
1338 * backing buffer occurs only once.
1339 */
1340 int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
1341 if (idx < 0) {
1342 fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
1343 simple_mtx_unlock(&bo->lock);
1344 return false;
1345 }
1346
1347 cs->real_buffers[idx].usage = buffer->usage & ~RADEON_USAGE_SYNCHRONIZED;
1348 cs->real_buffers[idx].u.real.priority_usage = buffer->u.real.priority_usage;
1349 p_atomic_inc(&backing->bo->num_active_ioctls);
1350 }
1351
1352 simple_mtx_unlock(&bo->lock);
1353 }
1354
1355 return true;
1356 }
1357
1358 void amdgpu_cs_submit_ib(void *job, int thread_index)
1359 {
1360 struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
1361 struct amdgpu_winsys *ws = acs->ctx->ws;
1362 struct amdgpu_cs_context *cs = acs->cst;
1363 int i, r;
1364 uint32_t bo_list = 0;
1365 uint64_t seq_no = 0;
1366 bool has_user_fence = amdgpu_cs_has_user_fence(cs);
1367 bool use_bo_list_create = ws->info.drm_minor < 27;
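   /* With DRM minor >= 27 the BO list is passed inline with the CS ioctl via an
    * AMDGPU_CHUNK_ID_BO_HANDLES chunk; older kernels need a bo_list handle
    * created up front (the legacy path below).
    */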
1368 struct drm_amdgpu_bo_list_in bo_list_in;
1369
1370 /* Prepare the buffer list. */
1371 if (ws->debug_all_bos) {
1372 /* The buffer list contains all buffers. This is a slow path that
1373 * ensures that no buffer is missing in the BO list.
1374 */
1375 unsigned num_handles = 0;
1376 struct drm_amdgpu_bo_list_entry *list =
1377 alloca(ws->num_buffers * sizeof(struct drm_amdgpu_bo_list_entry));
1378 struct amdgpu_winsys_bo *bo;
1379
1380 simple_mtx_lock(&ws->global_bo_list_lock);
1381 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
1382 if (bo->is_local)
1383 continue;
1384
1385 list[num_handles].bo_handle = bo->u.real.kms_handle;
1386 list[num_handles].bo_priority = 0;
1387 ++num_handles;
1388 }
1389
1390 r = amdgpu_bo_list_create_raw(ws->dev, ws->num_buffers, list, &bo_list);
1391 simple_mtx_unlock(&ws->global_bo_list_lock);
1392 if (r) {
1393 fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
1394 goto cleanup;
1395 }
1396 } else {
1397 if (!amdgpu_add_sparse_backing_buffers(cs)) {
1398 fprintf(stderr, "amdgpu: amdgpu_add_sparse_backing_buffers failed\n");
1399 r = -ENOMEM;
1400 goto cleanup;
1401 }
1402
1403 struct drm_amdgpu_bo_list_entry *list =
1404 alloca((cs->num_real_buffers + 2) * sizeof(struct drm_amdgpu_bo_list_entry));
1405
1406 unsigned num_handles = 0;
1407 for (i = 0; i < cs->num_real_buffers; ++i) {
1408 struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];
1409
1410 if (buffer->bo->is_local)
1411 continue;
1412
1413 assert(buffer->u.real.priority_usage != 0);
1414
1415 list[num_handles].bo_handle = buffer->bo->u.real.kms_handle;
1416 list[num_handles].bo_priority = (util_last_bit(buffer->u.real.priority_usage) - 1) / 2;
1417 ++num_handles;
1418 }
1419
1420 if (use_bo_list_create) {
1421 /* Legacy path creating the buffer list handle and passing it to the CS ioctl. */
1422 r = amdgpu_bo_list_create_raw(ws->dev, num_handles, list, &bo_list);
1423 if (r) {
1424 fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
1425 goto cleanup;
1426 }
1427 } else {
1428 /* Standard path passing the buffer list via the CS ioctl. */
1429 bo_list_in.operation = ~0;
1430 bo_list_in.list_handle = ~0;
1431 bo_list_in.bo_number = num_handles;
1432 bo_list_in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
1433 bo_list_in.bo_info_ptr = (uint64_t)(uintptr_t)list;
1434 }
1435 }
1436
1437 if (acs->ring_type == RING_GFX)
1438 ws->gfx_bo_list_counter += cs->num_real_buffers;
1439
1440 if (acs->stop_exec_on_failure && acs->ctx->num_rejected_cs) {
1441 r = -ECANCELED;
1442 } else {
1443 struct drm_amdgpu_cs_chunk chunks[6];
1444 unsigned num_chunks = 0;
1445
1446 /* BO list */
1447 if (!use_bo_list_create) {
1448 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES;
1449 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4;
1450 chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
1451 num_chunks++;
1452 }
1453
1454 /* Fence dependencies. */
1455 unsigned num_dependencies = cs->fence_dependencies.num;
1456 if (num_dependencies) {
1457 struct drm_amdgpu_cs_chunk_dep *dep_chunk =
1458 alloca(num_dependencies * sizeof(*dep_chunk));
1459
1460 for (unsigned i = 0; i < num_dependencies; i++) {
1461 struct amdgpu_fence *fence =
1462 (struct amdgpu_fence*)cs->fence_dependencies.list[i];
1463
1464 assert(util_queue_fence_is_signalled(&fence->submitted));
1465 amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
1466 }
1467
1468 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1469 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
1470 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
1471 num_chunks++;
1472 }
1473
1474 /* Syncobj dependencies. */
1475 unsigned num_syncobj_dependencies = cs->syncobj_dependencies.num;
1476 if (num_syncobj_dependencies) {
1477 struct drm_amdgpu_cs_chunk_sem *sem_chunk =
1478 alloca(num_syncobj_dependencies * sizeof(sem_chunk[0]));
1479
1480 for (unsigned i = 0; i < num_syncobj_dependencies; i++) {
1481 struct amdgpu_fence *fence =
1482 (struct amdgpu_fence*)cs->syncobj_dependencies.list[i];
1483
1484 if (!amdgpu_fence_is_syncobj(fence))
1485 continue;
1486
1487 assert(util_queue_fence_is_signalled(&fence->submitted));
1488 sem_chunk[i].handle = fence->syncobj;
1489 }
1490
1491 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
1492 chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4 * num_syncobj_dependencies;
1493 chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
1494 num_chunks++;
1495 }
1496
1497 /* Submit the parallel compute IB first. */
1498 if (cs->ib[IB_PARALLEL_COMPUTE].ib_bytes > 0) {
1499 unsigned old_num_chunks = num_chunks;
1500
1501 /* Add compute fence dependencies. */
1502 unsigned num_dependencies = cs->compute_fence_dependencies.num;
1503 if (num_dependencies) {
1504 struct drm_amdgpu_cs_chunk_dep *dep_chunk =
1505 alloca(num_dependencies * sizeof(*dep_chunk));
1506
1507 for (unsigned i = 0; i < num_dependencies; i++) {
1508 struct amdgpu_fence *fence =
1509 (struct amdgpu_fence*)cs->compute_fence_dependencies.list[i];
1510
1511 assert(util_queue_fence_is_signalled(&fence->submitted));
1512 amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
1513 }
1514
1515 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
1516 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies;
1517 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
1518 num_chunks++;
1519 }
1520
1521 /* Add compute start fence dependencies. */
1522 unsigned num_start_dependencies = cs->compute_start_fence_dependencies.num;
1523 if (num_start_dependencies) {
1524 struct drm_amdgpu_cs_chunk_dep *dep_chunk =
1525 alloca(num_start_dependencies * sizeof(*dep_chunk));
1526
1527 for (unsigned i = 0; i < num_start_dependencies; i++) {
1528 struct amdgpu_fence *fence =
1529 (struct amdgpu_fence*)cs->compute_start_fence_dependencies.list[i];
1530
1531 assert(util_queue_fence_is_signalled(&fence->submitted));
1532 amdgpu_cs_chunk_fence_to_dep(&fence->fence, &dep_chunk[i]);
1533 }
1534
1535 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES;
1536 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_start_dependencies;
1537 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
1538 num_chunks++;
1539 }
1540
1541 /* Convert from dwords to bytes. */
1542 cs->ib[IB_PARALLEL_COMPUTE].ib_bytes *= 4;
1543 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
1544 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1545 chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_PARALLEL_COMPUTE];
1546 num_chunks++;
1547
1548 r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
1549 num_chunks, chunks, NULL);
1550 if (r)
1551 goto finalize;
1552
1553 /* Back off the compute chunks. */
1554 num_chunks = old_num_chunks;
1555 }
1556
1557 /* Syncobj signals. */
1558 unsigned num_syncobj_to_signal = cs->syncobj_to_signal.num;
1559 if (num_syncobj_to_signal) {
1560 struct drm_amdgpu_cs_chunk_sem *sem_chunk =
1561 alloca(num_syncobj_to_signal * sizeof(sem_chunk[0]));
1562
1563 for (unsigned i = 0; i < num_syncobj_to_signal; i++) {
1564 struct amdgpu_fence *fence =
1565 (struct amdgpu_fence*)cs->syncobj_to_signal.list[i];
1566
1567 assert(amdgpu_fence_is_syncobj(fence));
1568 sem_chunk[i].handle = fence->syncobj;
1569 }
1570
1571 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
1572 chunks[num_chunks].length_dw = sizeof(sem_chunk[0]) / 4
1573 * num_syncobj_to_signal;
1574 chunks[num_chunks].chunk_data = (uintptr_t)sem_chunk;
1575 num_chunks++;
1576 }
1577
1578 /* Fence */
1579 if (has_user_fence) {
1580 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_FENCE;
1581 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
1582 chunks[num_chunks].chunk_data = (uintptr_t)&acs->fence_chunk;
1583 num_chunks++;
1584 }
1585
1586 /* IB */
1587 cs->ib[IB_MAIN].ib_bytes *= 4; /* Convert from dwords to bytes. */
1588 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_IB;
1589 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
1590 chunks[num_chunks].chunk_data = (uintptr_t)&cs->ib[IB_MAIN];
1591 num_chunks++;
1592
1593 assert(num_chunks <= ARRAY_SIZE(chunks));
1594
1595 r = amdgpu_cs_submit_raw2(ws->dev, acs->ctx->ctx, bo_list,
1596 num_chunks, chunks, &seq_no);
1597 }
1598 finalize:
1599
1600 if (r) {
1601 if (r == -ENOMEM)
1602 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
1603 else if (r == -ECANCELED)
1604 fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
1605 else
1606 fprintf(stderr, "amdgpu: The CS has been rejected, "
1607 "see dmesg for more information (%i).\n", r);
1608
1609 acs->ctx->num_rejected_cs++;
1610 ws->num_total_rejected_cs++;
1611 } else {
1612 /* Success. */
1613 uint64_t *user_fence = NULL;
1614
1615 if (has_user_fence)
1616 user_fence = acs->ctx->user_fence_cpu_address_base + acs->ring_type;
1617 amdgpu_fence_submitted(cs->fence, seq_no, user_fence);
1618 }
1619
1620 /* Cleanup. */
1621 if (bo_list)
1622 amdgpu_bo_list_destroy_raw(ws->dev, bo_list);
1623
1624 cleanup:
1625 /* If there was an error, signal the fence, because it won't be signalled
1626 * by the hardware. */
1627 if (r)
1628 amdgpu_fence_signalled(cs->fence);
1629
1630 cs->error_code = r;
1631
1632 for (i = 0; i < cs->num_real_buffers; i++)
1633 p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
1634 for (i = 0; i < cs->num_slab_buffers; i++)
1635 p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);
1636 for (i = 0; i < cs->num_sparse_buffers; i++)
1637 p_atomic_dec(&cs->sparse_buffers[i].bo->num_active_ioctls);
1638
1639 amdgpu_cs_context_cleanup(cs);
1640 }
1641
1642 /* Make sure the previous submission is completed. */
1643 void amdgpu_cs_sync_flush(struct radeon_cmdbuf *rcs)
1644 {
1645 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1646
1647 /* Wait for any pending ioctl of this CS to complete. */
1648 util_queue_fence_wait(&cs->flush_completed);
1649 }
1650
1651 static int amdgpu_cs_flush(struct radeon_cmdbuf *rcs,
1652 unsigned flags,
1653 struct pipe_fence_handle **fence)
1654 {
1655 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1656 struct amdgpu_winsys *ws = cs->ctx->ws;
1657 int error_code = 0;
1658
1659 rcs->current.max_dw += amdgpu_cs_epilog_dws(cs);
1660
1661 switch (cs->ring_type) {
1662 case RING_DMA:
1663 /* pad DMA ring to 8 DWs */
1664 if (ws->info.chip_class <= GFX6) {
1665 while (rcs->current.cdw & 7)
1666 radeon_emit(rcs, 0xf0000000); /* NOP packet */
1667 } else {
1668 while (rcs->current.cdw & 7)
1669 radeon_emit(rcs, 0x00000000); /* NOP packet */
1670 }
1671 break;
1672 case RING_GFX:
1673 case RING_COMPUTE:
1674 /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
1675 if (ws->info.gfx_ib_pad_with_type2) {
1676 while (rcs->current.cdw & 7)
1677 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1678 } else {
1679 while (rcs->current.cdw & 7)
1680 radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
1681 }
1682 if (cs->ring_type == RING_GFX)
1683 ws->gfx_ib_size_counter += (rcs->prev_dw + rcs->current.cdw) * 4;
1684
1685 /* Also pad secondary IBs. */
1686 if (cs->compute_ib.ib_mapped) {
1687 while (cs->compute_ib.base.current.cdw & 7)
1688 radeon_emit(&cs->compute_ib.base, 0xffff1000); /* type3 nop packet */
1689 }
1690 break;
1691 case RING_UVD:
1692 case RING_UVD_ENC:
1693 while (rcs->current.cdw & 15)
1694 radeon_emit(rcs, 0x80000000); /* type2 nop packet */
1695 break;
1696 case RING_VCN_JPEG:
1697 if (rcs->current.cdw % 2)
1698 assert(0);
1699 while (rcs->current.cdw & 15) {
1700 radeon_emit(rcs, 0x60000000); /* nop packet */
1701 radeon_emit(rcs, 0x00000000);
1702 }
1703 break;
1704 case RING_VCN_DEC:
1705 while (rcs->current.cdw & 15)
1706 radeon_emit(rcs, 0x81ff); /* nop packet */
1707 break;
1708 default:
1709 break;
1710 }
1711
1712 if (rcs->current.cdw > rcs->current.max_dw) {
1713 fprintf(stderr, "amdgpu: command stream overflowed\n");
1714 }
1715
1716    /* If the CS is not empty, hasn't overflowed, and noop mode is off... */
1717 if (likely(radeon_emitted(&cs->main.base, 0) &&
1718 cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
1719 !debug_get_option_noop())) {
1720 struct amdgpu_cs_context *cur = cs->csc;
1721
1722 /* Set IB sizes. */
1723 amdgpu_ib_finalize(ws, &cs->main);
1724
1725 if (cs->compute_ib.ib_mapped)
1726 amdgpu_ib_finalize(ws, &cs->compute_ib);
1727
1728 /* Create a fence. */
1729 amdgpu_fence_reference(&cur->fence, NULL);
1730 if (cs->next_fence) {
1731 /* just move the reference */
1732 cur->fence = cs->next_fence;
1733 cs->next_fence = NULL;
1734 } else {
1735 cur->fence = amdgpu_fence_create(cs->ctx,
1736 cur->ib[IB_MAIN].ip_type,
1737 cur->ib[IB_MAIN].ip_instance,
1738 cur->ib[IB_MAIN].ring);
1739 }
1740 if (fence)
1741 amdgpu_fence_reference(fence, cur->fence);
1742
1743 amdgpu_cs_sync_flush(rcs);
1744
1745 /* Prepare buffers.
1746 *
1747 * This fence must be held until the submission is queued to ensure
1748 * that the order of fence dependency updates matches the order of
1749 * submissions.
1750 */
1751 simple_mtx_lock(&ws->bo_fence_lock);
1752 amdgpu_add_fence_dependencies_bo_lists(cs);
1753
1754 /* Swap command streams. "cst" is going to be submitted. */
1755 cs->csc = cs->cst;
1756 cs->cst = cur;
1757
1758 /* Submit. */
1759 util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
1760 amdgpu_cs_submit_ib, NULL);
1761 /* The submission has been queued, unlock the fence now. */
1762 simple_mtx_unlock(&ws->bo_fence_lock);
1763
1764 if (!(flags & PIPE_FLUSH_ASYNC)) {
1765 amdgpu_cs_sync_flush(rcs);
1766 error_code = cur->error_code;
1767 }
1768 } else {
1769 amdgpu_cs_context_cleanup(cs->csc);
1770 }
1771
1772 amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
1773 if (cs->compute_ib.ib_mapped)
1774 amdgpu_get_new_ib(&ws->base, cs, IB_PARALLEL_COMPUTE);
1775
1776 cs->main.base.used_gart = 0;
1777 cs->main.base.used_vram = 0;
1778
1779 if (cs->ring_type == RING_GFX)
1780 ws->num_gfx_IBs++;
1781 else if (cs->ring_type == RING_DMA)
1782 ws->num_sdma_IBs++;
1783
1784 return error_code;
1785 }
1786
1787 static void amdgpu_cs_destroy(struct radeon_cmdbuf *rcs)
1788 {
1789 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1790
1791 amdgpu_cs_sync_flush(rcs);
1792 util_queue_fence_destroy(&cs->flush_completed);
1793 p_atomic_dec(&cs->ctx->ws->num_cs);
1794 pb_reference(&cs->main.big_ib_buffer, NULL);
1795 FREE(cs->main.base.prev);
1796 pb_reference(&cs->compute_ib.big_ib_buffer, NULL);
1797 FREE(cs->compute_ib.base.prev);
1798 amdgpu_destroy_cs_context(&cs->csc1);
1799 amdgpu_destroy_cs_context(&cs->csc2);
1800 amdgpu_fence_reference(&cs->next_fence, NULL);
1801 FREE(cs);
1802 }
1803
1804 static bool amdgpu_bo_is_referenced(struct radeon_cmdbuf *rcs,
1805 struct pb_buffer *_buf,
1806 enum radeon_bo_usage usage)
1807 {
1808 struct amdgpu_cs *cs = amdgpu_cs(rcs);
1809 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
1810
1811 return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
1812 }
1813
1814 void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
1815 {
1816 ws->base.ctx_create = amdgpu_ctx_create;
1817 ws->base.ctx_destroy = amdgpu_ctx_destroy;
1818 ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
1819 ws->base.cs_create = amdgpu_cs_create;
1820 ws->base.cs_add_parallel_compute_ib = amdgpu_cs_add_parallel_compute_ib;
1821 ws->base.cs_destroy = amdgpu_cs_destroy;
1822 ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
1823 ws->base.cs_validate = amdgpu_cs_validate;
1824 ws->base.cs_check_space = amdgpu_cs_check_space;
1825 ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
1826 ws->base.cs_flush = amdgpu_cs_flush;
1827 ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
1828 ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
1829 ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
1830 ws->base.cs_add_fence_dependency = amdgpu_cs_add_fence_dependency;
1831 ws->base.cs_add_syncobj_signal = amdgpu_cs_add_syncobj_signal;
1832 ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
1833 ws->base.fence_reference = amdgpu_fence_reference;
1834 ws->base.fence_import_syncobj = amdgpu_fence_import_syncobj;
1835 ws->base.fence_import_sync_file = amdgpu_fence_import_sync_file;
1836 ws->base.fence_export_sync_file = amdgpu_fence_export_sync_file;
1837 ws->base.export_signalled_sync_file = amdgpu_export_signalled_sync_file;
1838 }