winsys/amdgpu: build handles and flags list late on submit thread
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>

#include "amd/common/sid.h"

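/* With RADEON_NOOP enabled (e.g. RADEON_NOOP=true in the environment),
 * amdgpu_cs_flush() below builds command streams but never submits them,
 * which is a cheap way to take GPU execution out of the picture. */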
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)

/* FENCES */

static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}

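/* Waiting happens in up to three steps: wait for the submission thread to
 * assign a sequence number, then check the CPU-visible user fence value if
 * there is one, and only fall back to the amdgpu_cs_query_fence_status
 * ioctl when that check is not conclusive. */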
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (debug_get_option_noop())
      return NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->request.ip_type,
                               cs->csc->request.ip_instance,
                               cs->csc->request.ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}

/* CONTEXTS */

static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;
   ctx->initial_num_total_rejected_cs = ctx->ws->num_total_rejected_cs;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

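   /* Allocate a GTT page to hold the user fence values that the GPU writes
    * at the end of a submission: amdgpu_cs_submit_ib() points fence_info at
    * this BO (one 64-bit slot per ring type), and amdgpu_fence_wait() polls
    * the CPU mapping created below. */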
   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   /* Return a failure due to a rejected command submission. */
   if (ctx->ws->num_total_rejected_cs > ctx->initial_num_total_rejected_cs) {
      return ctx->num_rejected_cs ? PIPE_GUILTY_CONTEXT_RESET :
                                    PIPE_INNOCENT_CONTEXT_RESET;
   }

   /* Return a failure due to a GPU hang. */
   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE;
}

static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
   return cs->ctx->ws->info.chip_class >= CIK &&
          cs->ring_type == RING_GFX;
}

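/* Dwords kept in reserve at the end of an IB: the GFX ring needs room for
 * the 4-dword INDIRECT_BUFFER packet that amdgpu_cs_check_space() emits
 * when chaining to a new IB buffer. */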
static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
{
   if (ring_type == RING_GFX)
      return 4; /* for chaining */

   return 0;
}

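/* buffer_indices_hashlist is a small direct-mapped cache that maps a BO's
 * unique_id to its index in the real or slab buffer array. Returns the
 * index on a hit, or -1 if the BO is not on the list. */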
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];
   struct amdgpu_cs_buffer *buffers;
   int num_buffers;

   if (bo->bo) {
      buffers = cs->real_buffers;
      num_buffers = cs->num_real_buffers;
   } else {
      buffers = cs->slab_buffers;
      num_buffers = cs->num_slab_buffers;
   }

   /* Fast path: the cached index is either unset (not found) or it already
    * points at this BO (found). */
   if (i < 0 || (i < num_buffers && buffers[i].bo == bo))
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = num_buffers - 1; i >= 0; i--) {
      if (buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}

static int
amdgpu_do_add_real_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_buffer *buffer;
   int idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_real_buffers >= cs->max_real_buffers) {
      unsigned new_max =
         MAX2(cs->max_real_buffers + 16, (unsigned)(cs->max_real_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));

      if (!new_buffers) {
         fprintf(stderr, "amdgpu_do_add_real_buffer: allocation failed\n");
         FREE(new_buffers);
         return -1;
      }

      memcpy(new_buffers, cs->real_buffers, cs->num_real_buffers * sizeof(*new_buffers));

      FREE(cs->real_buffers);

      cs->max_real_buffers = new_max;
      cs->real_buffers = new_buffers;
   }

   idx = cs->num_real_buffers;
   buffer = &cs->real_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   p_atomic_inc(&bo->num_cs_references);
   cs->num_real_buffers++;

   return idx;
}

static int
amdgpu_lookup_or_add_real_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   idx = amdgpu_do_add_real_buffer(cs, bo);

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      acs->main.base.used_vram += bo->base.size;
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      acs->main.base.used_gart += bo->base.size;

   return idx;
}

static int amdgpu_lookup_or_add_slab_buffer(struct amdgpu_cs *acs,
                                            struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);
   int real_idx;

   if (idx >= 0)
      return idx;

   real_idx = amdgpu_lookup_or_add_real_buffer(acs, bo->u.slab.real);
   if (real_idx < 0)
      return -1;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_slab_buffers >= cs->max_slab_buffers) {
      unsigned new_max =
         MAX2(cs->max_slab_buffers + 16, (unsigned)(cs->max_slab_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;

      new_buffers = REALLOC(cs->slab_buffers,
                            cs->max_slab_buffers * sizeof(*new_buffers),
                            new_max * sizeof(*new_buffers));
      if (!new_buffers) {
         fprintf(stderr, "amdgpu_lookup_or_add_slab_buffer: allocation failed\n");
         return -1;
      }

      cs->max_slab_buffers = new_max;
      cs->slab_buffers = new_buffers;
   }

   idx = cs->num_slab_buffers;
   buffer = &cs->slab_buffers[idx];

   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   buffer->u.slab.real_idx = real_idx;
   p_atomic_inc(&bo->num_cs_references);
   cs->num_slab_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   return idx;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs_buffer *buffer;
   int index;

   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (bo == cs->last_added_bo &&
       (usage & cs->last_added_bo_usage) == usage &&
       (1ull << priority) & cs->last_added_bo_priority_usage)
      return cs->last_added_bo_index;

   if (!bo->bo) {
      index = amdgpu_lookup_or_add_slab_buffer(acs, bo);
      if (index < 0)
         return 0;

      buffer = &cs->slab_buffers[index];
      buffer->usage |= usage;

      usage &= ~RADEON_USAGE_SYNCHRONIZED;
      index = buffer->u.slab.real_idx;
   } else {
      index = amdgpu_lookup_or_add_real_buffer(acs, bo);
      if (index < 0)
         return 0;
   }

   buffer = &cs->real_buffers[index];
   buffer->u.real.priority_usage |= 1llu << priority;
   buffer->usage |= usage;

   cs->last_added_bo = bo;
   cs->last_added_bo_index = index;
   cs->last_added_bo_usage = buffer->usage;
   cs->last_added_bo_priority_usage = buffer->u.real.priority_usage;
   return index;
}

static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   buffer_size = MIN2(buffer_size, 4 * 512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}

static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   if (!amdgpu_cs_has_chaining(cs)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   info->size = 0;
   ib->ptr_ib_size = &info->size;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
   return true;
}

static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
{
   *ib->ptr_ib_size |= ib->base.current.cdw;
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}

static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   cs->last_added_bo = NULL;

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return true;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_real_buffers; i++) {
      p_atomic_dec(&cs->real_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->real_buffers[i].bo, NULL);
   }
   for (i = 0; i < cs->num_slab_buffers; i++) {
      p_atomic_dec(&cs->slab_buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->slab_buffers[i].bo, NULL);
   }

   cs->num_real_buffers = 0;
   cs->num_slab_buffers = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   cs->last_added_bo = NULL;
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->real_buffers);
   FREE(cs->handles);
   FREE(cs->slab_buffers);
   FREE(cs->request.dependencies);
}


static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   util_queue_fence_init(&cs->flush_completed);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

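   /* The two submission contexts are used in a double-buffered fashion:
    * "csc" is the context currently being filled on the driver thread, while
    * "cst" holds the previously filled one that the queue thread submits;
    * amdgpu_cs_flush() swaps them. */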
   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}

static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return true;
}

static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   if (rcs->current.max_dw - rcs->current.cdw >= dw)
      return true;

   if (!amdgpu_cs_has_chaining(cs))
      return false;

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_winsys_cs_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += 4;
   assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);

   /* Pad with NOPs and add INDIRECT_BUFFER packet */
   while ((rcs->current.cdw & 7) != 4)
      radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

   radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
                                                : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
   radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   *ib->ptr_ib_size |= rcs->current.cdw;
   ib->ptr_ib_size = new_ptr_ib_size;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_real_buffers; i++) {
         list[i].bo_size = cs->real_buffers[i].bo->base.size;
         list[i].vm_address = cs->real_buffers[i].bo->va;
         list[i].priority_usage = cs->real_buffers[i].u.real.priority_usage;
      }
   }
   return cs->num_real_buffers;
}

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

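/* Record the fences a buffer is busy with: fences that already signalled or
 * that belong to the same context and ring are dropped from the buffer's
 * fence list, and the remaining ones are added to request.dependencies when
 * the buffer is used with RADEON_USAGE_SYNCHRONIZED. */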
static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
                                        struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   struct amdgpu_cs_fence *dep;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
      unsigned idx;

      if (bo_fence->ctx == acs->ctx &&
          bo_fence->fence.ip_type == cs->request.ip_type &&
          bo_fence->fence.ip_instance == cs->request.ip_instance &&
          bo_fence->fence.ring == cs->request.ring)
         continue;

      if (amdgpu_fence_wait((void *)bo_fence, 0, false))
         continue;

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
         continue;

      if (bo_fence->submission_in_progress)
         os_wait_until_zero(&bo_fence->submission_in_progress,
                            PIPE_TIMEOUT_INFINITE);

      idx = cs->request.number_of_dependencies++;
      if (idx >= cs->max_dependencies) {
         unsigned size;

         cs->max_dependencies = idx + 8;
         size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
         cs->request.dependencies = realloc(cs->request.dependencies, size);
      }

      dep = &cs->request.dependencies[idx];
      memcpy(dep, &bo_fence->fence, sizeof(*dep));
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}

static void amdgpu_add_fence(struct amdgpu_winsys_bo *bo,
                             struct pipe_fence_handle *fence)
{
   if (bo->num_fences >= bo->max_fences) {
      unsigned new_max_fences = MAX2(1, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (new_fences) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;
      } else {
         fprintf(stderr, "amdgpu_add_fence: allocation failure, dropping fence\n");
         if (!bo->num_fences)
            return;

         bo->num_fences--; /* prefer to keep a more recent fence if possible */
         amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
      }
   }

   bo->fences[bo->num_fences] = NULL;
   amdgpu_fence_reference(&bo->fences[bo->num_fences], fence);
   bo->num_fences++;
}

static void amdgpu_add_fence_dependencies_list(struct amdgpu_cs *acs,
                                               struct pipe_fence_handle *fence,
                                               unsigned num_buffers,
                                               struct amdgpu_cs_buffer *buffers)
{
   for (unsigned i = 0; i < num_buffers; i++) {
      struct amdgpu_cs_buffer *buffer = &buffers[i];
      struct amdgpu_winsys_bo *bo = buffer->bo;

      amdgpu_add_fence_dependency(acs, buffer);
      p_atomic_inc(&bo->num_active_ioctls);
      amdgpu_add_fence(bo, fence);
   }
}

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;

   cs->request.number_of_dependencies = 0;

   amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_real_buffers, cs->real_buffers);
   amdgpu_add_fence_dependencies_list(acs, cs->fence, cs->num_slab_buffers, cs->slab_buffers);
}

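/* The actual submission, executed asynchronously on the winsys queue thread
 * (see util_queue_add_job in amdgpu_cs_flush). The buffer list, including
 * the handles and flags arrays used in the non-RADEON_ALL_BOS path, is
 * built here on the submit thread rather than on the application thread. */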
void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;

   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      mtx_lock(&ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         mtx_unlock(&ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         cs->error_code = -ENOMEM;
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      mtx_unlock(&ws->global_bo_list_lock);
   } else {
      if (cs->max_real_submit < cs->num_real_buffers) {
         FREE(cs->handles);
         FREE(cs->flags);

         cs->handles = MALLOC(sizeof(*cs->handles) * cs->num_real_buffers);
         cs->flags = MALLOC(sizeof(*cs->flags) * cs->num_real_buffers);

         if (!cs->handles || !cs->flags) {
            cs->max_real_submit = 0;
            r = -ENOMEM;
            goto bo_list_error;
         }
      }

      for (i = 0; i < cs->num_real_buffers; ++i) {
         struct amdgpu_cs_buffer *buffer = &cs->real_buffers[i];

         assert(buffer->u.real.priority_usage != 0);

         cs->handles[i] = buffer->bo->bo;
         cs->flags[i] = (util_last_bit64(buffer->u.real.priority_usage) - 1) / 4;
      }

      r = amdgpu_bo_list_create(ws->dev, cs->num_real_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }
bo_list_error:

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      cs->error_code = r;
      goto cleanup;
   }

   if (acs->ctx->num_rejected_cs)
      r = -ECANCELED;
   else
      r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);

   cs->error_code = r;
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else if (r == -ECANCELED)
         fprintf(stderr, "amdgpu: The CS has been cancelled because the context is lost.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      amdgpu_fence_signalled(cs->fence);

      acs->ctx->num_rejected_cs++;
      ws->num_total_rejected_cs++;
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_real_buffers; i++)
      p_atomic_dec(&cs->real_buffers[i].bo->num_active_ioctls);
   for (i = 0; i < cs->num_slab_buffers; i++)
      p_atomic_dec(&cs->slab_buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   util_queue_fence_wait(&cs->flush_completed);
}

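/* Pad the IBs, finalize their sizes, attach a fence, record buffer
 * dependencies under bo_fence_lock, swap csc/cst and queue the submission;
 * unless RADEON_FLUSH_ASYNC is set, also wait for the submission and return
 * its error code. */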
static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;
   int error_code = 0;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= SI) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
      }

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* Submit only if the CS is neither empty nor overflowed, and noop mode is off. */
   if (likely(radeon_emitted(&cs->main.base, 0) &&
              cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
              !debug_get_option_noop())) {
      struct amdgpu_cs_context *cur = cs->csc;

      /* Set IB sizes. */
      amdgpu_ib_finalize(&cs->main);

      if (cs->const_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_ib);

      if (cs->const_preamble_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_preamble_ib);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->request.ip_type,
                                          cur->request.ip_instance,
                                          cur->request.ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      amdgpu_cs_sync_flush(rcs);

      /* Prepare buffers.
       *
       * bo_fence_lock must be held until the submission is queued to ensure
       * that the order of fence dependency updates matches the order of
       * submissions.
       */
      mtx_lock(&ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                         amdgpu_cs_submit_ib, NULL);
      /* The submission has been queued, unlock the fence now. */
      mtx_unlock(&ws->bo_fence_lock);

      if (!(flags & RADEON_FLUSH_ASYNC)) {
         amdgpu_cs_sync_flush(rcs);
         error_code = cur->error_code;
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   if (cs->ring_type == RING_GFX)
      ws->num_gfx_IBs++;
   else if (cs->ring_type == RING_DMA)
      ws->num_sdma_IBs++;

   return error_code;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   FREE(cs->const_ib.base.prev);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs->const_preamble_ib.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}