winsys/amdgpu: add slab entry structures to amdgpu_winsys_bo

src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>

#include "amd/common/sid.h"

/* FENCES */

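/* Fence lifecycle, as implemented below: a fence is created with
 * submission_in_progress = true, receives its sequence number and user
 * fence address in amdgpu_fence_submitted() once the submit ioctl has
 * returned, and flips "signalled" to true either in amdgpu_fence_signalled()
 * (failed submission) or once amdgpu_fence_wait() observes completion.
 */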
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   if (!fence)
      return NULL;

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}

bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

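/* Return the fence that the next flush will produce. The fence is cached in
 * cs->next_fence; amdgpu_cs_flush() moves that reference into the submission
 * context instead of creating a new one, so repeated calls before the flush
 * all observe the same fence.
 */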
static struct pipe_fence_handle *
amdgpu_cs_get_next_fence(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct pipe_fence_handle *fence = NULL;

   if (cs->next_fence) {
      amdgpu_fence_reference(&fence, cs->next_fence);
      return fence;
   }

   fence = amdgpu_fence_create(cs->ctx,
                               cs->csc->request.ip_type,
                               cs->csc->request.ip_instance,
                               cs->csc->request.ring);
   if (!fence)
      return NULL;

   amdgpu_fence_reference(&cs->next_fence, fence);
   return fence;
}

/* CONTEXTS */

static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   /* The UVD and VCE rings don't support the user fence mechanism. */
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE;
}

static bool amdgpu_cs_has_chaining(struct amdgpu_cs *cs)
{
   return cs->ctx->ws->info.chip_class >= CIK &&
          cs->ring_type == RING_GFX;
}

static unsigned amdgpu_cs_epilog_dws(enum ring_type ring_type)
{
   if (ring_type == RING_GFX)
      return 4; /* for chaining */

   return 0;
}
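
/* Note on the epilog size: the chain emitted by amdgpu_cs_check_space() is a
 * type-3 INDIRECT_BUFFER packet (header + VA low + VA high + size/flags),
 * i.e. exactly the 4 dwords reserved above.
 */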

int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* The hash slot is either empty (not found) or caches this BO's index. */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *    AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}

static int
amdgpu_lookup_or_add_buffer(struct amdgpu_cs *acs, struct amdgpu_winsys_bo *bo)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash;
   int idx = amdgpu_lookup_buffer(cs, bo);

   if (idx >= 0)
      return idx;

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      unsigned new_max =
         MAX2(cs->max_num_buffers + 16, (unsigned)(cs->max_num_buffers * 1.3));
      struct amdgpu_cs_buffer *new_buffers;
      amdgpu_bo_handle *new_handles;
      uint8_t *new_flags;

      new_buffers = MALLOC(new_max * sizeof(*new_buffers));
      new_handles = MALLOC(new_max * sizeof(*new_handles));
      new_flags = MALLOC(new_max * sizeof(*new_flags));

      if (!new_buffers || !new_handles || !new_flags) {
         fprintf(stderr, "amdgpu_lookup_or_add_buffer: allocation failed\n");
         FREE(new_buffers);
         FREE(new_handles);
         FREE(new_flags);
         return -1;
      }

      memcpy(new_buffers, cs->buffers, cs->num_buffers * sizeof(*new_buffers));
      memcpy(new_handles, cs->handles, cs->num_buffers * sizeof(*new_handles));
      memcpy(new_flags, cs->flags, cs->num_buffers * sizeof(*new_flags));

      FREE(cs->buffers);
      FREE(cs->handles);
      FREE(cs->flags);

      cs->max_num_buffers = new_max;
      cs->buffers = new_buffers;
      cs->handles = new_handles;
      cs->flags = new_flags;
   }

   idx = cs->num_buffers;
   buffer = &cs->buffers[idx];
   memset(buffer, 0, sizeof(*buffer));
   amdgpu_winsys_bo_reference(&buffer->bo, bo);
   cs->handles[idx] = bo->bo;
   cs->flags[idx] = 0;
   p_atomic_inc(&bo->num_cs_references);
   cs->num_buffers++;

   hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   cs->buffer_indices_hashlist[hash] = idx;

   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
      acs->main.base.used_vram += bo->base.size;
   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
      acs->main.base.used_gart += bo->base.size;

   return idx;
}
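
/* Growth policy example: "+16 or *1.3, whichever is larger" grows small
 * arrays linearly and large ones geometrically, e.g.
 * 0 -> 16 -> 32 -> 48 -> 64 -> 83 -> 107 -> ... (from 64 on, 1.3x wins).
 */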

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *acs = amdgpu_cs(rcs);
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   struct amdgpu_cs_buffer *buffer;
   int index = amdgpu_lookup_or_add_buffer(acs, bo);

   if (index < 0)
      return 0;

   buffer = &cs->buffers[index];
   buffer->priority_usage |= 1llu << priority;
   buffer->usage |= usage;
   cs->flags[index] = MAX2(cs->flags[index], priority / 4);
   return index;
}

static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is at least as large as the maximum seen IB
    * size, aligned to a power of two (and multiplied by 4 to reduce internal
    * fragmentation if chaining is not available). Limit to 512k dwords, which
    * is the largest power of two that fits into the size field of the
    * INDIRECT_BUFFER packet.
    */
   if (amdgpu_cs_has_chaining(amdgpu_cs_from_ib(ib)))
      buffer_size = 4 * util_next_power_of_two(ib->max_ib_size);
   else
      buffer_size = 4 * util_next_power_of_two(4 * ib->max_ib_size);

   buffer_size = MIN2(buffer_size, 4 * 512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}
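
/* Sizing example (max_ib_size is in dwords, buffer_size in bytes): with
 * max_ib_size = 5000 and chaining, buffer_size = 4 * next_pow2(5000)
 * = 32768 bytes; without chaining, 4 * next_pow2(4 * 5000) = 131072 bytes;
 * both capped at 4 * 512 * 1024 = 2 MiB.
 */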

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits mean the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *    http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}

static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *    http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   if (!amdgpu_cs_has_chaining(cs)) {
      ib_size = MAX2(ib_size,
                     4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                              amdgpu_ib_max_submit_dwords(ib_type)));
   }

   /* Decay the maximum seen IB size by 1/32 per flush, so the allocation
    * can shrink again after a transient peak. */
   ib->max_ib_size = ib->max_ib_size - ib->max_ib_size / 32;

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   info->size = 0;
   ib->ptr_ib_size = &info->size;

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);
   return true;
}

static void amdgpu_ib_finalize(struct amdgpu_ib *ib)
{
   *ib->ptr_ib_size |= ib->base.current.cdw;
   ib->used_ib_space += ib->base.current.cdw * 4;
   ib->max_ib_size = MAX2(ib->max_ib_size, ib->base.prev_dw + ib->base.current.cdw);
}
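
/* ptr_ib_size points either at the kernel's info->size (first chunk) or at
 * the size dword of the last INDIRECT_BUFFER chain packet. OR-ing the dword
 * count in works in both cases: info->size starts at 0, and the chain dword
 * is pre-filled with only the CHAIN/VALID bits, leaving the size bits clear.
 */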

static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return true;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}


static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   util_queue_fence_init(&cs->flush_completed);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}
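
/* Note: pointing request.ibs at &ib[IB_CONST] (or &ib[IB_CONST_PREAMBLE])
 * with number_of_ibs = 2 (or 3) submits a contiguous slice of the ib[]
 * array ending at IB_MAIN, which relies on the enum ordering
 * IB_CONST_PREAMBLE < IB_CONST < IB_MAIN.
 */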

static bool amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return true;
}

static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;
   uint64_t va;
   uint32_t *new_ptr_ib_size;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   if (rcs->current.max_dw - rcs->current.cdw >= dw)
      return true;

   if (!amdgpu_cs_has_chaining(cs))
      return false;

   /* Allocate a new chunk */
   if (rcs->num_prev >= rcs->max_prev) {
      unsigned new_max_prev = MAX2(1, 2 * rcs->max_prev);
      struct radeon_winsys_cs_chunk *new_prev;

      new_prev = REALLOC(rcs->prev,
                         sizeof(*new_prev) * rcs->max_prev,
                         sizeof(*new_prev) * new_max_prev);
      if (!new_prev)
         return false;

      rcs->prev = new_prev;
      rcs->max_prev = new_max_prev;
   }

   if (!amdgpu_ib_new_buffer(cs->ctx->ws, ib))
      return false;

   assert(ib->used_ib_space == 0);
   va = amdgpu_winsys_bo(ib->big_ib_buffer)->va;

   /* This space was originally reserved. */
   rcs->current.max_dw += 4;
   assert(ib->used_ib_space + 4 * rcs->current.max_dw <= ib->big_ib_buffer->size);

   /* Pad with NOPs so that (cdw % 8) == 4, then add the INDIRECT_BUFFER
    * packet: together with its 4 dwords the chunk ends 8-dword aligned,
    * which the asserts below check. */
   while ((rcs->current.cdw & 7) != 4)
      radeon_emit(rcs, 0xffff1000); /* type3 nop packet */

   radeon_emit(rcs, PKT3(ib->ib_type == IB_MAIN ? PKT3_INDIRECT_BUFFER_CIK
                                                : PKT3_INDIRECT_BUFFER_CONST, 2, 0));
   radeon_emit(rcs, va);
   radeon_emit(rcs, va >> 32);
   new_ptr_ib_size = &rcs->current.buf[rcs->current.cdw];
   radeon_emit(rcs, S_3F2_CHAIN(1) | S_3F2_VALID(1));

   assert((rcs->current.cdw & 7) == 0);
   assert(rcs->current.cdw <= rcs->current.max_dw);

   *ib->ptr_ib_size |= rcs->current.cdw;
   ib->ptr_ib_size = new_ptr_ib_size;

   /* Hook up the new chunk */
   rcs->prev[rcs->num_prev].buf = rcs->current.buf;
   rcs->prev[rcs->num_prev].cdw = rcs->current.cdw;
   rcs->prev[rcs->num_prev].max_dw = rcs->current.cdw; /* no modifications */
   rcs->num_prev++;

   ib->base.prev_dw += ib->base.current.cdw;
   ib->base.current.cdw = 0;

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.current.max_dw = ib->big_ib_buffer->size / 4 - amdgpu_cs_epilog_dws(cs->ring_type);

   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   return true;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         list[i].bo_size = cs->buffers[i].bo->base.size;
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

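/* Prune a buffer's fence list and collect inter-ring dependencies: fences
 * from the same ring/context and already-signalled fences are dropped; the
 * rest stay on the BO and, for synchronized usage, are also appended to
 * request.dependencies so the kernel waits for them before executing the CS.
 */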
static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
                                        struct amdgpu_cs_buffer *buffer)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_winsys_bo *bo = buffer->bo;
   struct amdgpu_cs_fence *dep;
   unsigned new_num_fences = 0;

   for (unsigned j = 0; j < bo->num_fences; ++j) {
      struct amdgpu_fence *bo_fence = (void *)bo->fences[j];
      unsigned idx;

      if (bo_fence->ctx == acs->ctx &&
          bo_fence->fence.ip_type == cs->request.ip_type &&
          bo_fence->fence.ip_instance == cs->request.ip_instance &&
          bo_fence->fence.ring == cs->request.ring)
         continue;

      if (amdgpu_fence_wait((void *)bo_fence, 0, false))
         continue;

      amdgpu_fence_reference(&bo->fences[new_num_fences], bo->fences[j]);
      new_num_fences++;

      if (!(buffer->usage & RADEON_USAGE_SYNCHRONIZED))
         continue;

      if (bo_fence->submission_in_progress)
         os_wait_until_zero(&bo_fence->submission_in_progress,
                            PIPE_TIMEOUT_INFINITE);

      idx = cs->request.number_of_dependencies++;
      if (idx >= cs->max_dependencies) {
         unsigned size;

         cs->max_dependencies = idx + 8;
         size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
         cs->request.dependencies = realloc(cs->request.dependencies, size);
      }

      dep = &cs->request.dependencies[idx];
      memcpy(dep, &bo_fence->fence, sizeof(*dep));
   }

   for (unsigned j = new_num_fences; j < bo->num_fences; ++j)
      amdgpu_fence_reference(&bo->fences[j], NULL);

   bo->num_fences = new_num_fences;
}

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;
   int i;

   cs->request.number_of_dependencies = 0;

   for (i = 0; i < cs->num_buffers; i++)
      amdgpu_add_fence_dependency(acs, &cs->buffers[i]);
}

static void amdgpu_add_fence(struct amdgpu_winsys_bo *bo,
                             struct pipe_fence_handle *fence)
{
   if (bo->num_fences >= bo->max_fences) {
      unsigned new_max_fences = MAX2(1, bo->max_fences * 2);
      struct pipe_fence_handle **new_fences =
         REALLOC(bo->fences,
                 bo->num_fences * sizeof(*new_fences),
                 new_max_fences * sizeof(*new_fences));
      if (new_fences) {
         bo->fences = new_fences;
         bo->max_fences = new_max_fences;
      } else {
         fprintf(stderr, "amdgpu_add_fence: allocation failure, dropping fence\n");
         if (!bo->num_fences)
            return;

         bo->num_fences--; /* prefer to keep a more recent fence if possible */
         amdgpu_fence_reference(&bo->fences[bo->num_fences], NULL);
      }
   }

   bo->fences[bo->num_fences] = NULL;
   amdgpu_fence_reference(&bo->fences[bo->num_fences], fence);
   bo->num_fences++;
}
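
/* amdgpu_cs_submit_ib() doubles as the util_queue job entry point (hence the
 * otherwise unused thread_index). It always consumes acs->cst, which
 * amdgpu_cs_flush() swaps out before queuing the job, so new commands can be
 * recorded into the other context concurrently.
 */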
void amdgpu_cs_submit_ib(void *job, int thread_index)
{
   struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;

   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      /* The offset is in 64-bit fence slots, one per ring type (see the
       * user_fence_cpu_address_base arithmetic below). */
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      pipe_mutex_lock(ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         pipe_mutex_unlock(ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         cs->error_code = -ENOMEM;
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      pipe_mutex_unlock(ws->global_bo_list_lock);
   } else {
      r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      cs->error_code = r;
      goto cleanup;
   }

   r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
   cs->error_code = r;
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information (%i).\n", r);

      amdgpu_fence_signalled(cs->fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_buffers; i++)
      p_atomic_dec(&cs->buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* Wait for any pending ioctl of this CS to complete. */
   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_job_wait(&cs->flush_completed);
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", false)

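/* Flush pipeline: pad the IBs to the required alignment, finalize their
 * sizes, attach a fence, record per-buffer fences and dependencies, then
 * swap the double-buffered contexts (csc becomes cst) so the submit ioctl
 * can run in the queue thread while recording continues into the other
 * context.
 */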
static int amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                           unsigned flags,
                           struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;
   int error_code = 0;

   rcs->current.max_dw += amdgpu_cs_epilog_dws(cs->ring_type);

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      if (ws->info.chip_class <= SI) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xf0000000); /* NOP packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x00000000); /* NOP packet */
      }
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      if (ws->info.gfx_ib_pad_with_type2) {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      } else {
         while (rcs->current.cdw & 7)
            radeon_emit(rcs, 0xffff1000); /* type3 nop packet */
      }

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
            radeon_emit(&cs->const_preamble_ib.base, 0xffff1000); /* type3 nop packet */
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         radeon_emit(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* If the CS is not empty and hasn't overflowed... */
   if (radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop()) {
      struct amdgpu_cs_context *cur = cs->csc;
      unsigned i, num_buffers = cur->num_buffers;

      /* Set IB sizes. */
      amdgpu_ib_finalize(&cs->main);

      if (cs->const_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_ib);

      if (cs->const_preamble_ib.ib_mapped)
         amdgpu_ib_finalize(&cs->const_preamble_ib);

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      if (cs->next_fence) {
         /* just move the reference */
         cur->fence = cs->next_fence;
         cs->next_fence = NULL;
      } else {
         cur->fence = amdgpu_fence_create(cs->ctx,
                                          cur->request.ip_type,
                                          cur->request.ip_instance,
                                          cur->request.ring);
      }
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      /* Prepare buffers. */
      pipe_mutex_lock(ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);
      for (i = 0; i < num_buffers; i++) {
         struct amdgpu_winsys_bo *bo = cur->buffers[i].bo;
         p_atomic_inc(&bo->num_active_ioctls);
         amdgpu_add_fence(bo, cur->fence);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      amdgpu_cs_sync_flush(rcs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      if ((flags & RADEON_FLUSH_ASYNC) &&
          util_queue_is_initialized(&ws->cs_queue)) {
         util_queue_add_job(&ws->cs_queue, cs, &cs->flush_completed,
                            amdgpu_cs_submit_ib, NULL);
      } else {
         amdgpu_cs_submit_ib(cs, 0);
         error_code = cs->cst->error_code;
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   cs->main.base.used_gart = 0;
   cs->main.base.used_vram = 0;

   ws->num_cs_flushes++;
   return error_code;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   util_queue_fence_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   FREE(cs->main.base.prev);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   FREE(cs->const_ib.base.prev);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs->const_preamble_ib.base.prev);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   amdgpu_fence_reference(&cs->next_fence, NULL);
   FREE(cs);
}

static bool amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                    struct pb_buffer *_buf,
                                    enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_get_next_fence = amdgpu_cs_get_next_fence;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}