radeon/winsys: introduce radeon_winsys_cs_chunk
[mesa.git] / src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>


/* FENCES */

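/* Create a fence that is not signalled yet. The sequence number is filled
 * in later by amdgpu_fence_submitted; until then the fence is marked as
 * having its submission in progress. The fence holds a reference to its
 * context.
 */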
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   fence->submission_in_progress = true;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
   rfence->submission_in_progress = false;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
   rfence->submission_in_progress = false;
}

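/* Wait for the fence to signal. First poll the user fence value written by
 * the GPU (a cheap memory read); fall back to the libdrm fence ioctl when
 * a real wait with a timeout is needed.
 */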
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   /* The fence might not have a number assigned if its IB is being
    * submitted in the other thread right now. Wait until the submission
    * is done. */
   if (!os_wait_until_zero_abs_timeout(&rfence->submission_in_progress,
                                       abs_timeout))
      return false;

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

/* CONTEXTS */

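/* Create a submission context together with its user fence buffer: one
 * GART page that the GPU writes fence sequence numbers into, with one
 * slot per ring type (see the fence_info setup in amdgpu_cs_submit_ib).
 */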
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

static bool amdgpu_cs_has_user_fence(struct amdgpu_cs_context *cs)
{
   return cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE;
}

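/* Return the index of the buffer in the CS buffer list, or -1 if it is not
 * there yet. A one-entry-per-hash cache in buffer_indices_hashlist makes
 * repeated lookups of the same buffer O(1); on a miss, the list is scanned
 * linearly.
 */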
int amdgpu_lookup_buffer(struct amdgpu_cs_context *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}

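/* Add the buffer to the CS buffer list, or just update its usage flags and
 * priority if it is already there. Returns the buffer index and reports
 * which domains were newly added, so the caller can account the buffer's
 * size against the VRAM/GTT budget only once.
 */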
static unsigned amdgpu_add_buffer(struct amdgpu_cs *acs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_context *cs = acs->csc;
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = -1;

   assert(priority < 64);
   *added_domains = 0;

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->bo = bo;
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->csc->used_vram += bo->base.size;
   else if (added_domains & RADEON_DOMAIN_GTT)
      cs->csc->used_gart += bo->base.size;

   return index;
}

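/* Allocate and map a new backing buffer ("big IB buffer") that IBs are
 * sub-allocated from. The buffer is placed in GTT with CPU access because
 * the CPU writes commands into it directly.
 */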
static bool amdgpu_ib_new_buffer(struct amdgpu_winsys *ws, struct amdgpu_ib *ib)
{
   struct pb_buffer *pb;
   uint8_t *mapped;
   unsigned buffer_size;

   /* Always create a buffer that is 4 times larger than the maximum seen IB
    * size, aligned to a power of two. Limit to 512k dwords, which is the
    * largest power of two that fits into the size field of the INDIRECT_BUFFER
    * packet.
    */
   buffer_size = 4 * MIN2(util_next_power_of_two(4 * ib->max_ib_size),
                          512 * 1024);

   switch (ib->ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = MAX2(buffer_size, 4 * 1024);
      break;
   case IB_CONST:
      buffer_size = MAX2(buffer_size, 16 * 1024 * 4);
      break;
   case IB_MAIN:
      buffer_size = MAX2(buffer_size, 8 * 1024 * 4);
      break;
   default:
      unreachable("unhandled IB type");
   }

   pb = ws->base.buffer_create(&ws->base, buffer_size,
                               ws->info.gart_page_size,
                               RADEON_DOMAIN_GTT,
                               RADEON_FLAG_CPU_ACCESS);
   if (!pb)
      return false;

   mapped = ws->base.buffer_map(pb, NULL, PIPE_TRANSFER_WRITE);
   if (!mapped) {
      pb_reference(&pb, NULL);
      return false;
   }

   pb_reference(&ib->big_ib_buffer, pb);
   pb_reference(&pb, NULL);

   ib->ib_mapped = mapped;
   ib->used_ib_space = 0;

   return true;
}

static unsigned amdgpu_ib_max_submit_dwords(enum ib_type ib_type)
{
   switch (ib_type) {
   case IB_MAIN:
      /* Smaller submits means the GPU gets busy sooner and there is less
       * waiting for buffers and fences. Proof:
       *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
       */
      return 20 * 1024;
   case IB_CONST_PREAMBLE:
   case IB_CONST:
      /* There isn't really any reason to limit CE IB size beyond the natural
       * limit implied by the main IB, except perhaps GTT size. Just return
       * an extremely large value that we never get anywhere close to.
       */
      return 16 * 1024 * 1024;
   default:
      unreachable("bad ib_type");
   }
}

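/* Set up a fresh IB of the given type, sub-allocating it from the big IB
 * buffer (and replacing that buffer first if the remaining space is too
 * small). Also records the IB's GPU address and adds the backing buffer
 * to the CS buffer list.
 */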
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_cs *cs,
                              enum ib_type ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   struct amdgpu_ib *ib = NULL;
   struct amdgpu_cs_ib_info *info = &cs->csc->ib[ib_type];
   unsigned ib_size = 0;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      ib = &cs->const_preamble_ib;
      ib_size = 256 * 4;
      break;
   case IB_CONST:
      ib = &cs->const_ib;
      ib_size = 8 * 1024 * 4;
      break;
   case IB_MAIN:
      ib = &cs->main;
      ib_size = 4 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   ib_size = MAX2(ib_size,
                  4 * MIN2(util_next_power_of_two(ib->max_ib_size),
                           amdgpu_ib_max_submit_dwords(ib_type)));

   ib->base.prev_dw = 0;
   ib->base.num_prev = 0;
   ib->base.current.cdw = 0;
   ib->base.current.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
      if (!amdgpu_ib_new_buffer(aws, ib))
         return false;
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   amdgpu_cs_add_buffer(&cs->main.base, ib->big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   ib->base.current.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);

   ib_size = ib->big_ib_buffer->size - ib->used_ib_space;
   ib->base.current.max_dw = ib_size / 4;
   return true;
}

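/* Initialize one submission context: pick the HW IP block from the ring
 * type and allocate the parallel buffer/handle/flags arrays that describe
 * the buffer list of a submission.
 */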
static bool amdgpu_init_cs_context(struct amdgpu_cs_context *cs,
                                   enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers) {
      return false;
   }

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return false;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return false;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE |
                                     AMDGPU_IB_FLAG_PREAMBLE;

   return true;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs_context *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;
   amdgpu_fence_reference(&cs->fence, NULL);

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs_context *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}


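/* Create a command stream. Two submission contexts (csc1/csc2) are kept so
 * that one can be filled by the driver while the other is being submitted,
 * possibly from a separate winsys thread.
 */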
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   pipe_semaphore_init(&cs->flush_completed, 1);

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   cs->main.ib_type = IB_MAIN;
   cs->const_ib.ib_type = IB_CONST;
   cs->const_preamble_ib.ib_type = IB_CONST_PREAMBLE;

   if (!amdgpu_init_cs_context(&cs->csc1, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_init_cs_context(&cs->csc2, ring_type)) {
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   /* Set the first submission context as current. */
   cs->csc = &cs->csc1;
   cs->cst = &cs->csc2;

   if (!amdgpu_get_new_ib(&ctx->ws->base, cs, IB_MAIN)) {
      amdgpu_destroy_cs_context(&cs->csc2);
      amdgpu_destroy_cs_context(&cs->csc1);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST))
      return NULL;

   cs->csc->request.number_of_ibs = 2;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST];

   cs->cst->request.number_of_ibs = 2;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST];

   return &cs->const_ib.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE))
      return NULL;

   cs->csc->request.number_of_ibs = 3;
   cs->csc->request.ibs = &cs->csc->ib[IB_CONST_PREAMBLE];

   cs->cst->request.number_of_ibs = 3;
   cs->cst->request.ibs = &cs->cst->ib[IB_CONST_PREAMBLE];

   return &cs->const_preamble_ib.base;
}

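/* Append one dword to the current IB. */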
#define OUT_CS(cs, value) (cs)->current.buf[(cs)->current.cdw++] = (value)

static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs->csc, (struct amdgpu_winsys_bo*)buf);
}

static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}

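/* Check whether "dw" more dwords fit into the current IB without exceeding
 * the submit size limit, and grow max_ib_size so the next big IB buffer is
 * sized for what the driver actually emits.
 */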
static bool amdgpu_cs_check_space(struct radeon_winsys_cs *rcs, unsigned dw)
{
   struct amdgpu_ib *ib = amdgpu_ib(rcs);
   struct amdgpu_cs *cs = amdgpu_cs_from_ib(ib);
   unsigned requested_size = rcs->prev_dw + rcs->current.cdw + dw;

   assert(rcs->current.cdw <= rcs->current.max_dw);

   if (requested_size > amdgpu_ib_max_submit_dwords(ib->ib_type))
      return false;

   ib->max_ib_size = MAX2(ib->max_ib_size, requested_size);

   return rcs->current.max_dw - rcs->current.cdw >= dw;
}

static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
                                            uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   vram += cs->csc->used_vram;
   gtt += cs->csc->used_gart;

   /* Anything that goes above the VRAM size should go to GTT. */
   if (vram > ws->info.vram_size)
      gtt += vram - ws->info.vram_size;

   /* Now we just need to check if we have enough GTT. */
   return gtt < ws->info.gart_size * 0.7;
}

static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;

   return cs->used_vram + cs->used_gart;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs_context *cs = amdgpu_cs(rcs)->csc;
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}

DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)

/* Since the kernel driver doesn't synchronize execution between different
 * rings automatically, we have to add fence dependencies manually.
 */
static void amdgpu_add_fence_dependencies(struct amdgpu_cs *acs)
{
   struct amdgpu_cs_context *cs = acs->csc;
   int i, j;

   cs->request.number_of_dependencies = 0;

   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == acs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         if (bo_fence->submission_in_progress)
            os_wait_until_zero(&bo_fence->submission_in_progress,
                               PIPE_TIMEOUT_INFINITE);

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }
}

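/* Perform the actual submission ioctl. This runs either directly from
 * amdgpu_cs_flush or on the winsys thread, and always consumes the "cst"
 * context that amdgpu_cs_flush handed over.
 */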
void amdgpu_cs_submit_ib(struct amdgpu_cs *acs)
{
   struct amdgpu_winsys *ws = acs->ctx->ws;
   struct amdgpu_cs_context *cs = acs->cst;
   int i, r;

   cs->request.fence_info.handle = NULL;
   if (amdgpu_cs_has_user_fence(cs)) {
      cs->request.fence_info.handle = acs->ctx->user_fence_bo;
      cs->request.fence_info.offset = acs->ring_type;
   }

   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
   if (debug_get_option_all_bos()) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;

      pipe_mutex_lock(ws->global_bo_list_lock);

      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      if (!handles) {
         pipe_mutex_unlock(ws->global_bo_list_lock);
         amdgpu_cs_context_cleanup(cs);
         return;
      }

      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
         assert(num < ws->num_buffers);
         handles[num++] = bo->bo;
      }

      r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                handles, NULL,
                                &cs->request.resources);
      free(handles);
      pipe_mutex_unlock(ws->global_bo_list_lock);
   } else {
      r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);
   }

   if (r) {
      fprintf(stderr, "amdgpu: buffer list creation failed (%d)\n", r);
      cs->request.resources = NULL;
      amdgpu_fence_signalled(cs->fence);
      goto cleanup;
   }

   r = amdgpu_cs_submit(acs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(cs->fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (amdgpu_cs_has_user_fence(cs))
         user_fence = acs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(cs->fence, &cs->request, user_fence);
   }

   /* Cleanup. */
   if (cs->request.resources)
      amdgpu_bo_list_destroy(cs->request.resources);

cleanup:
   for (i = 0; i < cs->num_buffers; i++)
      p_atomic_dec(&cs->buffers[i].bo->num_active_ioctls);

   amdgpu_cs_context_cleanup(cs);
}

/* Make sure the previous submission is completed. */
void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   /* Wait for any pending ioctl of this CS to complete. */
   if (cs->ctx->ws->thread) {
      /* wait and set the semaphore to "busy" */
      pipe_semaphore_wait(&cs->flush_completed);
      /* set the semaphore to "idle" */
      pipe_semaphore_signal(&cs->flush_completed);
   }
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)

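/* Flush the command stream: pad the IBs to the required alignment, create
 * the fence, record fence dependencies for all referenced buffers, swap the
 * two submission contexts, and submit (asynchronously if a winsys thread is
 * available and RADEON_FLUSH_ASYNC is set). Finally, prepare fresh IBs for
 * the next batch of commands.
 */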
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->current.cdw & 7)
         OUT_CS(rcs, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->current.cdw & 7)
         OUT_CS(rcs, 0xffff1000); /* type3 nop packet */

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.current.cdw || (cs->const_ib.base.current.cdw & 7))
            OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.current.cdw || (cs->const_preamble_ib.base.current.cdw & 7))
            OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->current.cdw & 15)
         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->current.cdw > rcs->current.max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   /* Submit the CS unless it is empty, overflowed, or RADEON_NOOP is set. */
   if (radeon_emitted(&cs->main.base, 0) &&
       cs->main.base.current.cdw <= cs->main.base.current.max_dw &&
       !debug_get_option_noop()) {
      struct amdgpu_cs_context *cur = cs->csc;
      unsigned i, num_buffers = cur->num_buffers;

      /* Set IB sizes. */
      cur->ib[IB_MAIN].size = cs->main.base.current.cdw;
      cs->main.used_ib_space += cs->main.base.current.cdw * 4;
      cs->main.max_ib_size = MAX2(cs->main.max_ib_size,
                                  cs->main.base.prev_dw + cs->main.base.current.cdw);

      if (cs->const_ib.ib_mapped) {
         cur->ib[IB_CONST].size = cs->const_ib.base.current.cdw;
         cs->const_ib.used_ib_space += cs->const_ib.base.current.cdw * 4;
         cs->const_ib.max_ib_size =
            MAX2(cs->const_ib.max_ib_size,
                 cs->const_ib.base.prev_dw + cs->const_ib.base.current.cdw);
      }

      if (cs->const_preamble_ib.ib_mapped) {
         cur->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.current.cdw;
         cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.current.cdw * 4;
         cs->const_preamble_ib.max_ib_size =
            MAX2(cs->const_preamble_ib.max_ib_size,
                 cs->const_preamble_ib.base.prev_dw + cs->const_preamble_ib.base.current.cdw);
      }

      /* Create a fence. */
      amdgpu_fence_reference(&cur->fence, NULL);
      cur->fence = amdgpu_fence_create(cs->ctx,
                                       cur->request.ip_type,
                                       cur->request.ip_instance,
                                       cur->request.ring);
      if (fence)
         amdgpu_fence_reference(fence, cur->fence);

      /* Prepare buffers. */
      pipe_mutex_lock(ws->bo_fence_lock);
      amdgpu_add_fence_dependencies(cs);
      for (i = 0; i < num_buffers; i++) {
         p_atomic_inc(&cur->buffers[i].bo->num_active_ioctls);
         amdgpu_fence_reference(&cur->buffers[i].bo->fence[cs->ring_type],
                                cur->fence);
      }
      pipe_mutex_unlock(ws->bo_fence_lock);

      amdgpu_cs_sync_flush(rcs);

      /* Swap command streams. "cst" is going to be submitted. */
      cs->csc = cs->cst;
      cs->cst = cur;

      /* Submit. */
      if (ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
         /* Set the semaphore to "busy". */
         pipe_semaphore_wait(&cs->flush_completed);
         amdgpu_ws_queue_cs(ws, cs);
      } else {
         amdgpu_cs_submit_ib(cs);
      }
   } else {
      amdgpu_cs_context_cleanup(cs->csc);
   }

   amdgpu_get_new_ib(&ws->base, cs, IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, cs, IB_CONST_PREAMBLE);

   ws->num_cs_flushes++;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_cs_sync_flush(rcs);
   pipe_semaphore_destroy(&cs->flush_completed);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   amdgpu_destroy_cs_context(&cs->csc1);
   amdgpu_destroy_cs_context(&cs->csc2);
   FREE(cs);
}

static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

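/* Plug the command submission implementation into the winsys vtable. */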
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_check_space = amdgpu_cs_check_space;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}