winsys/amdgpu: cleanup error handling in amdgpu_ctx_create
[mesa.git] / src / gallium / winsys / amdgpu / drm / amdgpu_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>


/* FENCES */

static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
}

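/* Wait until the fence has signalled or the timeout expires.
 *
 * The cheap path reads the CPU-visible user fence value that the GPU writes
 * on completion; only when that is inconclusive (or no user fence address is
 * known) does the code fall back to the amdgpu_cs_query_fence_status ioctl.
 * "timeout" follows the gallium fence convention (nanoseconds) and is taken
 * as an absolute deadline when "absolute" is true.
 */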
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

/* CONTEXTS */

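/* Create a winsys context.
 *
 * Besides the kernel context (amdgpu_cs_ctx_create), this allocates and maps
 * one GTT page that acts as the "user fence" buffer: the GPU writes the
 * sequence number of each completed submission into it, which lets fence
 * waits be answered from the CPU without an ioctl.
 */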
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

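/* Set up an IB of the given type for recording commands.
 *
 * IBs are not given their own small BOs; they are sub-allocated from a larger
 * "big IB" buffer in GTT. When the space left in the current big buffer is
 * too small for another IB of the requested size, a new buffer is created
 * and mapped for CPU writes.
 */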
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
                              struct amdgpu_cs_ib_info *info, unsigned ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   unsigned buffer_size, ib_size;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = 4 * 1024 * 4;
      ib_size = 1024 * 4;
      break;
   case IB_CONST:
      buffer_size = 512 * 1024 * 4;
      ib_size = 128 * 1024 * 4;
      break;
   case IB_MAIN:
      buffer_size = 128 * 1024 * 4;
      ib_size = 20 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   ib->base.cdw = 0;
   ib->base.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {

      pb_reference(&ib->big_ib_buffer, NULL);
      ib->ib_mapped = NULL;
      ib->used_ib_space = 0;

      ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
                                            aws->info.gart_page_size,
                                            RADEON_DOMAIN_GTT,
                                            RADEON_FLAG_CPU_ACCESS);
      if (!ib->big_ib_buffer)
         return false;

      ib->ib_mapped = ws->buffer_map(ib->big_ib_buffer, NULL,
                                     PIPE_TRANSFER_WRITE);
      if (!ib->ib_mapped) {
         pb_reference(&ib->big_ib_buffer, NULL);
         return false;
      }
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.max_dw = ib_size / 4;
   return true;
}

static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
                                      enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers) {
      return FALSE;
   }

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return FALSE;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return FALSE;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   return TRUE;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}


static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   if (!amdgpu_init_cs_context(cs, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN)) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}

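/* The two functions below attach the optional constant-engine (CE) IBs to a
 * GFX command stream. They are marked with AMDGPU_IB_FLAG_CE so they run on
 * the constant engine, and the preamble variant also sets
 * AMDGPU_IB_FLAG_PREAMBLE, which (per the kernel UAPI) allows the preamble to
 * be skipped when no context switch occurred. Note that cs->request.ibs is
 * simply rewound to an earlier array element, so the final submission order
 * is { preamble, const, main }.
 */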
static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST))
      return NULL;

   cs->request.number_of_ibs = 2;
   cs->request.ibs = &cs->ib[IB_CONST];
   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;

   return &cs->const_ib.base;
}

static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
                          &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE))
      return NULL;

   cs->request.number_of_ibs = 3;
   cs->request.ibs = &cs->ib[IB_CONST_PREAMBLE];
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;

   return &cs->const_preamble_ib.base;
}

#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)

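/* Return the index of the buffer in the CS buffer list, or -1 if it has not
 * been added yet. A small direct-mapped cache (buffer_indices_hashlist),
 * keyed by the BO's unique_id, is consulted before falling back to a linear
 * search of the list.
 */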
int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}

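/* Add a buffer to the CS buffer list, or update its usage/priority if it is
 * already listed, and return its index. "added_domains" returns only the
 * domains that were not already requested for this buffer, so the caller can
 * charge the buffer's size to the VRAM/GTT budget exactly once.
 */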
static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = -1;

   assert(priority < 64);
   *added_domains = 0;

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->bo = bo;
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->used_vram += bo->base.size;
   else if (added_domains & RADEON_DOMAIN_GTT)
      cs->used_gart += bo->base.size;

   return index;
}

static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs, (struct amdgpu_winsys_bo*)buf);
}

static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}

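/* Heuristic used by the driver to decide whether more buffers still fit:
 * would the CS stay within memory limits if "vram"/"gtt" additional bytes
 * were referenced? Whatever exceeds the VRAM size is assumed to spill into
 * GTT, and total GTT use is kept below roughly 70% of the GART size.
 */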
static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   vram += cs->used_vram;
   gtt += cs->used_gart;

   /* Anything that goes above the VRAM size should go to GTT. */
   if (vram > ws->info.vram_size)
      gtt += vram - ws->info.vram_size;

   /* Now we just need to check if we have enough GTT. */
   return gtt < ws->info.gart_size * 0.7;
}

static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return cs->used_vram + cs->used_gart;
}

static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}

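/* Hand the prepared request to the kernel.
 *
 * The kernel does not synchronize submissions on different rings, so every
 * buffer referenced by this CS is checked for unsignalled fences belonging to
 * other contexts or rings, and those are added as explicit dependencies. On
 * success the new fence is attached to each buffer so later submissions and
 * mappings can wait for it; on failure the fence is marked signalled so that
 * nothing waits on it forever.
 */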
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
                                    struct pipe_fence_handle **out_fence)
{
   struct amdgpu_winsys *ws = cs->ctx->ws;
   struct pipe_fence_handle *fence;
   int i, j, r;

   /* Create a fence. */
   fence = amdgpu_fence_create(cs->ctx,
                               cs->request.ip_type,
                               cs->request.ip_instance,
                               cs->request.ring);
   if (out_fence)
      amdgpu_fence_reference(out_fence, fence);

   cs->request.number_of_dependencies = 0;

   /* Since the kernel driver doesn't synchronize execution between different
    * rings automatically, we have to add fence dependencies manually. */
   pipe_mutex_lock(ws->bo_fence_lock);
   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == cs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }

   cs->request.fence_info.handle = NULL;
   if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
      cs->request.fence_info.handle = cs->ctx->user_fence_bo;
      cs->request.fence_info.offset = cs->ring_type;
   }

   r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
         user_fence = cs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(fence, &cs->request, user_fence);

      for (i = 0; i < cs->num_buffers; i++)
         amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->ring_type],
                                fence);
   }
   pipe_mutex_unlock(ws->bo_fence_lock);
   amdgpu_fence_reference(&fence, NULL);
}

static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   /* no-op */
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)

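/* Flush (submit) a command stream.
 *
 * Two debug environment variables affect this path: RADEON_NOOP=1 pads the
 * IBs but skips the kernel submission entirely, and RADEON_ALL_BOS=1 builds
 * the BO list from every buffer the winsys has allocated instead of only the
 * buffers referenced by this CS (a debugging aid, e.g. for VM faults).
 */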
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0xffff1000); /* type3 nop packet */

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
            OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.cdw || (cs->const_preamble_ib.base.cdw & 7))
            OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->cdw > rcs->max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   if (cs->const_ib.ib_mapped)
      amdgpu_cs_add_buffer(rcs, cs->const_ib.big_ib_buffer,
                           RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_cs_add_buffer(rcs, cs->const_preamble_ib.big_ib_buffer,
                           RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   /* Submit only if the CS is not empty, has not overflowed, and noop mode
    * is disabled. */
   if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
      int r;

      /* Use a buffer list containing all allocated buffers if requested. */
      if (debug_get_option_all_bos()) {
         struct amdgpu_winsys_bo *bo;
         amdgpu_bo_handle *handles;
         unsigned num = 0;

         pipe_mutex_lock(ws->global_bo_list_lock);

         handles = malloc(sizeof(handles[0]) * ws->num_buffers);
         if (!handles) {
            pipe_mutex_unlock(ws->global_bo_list_lock);
            goto cleanup;
         }

         LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
            assert(num < ws->num_buffers);
            handles[num++] = bo->bo;
         }

         r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                   handles, NULL,
                                   &cs->request.resources);
         free(handles);
         pipe_mutex_unlock(ws->global_bo_list_lock);
      } else {
         r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                   cs->handles, cs->flags,
                                   &cs->request.resources);
      }

      if (r) {
         fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
         cs->request.resources = NULL;
         goto cleanup;
      }

      cs->ib[IB_MAIN].size = cs->main.base.cdw;
      cs->main.used_ib_space += cs->main.base.cdw * 4;

      if (cs->const_ib.ib_mapped) {
         cs->ib[IB_CONST].size = cs->const_ib.base.cdw;
         cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
      }

      if (cs->const_preamble_ib.ib_mapped) {
         cs->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
         cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.cdw * 4;
      }

      amdgpu_cs_do_submission(cs, fence);

      /* Cleanup. */
      if (cs->request.resources)
         amdgpu_bo_list_destroy(cs->request.resources);
   }

cleanup:
   amdgpu_cs_context_cleanup(cs);

   amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
                        &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE);

   ws->num_cs_flushes++;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs);
}

static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}
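
/* A minimal sketch of how a gallium driver is expected to drive this vtable.
 * The driver-side variable names below are hypothetical; the entry points
 * and signatures are the ones assigned above.
 *
 *    struct radeon_winsys *ws = ...;   // the amdgpu winsys created by the screen
 *    struct radeon_winsys_ctx *ctx = ws->ctx_create(ws);
 *    struct radeon_winsys_cs *cs =
 *       ws->cs_create(ctx, RING_GFX, driver_flush_cb, driver_data);
 *
 *    ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE, 0, priority);
 *    ... emit packets into cs->buf / cs->cdw (see OUT_CS above) ...
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ws->cs_flush(cs, 0, &fence);
 *    ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
 *    ws->fence_reference(&fence, NULL);
 *
 *    ws->cs_destroy(cs);
 *    ws->ctx_destroy(ctx);
 */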