gallium/radeon: add a heuristic for better (S)DMA performance
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
1 /*
2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
4 * Copyright © 2015 Advanced Micro Devices, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining
8 * a copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
17 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
19 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 */
28 /*
29 * Authors:
30 * Marek Olšák <maraeo@gmail.com>
31 */
32
33 #include "amdgpu_cs.h"
34 #include "os/os_time.h"
35 #include <stdio.h>
36 #include <amdgpu_drm.h>
37
38
39 /* FENCES */
40
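/* Create a fence for the given context and ring. The sequence number and the
 * user-fence address are filled in by amdgpu_fence_submitted() once the
 * request has actually been handed to the kernel. */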
41 static struct pipe_fence_handle *
42 amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
43 unsigned ip_instance, unsigned ring)
44 {
45 struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);
46
47 fence->reference.count = 1;
48 fence->ctx = ctx;
49 fence->fence.context = ctx->ctx;
50 fence->fence.ip_type = ip_type;
51 fence->fence.ip_instance = ip_instance;
52 fence->fence.ring = ring;
53 p_atomic_inc(&ctx->refcount);
54 return (struct pipe_fence_handle *)fence;
55 }
56
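/* Record the kernel-assigned sequence number and the CPU-visible user-fence
 * slot, so that later waits can poll memory before falling back to a kernel
 * query. */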
57 static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
58 struct amdgpu_cs_request* request,
59 uint64_t *user_fence_cpu_address)
60 {
61 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
62
63 rfence->fence.fence = request->seq_no;
64 rfence->user_fence_cpu_address = user_fence_cpu_address;
65 }
66
67 static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
68 {
69 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
70
71 rfence->signalled = true;
72 }
73
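/* Wait for a fence with the given timeout (absolute or relative). The cached
 * "signalled" flag and the GPU-written user fence are checked first; only
 * then is the libdrm query used. */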
74 bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
75 bool absolute)
76 {
77 struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
78 uint32_t expired;
79 int64_t abs_timeout;
80 uint64_t *user_fence_cpu;
81 int r;
82
83 if (rfence->signalled)
84 return true;
85
86 if (absolute)
87 abs_timeout = timeout;
88 else
89 abs_timeout = os_time_get_absolute_timeout(timeout);
90
91 user_fence_cpu = rfence->user_fence_cpu_address;
92 if (user_fence_cpu && *user_fence_cpu >= rfence->fence.fence) {
93 rfence->signalled = true;
94 return true;
95 }
96 /* Now use the libdrm query. */
97 r = amdgpu_cs_query_fence_status(&rfence->fence,
98 abs_timeout,
99 AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
100 &expired);
101 if (r) {
102 fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
103 return false;
104 }
105
106 if (expired) {
107 /* This variable can only transition from false to true, so it doesn't
108 * matter if threads race for it. */
109 rfence->signalled = true;
110 return true;
111 }
112 return false;
113 }
114
115 static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
116 struct pipe_fence_handle *fence,
117 uint64_t timeout)
118 {
119 return amdgpu_fence_wait(fence, timeout, false);
120 }
121
122 /* CONTEXTS */
123
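/* Create a kernel GPU context plus one GTT page for user fences: one 64-bit
 * slot per ring type, written by the GPU at the end of a submission (UVD and
 * VCE don't use it) and read by amdgpu_fence_wait(). */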
124 static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
125 {
126 struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
127 int r;
128 struct amdgpu_bo_alloc_request alloc_buffer = {};
129 amdgpu_bo_handle buf_handle;
130
131 ctx->ws = amdgpu_winsys(ws);
132 ctx->refcount = 1;
133
134 r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
135 if (r) {
136 fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
137 FREE(ctx);
138 return NULL;
139 }
140
141 alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
142 alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
143 alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
144
145 r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
146 if (r) {
147 fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
148 amdgpu_cs_ctx_free(ctx->ctx);
149 FREE(ctx);
150 return NULL;
151 }
152
153 r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
154 if (r) {
155 fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
156 amdgpu_bo_free(buf_handle);
157 amdgpu_cs_ctx_free(ctx->ctx);
158 FREE(ctx);
159 return NULL;
160 }
161
162 memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
163 ctx->user_fence_bo = buf_handle;
164
165 return (struct radeon_winsys_ctx*)ctx;
166 }
167
168 static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
169 {
170 amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
171 }
172
173 static enum pipe_reset_status
174 amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
175 {
176 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
177 uint32_t result, hangs;
178 int r;
179
180 r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
181 if (r) {
182 fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
183 return PIPE_NO_RESET;
184 }
185
186 switch (result) {
187 case AMDGPU_CTX_GUILTY_RESET:
188 return PIPE_GUILTY_CONTEXT_RESET;
189 case AMDGPU_CTX_INNOCENT_RESET:
190 return PIPE_INNOCENT_CONTEXT_RESET;
191 case AMDGPU_CTX_UNKNOWN_RESET:
192 return PIPE_UNKNOWN_CONTEXT_RESET;
193 case AMDGPU_CTX_NO_RESET:
194 default:
195 return PIPE_NO_RESET;
196 }
197 }
198
199 /* COMMAND SUBMISSION */
200
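/* Set up "ib" for recording a new IB: suballocate ib_size bytes from the
 * current big IB buffer (allocating a fresh buffer when the old one is used
 * up) and store the resulting GPU address in "info". */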
201 static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
202 struct amdgpu_cs_ib_info *info, unsigned ib_type)
203 {
204 struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
205 /* Small IBs are better than big IBs, because the GPU goes idle quicker
206 * and there is less waiting for buffers and fences. Benchmarks:
207 * http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
208 */
209 unsigned buffer_size, ib_size;
210
211 switch (ib_type) {
212 case IB_CONST_PREAMBLE:
213 buffer_size = 4 * 1024 * 4;
214 ib_size = 1024 * 4;
215 break;
216 case IB_CONST:
217 buffer_size = 512 * 1024 * 4;
218 ib_size = 128 * 1024 * 4;
219 break;
220 case IB_MAIN:
221 buffer_size = 128 * 1024 * 4;
222 ib_size = 20 * 1024 * 4;
223 break;
224 default:
225 unreachable("unhandled IB type");
226 }
227
228 ib->base.cdw = 0;
229 ib->base.buf = NULL;
230
231 /* Allocate a new buffer for IBs if the current buffer is all used. */
232 if (!ib->big_ib_buffer ||
233 ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {
234
235 pb_reference(&ib->big_ib_buffer, NULL);
236 ib->ib_mapped = NULL;
237 ib->used_ib_space = 0;
238
239 ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
240 aws->info.gart_page_size,
241 RADEON_DOMAIN_GTT,
242 RADEON_FLAG_CPU_ACCESS);
243 if (!ib->big_ib_buffer)
244 return false;
245
246 ib->ib_mapped = ws->buffer_map(ib->big_ib_buffer, NULL,
247 PIPE_TRANSFER_WRITE);
248 if (!ib->ib_mapped) {
249 pb_reference(&ib->big_ib_buffer, NULL);
250 return false;
251 }
252 }
253
254 info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
255 ib->used_ib_space;
256 ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
257 ib->base.max_dw = ib_size / 4;
258 return true;
259 }
260
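/* Allocate the per-CS buffer list: "buffers" is the winsys view of each BO,
 * while "handles" and "flags" are the parallel arrays passed to
 * amdgpu_bo_list_create() at flush time. The hashlist speeds up
 * amdgpu_lookup_buffer(). */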
261 static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
262 enum ring_type ring_type)
263 {
264 int i;
265
266 switch (ring_type) {
267 case RING_DMA:
268 cs->request.ip_type = AMDGPU_HW_IP_DMA;
269 break;
270
271 case RING_UVD:
272 cs->request.ip_type = AMDGPU_HW_IP_UVD;
273 break;
274
275 case RING_VCE:
276 cs->request.ip_type = AMDGPU_HW_IP_VCE;
277 break;
278
279 case RING_COMPUTE:
280 cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
281 break;
282
283 default:
284 case RING_GFX:
285 cs->request.ip_type = AMDGPU_HW_IP_GFX;
286 break;
287 }
288
289 cs->max_num_buffers = 512;
290 cs->buffers = (struct amdgpu_cs_buffer*)
291 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
292 if (!cs->buffers) {
293 return FALSE;
294 }
295
296 cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
297 if (!cs->handles) {
298 FREE(cs->buffers);
299 return FALSE;
300 }
301
302 cs->flags = CALLOC(1, cs->max_num_buffers);
303 if (!cs->flags) {
304 FREE(cs->handles);
305 FREE(cs->buffers);
306 return FALSE;
307 }
308
309 for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
310 cs->buffer_indices_hashlist[i] = -1;
311 }
312 return TRUE;
313 }
314
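/* Drop all buffer references and reset the usage counters, so the CS can be
 * reused for the next IB without reallocating its arrays. */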
315 static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
316 {
317 unsigned i;
318
319 for (i = 0; i < cs->num_buffers; i++) {
320 p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
321 amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
322 cs->handles[i] = NULL;
323 cs->flags[i] = 0;
324 }
325
326 cs->num_buffers = 0;
327 cs->used_gart = 0;
328 cs->used_vram = 0;
329
330 for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
331 cs->buffer_indices_hashlist[i] = -1;
332 }
333 }
334
335 static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
336 {
337 amdgpu_cs_context_cleanup(cs);
338 FREE(cs->flags);
339 FREE(cs->buffers);
340 FREE(cs->handles);
341 FREE(cs->request.dependencies);
342 }
343
344
345 static struct radeon_winsys_cs *
346 amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
347 enum ring_type ring_type,
348 void (*flush)(void *ctx, unsigned flags,
349 struct pipe_fence_handle **fence),
350 void *flush_ctx)
351 {
352 struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
353 struct amdgpu_cs *cs;
354
355 cs = CALLOC_STRUCT(amdgpu_cs);
356 if (!cs) {
357 return NULL;
358 }
359
360 cs->ctx = ctx;
361 cs->flush_cs = flush;
362 cs->flush_data = flush_ctx;
363 cs->ring_type = ring_type;
364
365 if (!amdgpu_init_cs_context(cs, ring_type)) {
366 FREE(cs);
367 return NULL;
368 }
369
370 if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN)) {
371 amdgpu_destroy_cs_context(cs);
372 FREE(cs);
373 return NULL;
374 }
375
376 cs->request.number_of_ibs = 1;
377 cs->request.ibs = &cs->ib[IB_MAIN];
378
379 p_atomic_inc(&ctx->ws->num_cs);
380 return &cs->main.base;
381 }
382
383 static struct radeon_winsys_cs *
384 amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
385 {
386 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
387 struct amdgpu_winsys *ws = cs->ctx->ws;
388
389 /* only one const IB can be added */
390 if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
391 return NULL;
392
393 if (!amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST))
394 return NULL;
395
396 cs->request.number_of_ibs = 2;
397 cs->request.ibs = &cs->ib[IB_CONST];
398 cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;
399
400 return &cs->const_ib.base;
401 }
402
403 static struct radeon_winsys_cs *
404 amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
405 {
406 struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
407 struct amdgpu_winsys *ws = cs->ctx->ws;
408
409 /* only one const preamble IB can be added and only when the const IB has
410 * also been mapped */
411 if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
412 cs->const_preamble_ib.ib_mapped)
413 return NULL;
414
415 if (!amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
416 &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE))
417 return NULL;
418
419 cs->request.number_of_ibs = 3;
420 cs->request.ibs = &cs->ib[IB_CONST_PREAMBLE];
421 cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;
422
423 return &cs->const_preamble_ib.base;
424 }
425
426 #define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
427
428 int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
429 {
430 unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
431 int i = cs->buffer_indices_hashlist[hash];
432
433 /* Either not found (-1), or the cached index still points at this BO. */
434 if (i == -1 || cs->buffers[i].bo == bo)
435 return i;
436
437 /* Hash collision, look for the BO in the list of buffers linearly. */
438 for (i = cs->num_buffers - 1; i >= 0; i--) {
439 if (cs->buffers[i].bo == bo) {
440 /* Put this buffer in the hash list.
441 * This will prevent additional hash collisions if there are
442 * several consecutive lookup_buffer calls for the same buffer.
443 *
444 * Example: Assuming buffers A,B,C collide in the hash list,
445 * the following sequence of buffers:
446 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
447 * will collide here: ^ and here: ^,
448 * meaning that we should get very few collisions in the end. */
449 cs->buffer_indices_hashlist[hash] = i;
450 return i;
451 }
452 }
453 return -1;
454 }
455
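/* Add a buffer to the CS, or just accumulate usage and priority if it is
 * already on the list. Returns the buffer index; *added_domains reports which
 * domains are newly requested, so the caller can update used_vram/used_gart. */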
456 static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
457 struct amdgpu_winsys_bo *bo,
458 enum radeon_bo_usage usage,
459 enum radeon_bo_domain domains,
460 unsigned priority,
461 enum radeon_bo_domain *added_domains)
462 {
463 struct amdgpu_cs_buffer *buffer;
464 unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
465 int i = -1;
466
467 assert(priority < 64);
468 *added_domains = 0;
469
470 i = amdgpu_lookup_buffer(cs, bo);
471
472 if (i >= 0) {
473 buffer = &cs->buffers[i];
474 buffer->priority_usage |= 1llu << priority;
475 buffer->usage |= usage;
476 *added_domains = domains & ~buffer->domains;
477 buffer->domains |= domains;
478 cs->flags[i] = MAX2(cs->flags[i], priority / 4);
479 return i;
480 }
481
482 /* New buffer, check if the backing array is large enough. */
483 if (cs->num_buffers >= cs->max_num_buffers) {
484 uint32_t size;
485 cs->max_num_buffers += 10;
486
487 size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
488 cs->buffers = realloc(cs->buffers, size);
489
490 size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
491 cs->handles = realloc(cs->handles, size);
492
493 cs->flags = realloc(cs->flags, cs->max_num_buffers);
494 }
495
496 /* Initialize the new buffer. */
497 cs->buffers[cs->num_buffers].bo = NULL;
498 amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
499 cs->handles[cs->num_buffers] = bo->bo;
500 cs->flags[cs->num_buffers] = priority / 4;
501 p_atomic_inc(&bo->num_cs_references);
502 buffer = &cs->buffers[cs->num_buffers];
503 buffer->bo = bo;
504 buffer->priority_usage = 1llu << priority;
505 buffer->usage = usage;
506 buffer->domains = domains;
507
508 cs->buffer_indices_hashlist[hash] = cs->num_buffers;
509
510 *added_domains = domains;
511 return cs->num_buffers++;
512 }
513
514 static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
515 struct pb_buffer *buf,
516 enum radeon_bo_usage usage,
517 enum radeon_bo_domain domains,
518 enum radeon_bo_priority priority)
519 {
520 /* Don't use the "domains" parameter. Amdgpu doesn't support changing
521 * the buffer placement during command submission.
522 */
523 struct amdgpu_cs *cs = amdgpu_cs(rcs);
524 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
525 enum radeon_bo_domain added_domains;
526 unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
527 priority, &added_domains);
528
529 if (added_domains & RADEON_DOMAIN_VRAM)
530 cs->used_vram += bo->base.size;
531 else if (added_domains & RADEON_DOMAIN_GTT)
532 cs->used_gart += bo->base.size;
533
534 return index;
535 }
536
537 static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
538 struct pb_buffer *buf)
539 {
540 struct amdgpu_cs *cs = amdgpu_cs(rcs);
541
542 return amdgpu_lookup_buffer(cs, (struct amdgpu_winsys_bo*)buf);
543 }
544
545 static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
546 {
547 return TRUE;
548 }
549
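/* Heuristic check that the CS plus "vram"/"gtt" bytes of additional
 * allocations still fit: whatever exceeds the VRAM size is assumed to spill
 * into GTT, and only ~70% of GART may be used. For example, with 2 GB of
 * VRAM, requests totalling 2.5 GB of VRAM add 0.5 GB to the GTT estimate. */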
550 static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
551 {
552 struct amdgpu_cs *cs = amdgpu_cs(rcs);
553 struct amdgpu_winsys *ws = cs->ctx->ws;
554
555 vram += cs->used_vram;
556 gtt += cs->used_gart;
557
558 /* Anything that goes above the VRAM size should go to GTT. */
559 if (vram > ws->info.vram_size)
560 gtt += vram - ws->info.vram_size;
561
562 /* Now we just need to check if we have enough GTT. */
563 return gtt < ws->info.gart_size * 0.7;
564 }
565
566 static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
567 {
568 struct amdgpu_cs *cs = amdgpu_cs(rcs);
569
570 return cs->used_vram + cs->used_gart;
571 }
572
573 static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
574 struct radeon_bo_list_item *list)
575 {
576 struct amdgpu_cs *cs = amdgpu_cs(rcs);
577 int i;
578
579 if (list) {
580 for (i = 0; i < cs->num_buffers; i++) {
581 pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
582 list[i].vm_address = cs->buffers[i].bo->va;
583 list[i].priority_usage = cs->buffers[i].priority_usage;
584 }
585 }
586 return cs->num_buffers;
587 }
588
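/* Submit cs->request to the kernel: create the winsys fence, collect fence
 * dependencies from buffers last used on other rings or contexts, point the
 * user fence at this ring's slot, and call amdgpu_cs_submit(). On failure the
 * fence is marked signalled so that waiters don't hang. */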
589 static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
590 struct pipe_fence_handle **out_fence)
591 {
592 struct amdgpu_winsys *ws = cs->ctx->ws;
593 struct pipe_fence_handle *fence;
594 int i, j, r;
595
596 /* Create a fence. */
597 fence = amdgpu_fence_create(cs->ctx,
598 cs->request.ip_type,
599 cs->request.ip_instance,
600 cs->request.ring);
601 if (out_fence)
602 amdgpu_fence_reference(out_fence, fence);
603
604 cs->request.number_of_dependencies = 0;
605
606 /* Since the kernel driver doesn't synchronize execution between different
607 * rings automatically, we have to add fence dependencies manually. */
608 pipe_mutex_lock(ws->bo_fence_lock);
609 for (i = 0; i < cs->num_buffers; i++) {
610 for (j = 0; j < RING_LAST; j++) {
611 struct amdgpu_cs_fence *dep;
612 unsigned idx;
613
614 struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
615 if (!bo_fence)
616 continue;
617
618 if (bo_fence->ctx == cs->ctx &&
619 bo_fence->fence.ip_type == cs->request.ip_type &&
620 bo_fence->fence.ip_instance == cs->request.ip_instance &&
621 bo_fence->fence.ring == cs->request.ring)
622 continue;
623
624 if (amdgpu_fence_wait((void *)bo_fence, 0, false))
625 continue;
626
627 idx = cs->request.number_of_dependencies++;
628 if (idx >= cs->max_dependencies) {
629 unsigned size;
630
631 cs->max_dependencies = idx + 8;
632 size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
633 cs->request.dependencies = realloc(cs->request.dependencies, size);
634 }
635
636 dep = &cs->request.dependencies[idx];
637 memcpy(dep, &bo_fence->fence, sizeof(*dep));
638 }
639 }
640
641 cs->request.fence_info.handle = NULL;
642 if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
643 cs->request.fence_info.handle = cs->ctx->user_fence_bo;
644 cs->request.fence_info.offset = cs->ring_type;
645 }
646
647 r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
648 if (r) {
649 if (r == -ENOMEM)
650 fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
651 else
652 fprintf(stderr, "amdgpu: The CS has been rejected, "
653 "see dmesg for more information.\n");
654
655 amdgpu_fence_signalled(fence);
656 } else {
657 /* Success. */
658 uint64_t *user_fence = NULL;
659 if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
660 user_fence = cs->ctx->user_fence_cpu_address_base +
661 cs->request.fence_info.offset;
662 amdgpu_fence_submitted(fence, &cs->request, user_fence);
663
664 for (i = 0; i < cs->num_buffers; i++)
665 amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->ring_type],
666 fence);
667 }
668 pipe_mutex_unlock(ws->bo_fence_lock);
669 amdgpu_fence_reference(&fence, NULL);
670 }
671
672 static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
673 {
674 /* no-op */
675 }
676
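/* Debug options: RADEON_NOOP=1 builds command streams but never submits them;
 * RADEON_ALL_BOS=1 submits a buffer list containing every allocated BO
 * instead of only the buffers referenced by the CS. */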
677 DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
678 DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
679
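/* Flush the CS: pad each IB to the alignment its ring requires, add the IB
 * buffers themselves to the buffer list, build the kernel buffer list,
 * submit, and prepare fresh IBs for subsequent commands. */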
680 static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
681 unsigned flags,
682 struct pipe_fence_handle **fence)
683 {
684 struct amdgpu_cs *cs = amdgpu_cs(rcs);
685 struct amdgpu_winsys *ws = cs->ctx->ws;
686
687 switch (cs->ring_type) {
688 case RING_DMA:
689 /* pad DMA ring to 8 DWs */
690 while (rcs->cdw & 7)
691 OUT_CS(rcs, 0x00000000); /* NOP packet */
692 break;
693 case RING_GFX:
694 /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
695 while (rcs->cdw & 7)
696 OUT_CS(rcs, 0xffff1000); /* type3 nop packet */
697
698 /* Also pad the const IB. */
699 if (cs->const_ib.ib_mapped)
700 while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
701 OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */
702
703 if (cs->const_preamble_ib.ib_mapped)
704 while (!cs->const_preamble_ib.base.cdw || (cs->const_preamble_ib.base.cdw & 7))
705 OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
706 break;
707 case RING_UVD:
708 while (rcs->cdw & 15)
709 OUT_CS(rcs, 0x80000000); /* type2 nop packet */
710 break;
711 default:
712 break;
713 }
714
715 if (rcs->cdw > rcs->max_dw) {
716 fprintf(stderr, "amdgpu: command stream overflowed\n");
717 }
718
719 amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
720 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
721
722 if (cs->const_ib.ib_mapped)
723 amdgpu_cs_add_buffer(rcs, cs->const_ib.big_ib_buffer,
724 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
725
726 if (cs->const_preamble_ib.ib_mapped)
727 amdgpu_cs_add_buffer(rcs, cs->const_preamble_ib.big_ib_buffer,
728 RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);
729
730 /* Submit only if the CS is non-empty, hasn't overflowed, and NOOP mode is off. */
731 if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
732 int r;
733
734 /* Use a buffer list containing all allocated buffers if requested. */
735 if (debug_get_option_all_bos()) {
736 struct amdgpu_winsys_bo *bo;
737 amdgpu_bo_handle *handles;
738 unsigned num = 0;
739
740 pipe_mutex_lock(ws->global_bo_list_lock);
741
742 handles = malloc(sizeof(handles[0]) * ws->num_buffers);
743 if (!handles) {
744 pipe_mutex_unlock(ws->global_bo_list_lock);
745 goto cleanup;
746 }
747
748 LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
749 assert(num < ws->num_buffers);
750 handles[num++] = bo->bo;
751 }
752
753 r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
754 handles, NULL,
755 &cs->request.resources);
756 free(handles);
757 pipe_mutex_unlock(ws->global_bo_list_lock);
758 } else {
759 r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
760 cs->handles, cs->flags,
761 &cs->request.resources);
762 }
763
764 if (r) {
765 fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
766 cs->request.resources = NULL;
767 goto cleanup;
768 }
769
770 cs->ib[IB_MAIN].size = cs->main.base.cdw;
771 cs->main.used_ib_space += cs->main.base.cdw * 4;
772
773 if (cs->const_ib.ib_mapped) {
774 cs->ib[IB_CONST].size = cs->const_ib.base.cdw;
775 cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
776 }
777
778 if (cs->const_preamble_ib.ib_mapped) {
779 cs->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
780 cs->const_preamble_ib.used_ib_space += cs->const_preamble_ib.base.cdw * 4;
781 }
782
783 amdgpu_cs_do_submission(cs, fence);
784
785 /* Cleanup. */
786 if (cs->request.resources)
787 amdgpu_bo_list_destroy(cs->request.resources);
788 }
789
790 cleanup:
791 amdgpu_cs_context_cleanup(cs);
792
793 amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN);
794 if (cs->const_ib.ib_mapped)
795 amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST);
796 if (cs->const_preamble_ib.ib_mapped)
797 amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
798 &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE);
799
800 ws->num_cs_flushes++;
801 }
802
803 static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
804 {
805 struct amdgpu_cs *cs = amdgpu_cs(rcs);
806
807 amdgpu_destroy_cs_context(cs);
808 p_atomic_dec(&cs->ctx->ws->num_cs);
809 pb_reference(&cs->main.big_ib_buffer, NULL);
810 pb_reference(&cs->const_ib.big_ib_buffer, NULL);
811 pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
812 FREE(cs);
813 }
814
815 static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
816 struct pb_buffer *_buf,
817 enum radeon_bo_usage usage)
818 {
819 struct amdgpu_cs *cs = amdgpu_cs(rcs);
820 struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;
821
822 return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
823 }
824
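/* Plug the command-submission entry points into the winsys vtable. A rough
 * sketch of how a gallium driver ends up using them (simplified, no error
 * handling; driver_flush_callback, driver_data, buf and priority are
 * placeholders; the real interface is declared in radeon_winsys.h):
 *
 *    struct radeon_winsys_ctx *ctx = ws->ctx_create(ws);
 *    struct radeon_winsys_cs *cs =
 *       ws->cs_create(ctx, RING_GFX, driver_flush_callback, driver_data);
 *
 *    ws->cs_add_buffer(cs, buf, RADEON_USAGE_READWRITE,
 *                      RADEON_DOMAIN_VRAM, priority); // one of RADEON_PRIO_*
 *    ... emit packets into cs->buf / cs->cdw ...
 *
 *    struct pipe_fence_handle *fence = NULL;
 *    ws->cs_flush(cs, 0, &fence);
 *    ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
 *    ws->fence_reference(&fence, NULL);
 */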
825 void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
826 {
827 ws->base.ctx_create = amdgpu_ctx_create;
828 ws->base.ctx_destroy = amdgpu_ctx_destroy;
829 ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
830 ws->base.cs_create = amdgpu_cs_create;
831 ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
832 ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
833 ws->base.cs_destroy = amdgpu_cs_destroy;
834 ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
835 ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
836 ws->base.cs_validate = amdgpu_cs_validate;
837 ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
838 ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
839 ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
840 ws->base.cs_flush = amdgpu_cs_flush;
841 ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
842 ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
843 ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
844 ws->base.fence_reference = amdgpu_fence_reference;
845 }