winsys/amdgpu: add winsys function cs_get_buffer_list
[mesa.git] src/gallium/winsys/amdgpu/drm/amdgpu_cs.c
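
This adds a new winsys entry point, cs_get_buffer_list, which lets drivers
inspect the buffer list of a command stream. Called with list == NULL it only
returns the number of buffers; called with an array it also fills in one
radeon_bo_list_item per buffer and takes a pb_buffer reference on each entry,
which the caller must release. A minimal caller sketch (hypothetical, not part
of this patch; assumes Mesa's u_memory.h macros and <inttypes.h>):

   unsigned i, count = ws->cs_get_buffer_list(cs, NULL);
   /* CALLOC zeroes the array, which pb_reference() requires for list[i].buf */
   struct radeon_bo_list_item *list = CALLOC(count, sizeof(*list));

   ws->cs_get_buffer_list(cs, list);
   for (i = 0; i < count; i++) {
      printf("BO: va=0x%"PRIx64" priority_usage=0x%"PRIx64"\n",
             list[i].vm_address, list[i].priority_usage);
      pb_reference(&list[i].buf, NULL); /* drop the reference taken above */
   }
   FREE(list);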
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>


/* FENCES */
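
/* Create a fence belonging to the given submission context. The fence starts
 * out unsignalled; amdgpu_fence_submitted() fills in the sequence number once
 * the kernel has accepted the job. Takes a reference on the context. */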
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}

static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
}
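
/* Wait for the fence with a relative or absolute timeout. Try the cheap path
 * first: read the user fence value the GPU writes to memory. Only fall back
 * to the amdgpu_cs_query_fence_status ioctl if that doesn't prove the fence
 * signalled. */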
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu && *user_fence_cpu >= rfence->fence.fence) {
      rfence->signalled = true;
      return true;
   }
   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}

static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

/* CONTEXTS */
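
/* Create a submission context. This also allocates and CPU-maps a small GTT
 * buffer that the GPU writes user fence values into; amdgpu_cs_do_submission()
 * uses one 64-bit slot per ring type in it. */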
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      FREE(ctx);
      return NULL;
   }

   alloc_buffer.alloc_size = 4 * 1024;
   alloc_buffer.phys_alignment = 4 * 1024;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      amdgpu_cs_ctx_free(ctx->ctx);
      FREE(ctx);
      return NULL;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      amdgpu_bo_free(buf_handle);
      amdgpu_cs_ctx_free(ctx->ctx);
      FREE(ctx);
      return NULL;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;
}

static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}

/* COMMAND SUBMISSION */

static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
{
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   const unsigned buffer_size = 128 * 1024 * 4;
   const unsigned ib_size = 20 * 1024 * 4;

   cs->base.cdw = 0;
   cs->base.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!cs->big_ib_buffer ||
       cs->used_ib_space + ib_size > cs->big_ib_buffer->size) {
      struct radeon_winsys *ws = &cs->ctx->ws->base;
      struct radeon_winsys_cs_handle *winsys_bo;

      pb_reference(&cs->big_ib_buffer, NULL);
      cs->big_ib_winsys_buffer = NULL;
      cs->ib_mapped = NULL;
      cs->used_ib_space = 0;

      cs->big_ib_buffer = ws->buffer_create(ws, buffer_size,
                                            4096, true,
                                            RADEON_DOMAIN_GTT,
                                            RADEON_FLAG_CPU_ACCESS);
      if (!cs->big_ib_buffer)
         return false;

      winsys_bo = ws->buffer_get_cs_handle(cs->big_ib_buffer);

      cs->ib_mapped = ws->buffer_map(winsys_bo, NULL, PIPE_TRANSFER_WRITE);
      if (!cs->ib_mapped) {
         pb_reference(&cs->big_ib_buffer, NULL);
         return false;
      }

      cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)winsys_bo;
   }

   cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
   cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
   cs->base.max_dw = ib_size / 4;
   return true;
}

static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
                                      enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib;

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers) {
      return FALSE;
   }

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return FALSE;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return FALSE;
   }

   for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   return TRUE;
}

static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;

   for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}

static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}


static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx,
                 struct radeon_winsys_cs_handle *trace_buf)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->base.ring_type = ring_type;

   if (!amdgpu_init_cs_context(cs, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_get_new_ib(cs)) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->base;
}

#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
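
/* Return the index of the given buffer in the CS buffer list, or -1 if the
 * buffer hasn't been added. A small hash table keyed on the BO's unique_id
 * makes repeated lookups of the same buffer cheap; on a collision, fall back
 * to a linear search and re-seed the hash entry. */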
int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}
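
/* Add a buffer to the CS buffer list, or just update its usage, domain and
 * priority flags if it is already there. The backing arrays grow on demand.
 * Returns the buffer index; *added_domains receives the domains that are new
 * for this buffer, so the caller accounts the memory usage only once. */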
static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = -1;

   assert(priority < 64);
   *added_domains = 0;

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->bo = bo;
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}

static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct radeon_winsys_cs_handle *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_GTT)
      cs->used_gart += bo->base.size;
   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->used_vram += bo->base.size;

   return index;
}

static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct radeon_winsys_cs_handle *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs, (struct amdgpu_winsys_bo*)buf);
}

static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}

static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs,
                                            uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   boolean status =
      (cs->used_gart + gtt) < cs->ctx->ws->info.gart_size * 0.7 &&
      (cs->used_vram + vram) < cs->ctx->ws->info.vram_size * 0.7;

   return status;
}
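
/* Return the number of buffers referenced by the CS. If "list" is non-NULL,
 * also fill it, taking a new pb_buffer reference per entry that the caller
 * must release. Call once with list == NULL to learn the required size. */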
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}
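
/* Submit the CS to the kernel: create a fence, add dependencies on fences
 * from other contexts or rings that the referenced buffers are still busy
 * on (the kernel doesn't synchronize rings automatically), and on success
 * attach the new fence to every buffer in the list. UVD and VCE submissions
 * don't use the user fence buffer. */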
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
                                    struct pipe_fence_handle **out_fence)
{
   struct amdgpu_winsys *ws = cs->ctx->ws;
   struct pipe_fence_handle *fence;
   int i, j, r;

   /* Create a fence. */
   fence = amdgpu_fence_create(cs->ctx,
                               cs->request.ip_type,
                               cs->request.ip_instance,
                               cs->request.ring);
   if (out_fence)
      amdgpu_fence_reference(out_fence, fence);

   cs->request.number_of_dependencies = 0;

   /* Since the kernel driver doesn't synchronize execution between different
    * rings automatically, we have to add fence dependencies manually. */
   pipe_mutex_lock(ws->bo_fence_lock);
   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == cs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }

   cs->request.fence_info.handle = NULL;
   if (cs->request.ip_type != AMDGPU_HW_IP_UVD &&
       cs->request.ip_type != AMDGPU_HW_IP_VCE) {
      cs->request.fence_info.handle = cs->ctx->user_fence_bo;
      cs->request.fence_info.offset = cs->base.ring_type;
   }

   r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (cs->request.ip_type != AMDGPU_HW_IP_UVD &&
          cs->request.ip_type != AMDGPU_HW_IP_VCE)
         user_fence = cs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(fence, &cs->request, user_fence);

      for (i = 0; i < cs->num_buffers; i++)
         amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->base.ring_type],
                                fence);
   }
   pipe_mutex_unlock(ws->bo_fence_lock);
   amdgpu_fence_reference(&fence, NULL);
}

static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   /* no-op */
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
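
/* Pad the IB to the alignment required by the ring, add the IB buffer itself
 * to the buffer list, create the kernel buffer list and submit. Setting the
 * RADEON_NOOP environment variable makes flush throw the commands away
 * instead of submitting them, which is a debugging aid. */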
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence,
                            uint32_t cs_trace_id)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->base.ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->cdw & 7)
         OUT_CS(&cs->base, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
         OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
         OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->cdw > rcs->max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   amdgpu_cs_add_buffer(rcs, (void*)cs->big_ib_winsys_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   /* Submit only if the CS is neither empty nor overflowed. */
   if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw &&
       !debug_get_option_noop()) {
      int r;

      r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                cs->handles, cs->flags,
                                &cs->request.resources);

      if (r) {
         fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
         cs->request.resources = NULL;
         goto cleanup;
      }

      cs->ib.size = cs->base.cdw;
      cs->used_ib_space += cs->base.cdw * 4;

      amdgpu_cs_do_submission(cs, fence);

      /* Cleanup. */
      if (cs->request.resources)
         amdgpu_bo_list_destroy(cs->request.resources);
   }

cleanup:
   amdgpu_cs_context_cleanup(cs);
   amdgpu_get_new_ib(cs);

   ws->num_cs_flushes++;
}

static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->big_ib_buffer, NULL);
   FREE(cs);
}

static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct radeon_winsys_cs_handle *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}

void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}