/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
33 #include "amdgpu_cs.h"
34 #include "os/os_time.h"
36 #include <amdgpu_drm.h>
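
/* FENCES */

/*
 * A pipe_fence_handle here wraps an amdgpu_cs_fence (context, IP type and
 * instance, ring, sequence number) together with a pointer into the
 * context's user fence BO, which the GPU writes on completion, so fence
 * queries can usually be answered without an ioctl.
 */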
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
}

static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
}
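
/*
 * Wait for the fence to signal. "timeout" is in nanoseconds; it is taken
 * as an absolute deadline when "absolute" is set and as a relative timeout
 * otherwise. A relative timeout of 0 makes this a non-blocking query.
 */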
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu) {
      if (*user_fence_cpu >= rfence->fence.fence) {
         rfence->signalled = true;
         return true;
      }

      /* No timeout, just query: no need for the ioctl. */
      if (!absolute && !timeout)
         return false;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }
   return false;
}
static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}
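
/* CONTEXTS */

/*
 * Each context allocates one page of GTT for user fences: sequence numbers
 * the GPU writes back after each submission (except on the UVD/VCE rings),
 * so that fence status can be polled from the CPU without entering the
 * kernel.
 */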
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   if (!ctx)
      return NULL;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      goto error_create;
   }

   alloc_buffer.alloc_size = ctx->ws->info.gart_page_size;
   alloc_buffer.phys_alignment = ctx->ws->info.gart_page_size;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      goto error_user_fence_alloc;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      goto error_user_fence_map;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;

error_user_fence_map:
   amdgpu_bo_free(buf_handle);
error_user_fence_alloc:
   amdgpu_cs_ctx_free(ctx->ctx);
error_create:
   FREE(ctx);
   return NULL;
}
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}

static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}
/* COMMAND SUBMISSION */
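
/*
 * IBs are suballocated from a larger "big IB buffer": each amdgpu_get_new_ib
 * call carves the next ib_size bytes out of it, and a fresh buffer is only
 * allocated once the current one is exhausted. This keeps individual IBs
 * small (see the note below) without paying one BO allocation per IB.
 */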
static bool amdgpu_get_new_ib(struct radeon_winsys *ws, struct amdgpu_ib *ib,
                              struct amdgpu_cs_ib_info *info, unsigned ib_type)
{
   struct amdgpu_winsys *aws = (struct amdgpu_winsys*)ws;
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   unsigned buffer_size, ib_size;

   switch (ib_type) {
   case IB_CONST_PREAMBLE:
      buffer_size = 4 * 1024 * 4;
      ib_size = 1024 * 4;
      break;
   case IB_CONST:
      buffer_size = 512 * 1024 * 4;
      ib_size = 128 * 1024 * 4;
      break;
   case IB_MAIN:
      buffer_size = 128 * 1024 * 4;
      ib_size = 20 * 1024 * 4;
      break;
   default:
      unreachable("unhandled IB type");
   }

   ib->base.cdw = 0;
   ib->base.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!ib->big_ib_buffer ||
       ib->used_ib_space + ib_size > ib->big_ib_buffer->size) {

      pb_reference(&ib->big_ib_buffer, NULL);
      ib->ib_mapped = NULL;
      ib->used_ib_space = 0;

      ib->big_ib_buffer = ws->buffer_create(ws, buffer_size,
                                            aws->info.gart_page_size,
                                            RADEON_DOMAIN_GTT,
                                            RADEON_FLAG_CPU_ACCESS);
      if (!ib->big_ib_buffer)
         return false;

      ib->ib_mapped = ws->buffer_map(ib->big_ib_buffer, NULL,
                                     PIPE_TRANSFER_WRITE);
      if (!ib->ib_mapped) {
         pb_reference(&ib->big_ib_buffer, NULL);
         return false;
      }
   }

   info->ib_mc_address = amdgpu_winsys_bo(ib->big_ib_buffer)->va +
                         ib->used_ib_space;
   ib->base.buf = (uint32_t*)(ib->ib_mapped + ib->used_ib_space);
   ib->base.max_dw = ib_size / 4;
   return true;
}
static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
                                      enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers)
      return FALSE;

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return FALSE;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return FALSE;
   }

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   return TRUE;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;

   for (i = 0; i < ARRAY_SIZE(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs)
      return NULL;

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->ring_type = ring_type;

   if (!amdgpu_init_cs_context(cs, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_get_new_ib(&ctx->ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN)) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib[IB_MAIN];

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->main.base;
}
static struct radeon_winsys_cs *
amdgpu_cs_add_const_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const IB can be added */
   if (cs->ring_type != RING_GFX || cs->const_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST))
      return NULL;

   cs->request.number_of_ibs = 2;
   cs->request.ibs = &cs->ib[IB_CONST];
   cs->ib[IB_CONST].flags = AMDGPU_IB_FLAG_CE;

   return &cs->const_ib.base;
}
static struct radeon_winsys_cs *
amdgpu_cs_add_const_preamble_ib(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = (struct amdgpu_cs*)rcs;
   struct amdgpu_winsys *ws = cs->ctx->ws;

   /* only one const preamble IB can be added and only when the const IB has
    * also been mapped */
   if (cs->ring_type != RING_GFX || !cs->const_ib.ib_mapped ||
       cs->const_preamble_ib.ib_mapped)
      return NULL;

   if (!amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
                          &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE))
      return NULL;

   cs->request.number_of_ibs = 3;
   cs->request.ibs = &cs->ib[IB_CONST_PREAMBLE];
   cs->ib[IB_CONST_PREAMBLE].flags = AMDGPU_IB_FLAG_CE | AMDGPU_IB_FLAG_PREAMBLE;

   return &cs->const_preamble_ib.base;
}
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
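/* Example (used for ring padding in amdgpu_cs_flush below):
 * OUT_CS(rcs, 0xffff1000) appends a type-3 NOP dword to the command stream
 * and advances the write pointer. */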
int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}
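
/*
 * The buffer list is kept in three parallel arrays (amdgpu_cs_buffer
 * records, kernel amdgpu_bo_handles, and per-buffer priority flags) so
 * that the handle and flag arrays can be passed straight to
 * amdgpu_bo_list_create at flush time without repacking.
 */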
static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (ARRAY_SIZE(cs->buffer_indices_hashlist)-1);
   int i;

   assert(priority < 64);

   i = amdgpu_lookup_buffer(cs, bo);
   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      unsigned size;

      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];

   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->used_vram += bo->base.size;
   else if (added_domains & RADEON_DOMAIN_GTT)
      cs->used_gart += bo->base.size;

   return index;
}
static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs, (struct amdgpu_winsys_bo*)buf);
}
static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}
static boolean
amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   vram += cs->used_vram;
   gtt += cs->used_gart;

   /* Anything that goes above the VRAM size should go to GTT. */
   if (vram > ws->info.vram_size)
      gtt += vram - ws->info.vram_size;

   /* Now we just need to check if we have enough GTT. */
   return gtt < ws->info.gart_size * 0.7;
}
static uint64_t amdgpu_cs_query_memory_usage(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return cs->used_vram + cs->used_gart;
}
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }
   return cs->num_buffers;
}
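
/*
 * Build the final request and submit it to the kernel. The kernel doesn't
 * synchronize execution between rings, so every still-pending fence on a
 * referenced buffer is added as an explicit dependency first; on success,
 * the new fence is recorded on every buffer in the CS.
 */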
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
                                    struct pipe_fence_handle **out_fence)
{
   struct amdgpu_winsys *ws = cs->ctx->ws;
   struct pipe_fence_handle *fence;
   int i, j, r;

   /* Create a fence. */
   fence = amdgpu_fence_create(cs->ctx,
                               cs->request.ip_type,
                               cs->request.ip_instance,
                               cs->request.ring);
   if (out_fence)
      amdgpu_fence_reference(out_fence, fence);

   cs->request.number_of_dependencies = 0;

   /* Since the kernel driver doesn't synchronize execution between different
    * rings automatically, we have to add fence dependencies manually. */
   pipe_mutex_lock(ws->bo_fence_lock);
   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == cs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }

   cs->request.fence_info.handle = NULL;
   if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
      cs->request.fence_info.handle = cs->ctx->user_fence_bo;
      cs->request.fence_info.offset = cs->ring_type;
   }

   r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(fence);
   } else {
      /* Success. */
      uint64_t *user_fence = NULL;
      if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
         user_fence = cs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(fence, &cs->request, user_fence);

      for (i = 0; i < cs->num_buffers; i++)
         amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->ring_type],
                                fence);
   }
   pipe_mutex_unlock(ws->bo_fence_lock);
   amdgpu_fence_reference(&fence, NULL);
}
static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   /* no-op */
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
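
/*
 * Debug options: RADEON_NOOP=1 silently drops every CS instead of
 * submitting it, and RADEON_ALL_BOS=1 submits the global list of all
 * allocated buffers instead of the per-CS buffer list.
 */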
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
         OUT_CS(rcs, 0xffff1000); /* type3 nop packet */

      /* Also pad the const IB. */
      if (cs->const_ib.ib_mapped)
         while (!cs->const_ib.base.cdw || (cs->const_ib.base.cdw & 7))
            OUT_CS(&cs->const_ib.base, 0xffff1000); /* type3 nop packet */

      if (cs->const_preamble_ib.ib_mapped)
         while (!cs->const_preamble_ib.base.cdw || (cs->const_preamble_ib.base.cdw & 7))
            OUT_CS(&cs->const_preamble_ib.base, 0xffff1000);
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
         OUT_CS(rcs, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->cdw > rcs->max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   amdgpu_cs_add_buffer(rcs, cs->main.big_ib_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   if (cs->const_ib.ib_mapped)
      amdgpu_cs_add_buffer(rcs, cs->const_ib.big_ib_buffer,
                           RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_cs_add_buffer(rcs, cs->const_preamble_ib.big_ib_buffer,
                           RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   /* If the CS is not empty or overflowed.... */
   if (cs->main.base.cdw && cs->main.base.cdw <= cs->main.base.max_dw && !debug_get_option_noop()) {
      int r;

      /* Use a buffer list containing all allocated buffers if requested. */
      if (debug_get_option_all_bos()) {
         struct amdgpu_winsys_bo *bo;
         amdgpu_bo_handle *handles;
         unsigned num = 0;

         pipe_mutex_lock(ws->global_bo_list_lock);

         handles = malloc(sizeof(handles[0]) * ws->num_buffers);
         if (!handles) {
            pipe_mutex_unlock(ws->global_bo_list_lock);
            goto cleanup;
         }

         LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
            assert(num < ws->num_buffers);
            handles[num++] = bo->bo;
         }

         r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                   handles, NULL,
                                   &cs->request.resources);
         free(handles);
         pipe_mutex_unlock(ws->global_bo_list_lock);
      } else {
         r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                   cs->handles, cs->flags,
                                   &cs->request.resources);
      }

      if (r) {
         fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
         cs->request.resources = NULL;
         goto cleanup;
      }

      cs->ib[IB_MAIN].size = cs->main.base.cdw;
      cs->main.used_ib_space += cs->main.base.cdw * 4;

      if (cs->const_ib.ib_mapped) {
         cs->ib[IB_CONST].size = cs->const_ib.base.cdw;
         cs->const_ib.used_ib_space += cs->const_ib.base.cdw * 4;
      }

      if (cs->const_preamble_ib.ib_mapped) {
         cs->ib[IB_CONST_PREAMBLE].size = cs->const_preamble_ib.base.cdw;
         cs->const_preamble_ib.used_ib_space +=
            cs->const_preamble_ib.base.cdw * 4;
      }

      amdgpu_cs_do_submission(cs, fence);

      /* Cleanup. */
      if (cs->request.resources)
         amdgpu_bo_list_destroy(cs->request.resources);
   }

cleanup:
   amdgpu_cs_context_cleanup(cs);

   amdgpu_get_new_ib(&ws->base, &cs->main, &cs->ib[IB_MAIN], IB_MAIN);
   if (cs->const_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, &cs->const_ib, &cs->ib[IB_CONST], IB_CONST);
   if (cs->const_preamble_ib.ib_mapped)
      amdgpu_get_new_ib(&ws->base, &cs->const_preamble_ib,
                        &cs->ib[IB_CONST_PREAMBLE], IB_CONST_PREAMBLE);

   ws->num_cs_flushes++;
}
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->main.big_ib_buffer, NULL);
   pb_reference(&cs->const_ib.big_ib_buffer, NULL);
   pb_reference(&cs->const_preamble_ib.big_ib_buffer, NULL);
   FREE(cs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
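
/* Entry point: plug the command-submission functions into the winsys vtable. */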
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_add_const_ib = amdgpu_cs_add_const_ib;
   ws->base.cs_add_const_preamble_ib = amdgpu_cs_add_const_preamble_ib;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_query_memory_usage = amdgpu_cs_query_memory_usage;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}