/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 */
#include "amdgpu_cs.h"
#include "os/os_time.h"
#include <stdio.h>
#include <amdgpu_drm.h>

/* FENCES */
static struct pipe_fence_handle *
amdgpu_fence_create(struct amdgpu_ctx *ctx, unsigned ip_type,
                    unsigned ip_instance, unsigned ring)
{
   struct amdgpu_fence *fence = CALLOC_STRUCT(amdgpu_fence);

   fence->reference.count = 1;
   fence->ctx = ctx;
   fence->fence.context = ctx->ctx;
   fence->fence.ip_type = ip_type;
   fence->fence.ip_instance = ip_instance;
   fence->fence.ring = ring;
   p_atomic_inc(&ctx->refcount);
   return (struct pipe_fence_handle *)fence;
}
static void amdgpu_fence_submitted(struct pipe_fence_handle *fence,
                                   struct amdgpu_cs_request* request,
                                   uint64_t *user_fence_cpu_address)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->fence.fence = request->seq_no;
   rfence->user_fence_cpu_address = user_fence_cpu_address;
}
static void amdgpu_fence_signalled(struct pipe_fence_handle *fence)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;

   rfence->signalled = true;
}
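
/* Waiting is a two-stage check: first poll the CPU-visible user fence that
 * the GPU writes, which is cheap, and only fall back to the
 * amdgpu_cs_query_fence_status() ioctl when the user fence has not yet
 * reached the sequence number we are waiting for. */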
bool amdgpu_fence_wait(struct pipe_fence_handle *fence, uint64_t timeout,
                       bool absolute)
{
   struct amdgpu_fence *rfence = (struct amdgpu_fence*)fence;
   uint32_t expired;
   int64_t abs_timeout;
   uint64_t *user_fence_cpu;
   int r;

   if (rfence->signalled)
      return true;

   if (absolute)
      abs_timeout = timeout;
   else
      abs_timeout = os_time_get_absolute_timeout(timeout);

   user_fence_cpu = rfence->user_fence_cpu_address;
   if (user_fence_cpu && *user_fence_cpu >= rfence->fence.fence) {
      rfence->signalled = true;
      return true;
   }

   /* Now use the libdrm query. */
   r = amdgpu_cs_query_fence_status(&rfence->fence,
                                    abs_timeout,
                                    AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE,
                                    &expired);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_fence_status failed.\n");
      return false;
   }

   if (expired) {
      /* This variable can only transition from false to true, so it doesn't
       * matter if threads race for it. */
      rfence->signalled = true;
      return true;
   }

   return false;
}
static bool amdgpu_fence_wait_rel_timeout(struct radeon_winsys *rws,
                                          struct pipe_fence_handle *fence,
                                          uint64_t timeout)
{
   return amdgpu_fence_wait(fence, timeout, false);
}

/* CONTEXTS */
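
/* Usage sketch (illustrative, not from this file): a driver waits through
 * the winsys vtable, e.g.
 *
 *    if (!ws->fence_wait(ws, fence, timeout_ns))
 *       ... the timeout expired before the fence signalled ...
 *
 * A timeout of 0 turns the wait into a poll of the signalled state;
 * amdgpu_cs_do_submission below relies on this to test whether a buffer's
 * previous fence has already signalled. */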
static struct radeon_winsys_ctx *amdgpu_ctx_create(struct radeon_winsys *ws)
{
   struct amdgpu_ctx *ctx = CALLOC_STRUCT(amdgpu_ctx);
   int r;
   struct amdgpu_bo_alloc_request alloc_buffer = {};
   amdgpu_bo_handle buf_handle;

   ctx->ws = amdgpu_winsys(ws);
   ctx->refcount = 1;

   r = amdgpu_cs_ctx_create(ctx->ws->dev, &ctx->ctx);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_ctx_create failed. (%i)\n", r);
      FREE(ctx);
      return NULL;
   }

   alloc_buffer.alloc_size = 4 * 1024;
   alloc_buffer.phys_alignment = 4 * 1024;
   alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

   r = amdgpu_bo_alloc(ctx->ws->dev, &alloc_buffer, &buf_handle);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_alloc failed. (%i)\n", r);
      amdgpu_cs_ctx_free(ctx->ctx);
      FREE(ctx);
      return NULL;
   }

   r = amdgpu_bo_cpu_map(buf_handle, (void**)&ctx->user_fence_cpu_address_base);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_bo_cpu_map failed. (%i)\n", r);
      amdgpu_bo_free(buf_handle);
      amdgpu_cs_ctx_free(ctx->ctx);
      FREE(ctx);
      return NULL;
   }

   memset(ctx->user_fence_cpu_address_base, 0, alloc_buffer.alloc_size);
   ctx->user_fence_bo = buf_handle;

   return (struct radeon_winsys_ctx*)ctx;
}
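
/* The 4 KB GTT buffer allocated above is the "user fence" page: after each
 * submission the GPU writes the job's sequence number into it, and
 * amdgpu_fence_wait polls it through user_fence_cpu_address. Zeroing it
 * ensures freshly created fences read as unsignalled. */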
static void amdgpu_ctx_destroy(struct radeon_winsys_ctx *rwctx)
{
   amdgpu_ctx_unref((struct amdgpu_ctx*)rwctx);
}
static enum pipe_reset_status
amdgpu_ctx_query_reset_status(struct radeon_winsys_ctx *rwctx)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   uint32_t result, hangs;
   int r;

   r = amdgpu_cs_query_reset_state(ctx->ctx, &result, &hangs);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_cs_query_reset_state failed. (%i)\n", r);
      return PIPE_NO_RESET;
   }

   switch (result) {
   case AMDGPU_CTX_GUILTY_RESET:
      return PIPE_GUILTY_CONTEXT_RESET;
   case AMDGPU_CTX_INNOCENT_RESET:
      return PIPE_INNOCENT_CONTEXT_RESET;
   case AMDGPU_CTX_UNKNOWN_RESET:
      return PIPE_UNKNOWN_CONTEXT_RESET;
   case AMDGPU_CTX_NO_RESET:
   default:
      return PIPE_NO_RESET;
   }
}
/* COMMAND SUBMISSION */
static bool amdgpu_get_new_ib(struct amdgpu_cs *cs)
{
   /* Small IBs are better than big IBs, because the GPU goes idle quicker
    * and there is less waiting for buffers and fences. Proof:
    *   http://www.phoronix.com/scan.php?page=article&item=mesa-111-si&num=1
    */
   const unsigned buffer_size = 128 * 1024 * 4;
   const unsigned ib_size = 20 * 1024 * 4;

   cs->base.cdw = 0;
   cs->base.buf = NULL;

   /* Allocate a new buffer for IBs if the current buffer is all used. */
   if (!cs->big_ib_buffer ||
       cs->used_ib_space + ib_size > cs->big_ib_buffer->size) {
      struct radeon_winsys *ws = &cs->ctx->ws->base;

      pb_reference(&cs->big_ib_buffer, NULL);
      cs->big_ib_winsys_buffer = NULL;
      cs->ib_mapped = NULL;
      cs->used_ib_space = 0;

      cs->big_ib_buffer = ws->buffer_create(ws, buffer_size,
                                            4096, true,
                                            RADEON_DOMAIN_GTT,
                                            RADEON_FLAG_CPU_ACCESS);
      if (!cs->big_ib_buffer)
         return false;

      cs->ib_mapped = ws->buffer_map(cs->big_ib_buffer, NULL,
                                     PIPE_TRANSFER_WRITE);
      if (!cs->ib_mapped) {
         pb_reference(&cs->big_ib_buffer, NULL);
         return false;
      }

      cs->big_ib_winsys_buffer = (struct amdgpu_winsys_bo*)cs->big_ib_buffer;
   }

   cs->ib.ib_mc_address = cs->big_ib_winsys_buffer->va + cs->used_ib_space;
   cs->base.buf = (uint32_t*)(cs->ib_mapped + cs->used_ib_space);
   cs->base.max_dw = ib_size / 4;
   return true;
}
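
/* Sizing note: ib_size is 20 * 1024 * 4 = 80 KB and buffer_size is
 * 128 * 1024 * 4 = 512 KB, so one big IB buffer provides room for six
 * 80 KB IB windows (plus slack) before a new buffer must be allocated. */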
static boolean amdgpu_init_cs_context(struct amdgpu_cs *cs,
                                      enum ring_type ring_type)
{
   int i;

   switch (ring_type) {
   case RING_DMA:
      cs->request.ip_type = AMDGPU_HW_IP_DMA;
      break;

   case RING_UVD:
      cs->request.ip_type = AMDGPU_HW_IP_UVD;
      break;

   case RING_VCE:
      cs->request.ip_type = AMDGPU_HW_IP_VCE;
      break;

   case RING_COMPUTE:
      cs->request.ip_type = AMDGPU_HW_IP_COMPUTE;
      break;

   default:
   case RING_GFX:
      cs->request.ip_type = AMDGPU_HW_IP_GFX;
      break;
   }

   cs->request.number_of_ibs = 1;
   cs->request.ibs = &cs->ib;

   cs->max_num_buffers = 512;
   cs->buffers = (struct amdgpu_cs_buffer*)
                 CALLOC(1, cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer));
   if (!cs->buffers) {
      return FALSE;
   }

   cs->handles = CALLOC(1, cs->max_num_buffers * sizeof(amdgpu_bo_handle));
   if (!cs->handles) {
      FREE(cs->buffers);
      return FALSE;
   }

   cs->flags = CALLOC(1, cs->max_num_buffers);
   if (!cs->flags) {
      FREE(cs->handles);
      FREE(cs->buffers);
      return FALSE;
   }

   for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
   return TRUE;
}
static void amdgpu_cs_context_cleanup(struct amdgpu_cs *cs)
{
   unsigned i;

   for (i = 0; i < cs->num_buffers; i++) {
      p_atomic_dec(&cs->buffers[i].bo->num_cs_references);
      amdgpu_winsys_bo_reference(&cs->buffers[i].bo, NULL);
      cs->handles[i] = NULL;
      cs->flags[i] = 0;
   }

   cs->num_buffers = 0;
   cs->used_gart = 0;
   cs->used_vram = 0;

   for (i = 0; i < Elements(cs->buffer_indices_hashlist); i++) {
      cs->buffer_indices_hashlist[i] = -1;
   }
}
static void amdgpu_destroy_cs_context(struct amdgpu_cs *cs)
{
   amdgpu_cs_context_cleanup(cs);
   FREE(cs->flags);
   FREE(cs->buffers);
   FREE(cs->handles);
   FREE(cs->request.dependencies);
}
static struct radeon_winsys_cs *
amdgpu_cs_create(struct radeon_winsys_ctx *rwctx,
                 enum ring_type ring_type,
                 void (*flush)(void *ctx, unsigned flags,
                               struct pipe_fence_handle **fence),
                 void *flush_ctx,
                 struct pb_buffer *trace_buf)
{
   struct amdgpu_ctx *ctx = (struct amdgpu_ctx*)rwctx;
   struct amdgpu_cs *cs;

   cs = CALLOC_STRUCT(amdgpu_cs);
   if (!cs) {
      return NULL;
   }

   cs->ctx = ctx;
   cs->flush_cs = flush;
   cs->flush_data = flush_ctx;
   cs->base.ring_type = ring_type;

   if (!amdgpu_init_cs_context(cs, ring_type)) {
      FREE(cs);
      return NULL;
   }

   if (!amdgpu_get_new_ib(cs)) {
      amdgpu_destroy_cs_context(cs);
      FREE(cs);
      return NULL;
   }

   p_atomic_inc(&ctx->ws->num_cs);
   return &cs->base;
}
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
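
/* Example: padding the GFX ring in amdgpu_cs_flush below emits
 *
 *    while (rcs->cdw & 7)
 *       OUT_CS(&cs->base, 0xffff1000);   <- type3 nop
 *
 * The macro does no bounds checking; callers must keep cdw below max_dw,
 * which amdgpu_cs_flush only verifies after the fact. */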
int amdgpu_lookup_buffer(struct amdgpu_cs *cs, struct amdgpu_winsys_bo *bo)
{
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = cs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (i == -1 || cs->buffers[i].bo == bo)
      return i;

   /* Hash collision, look for the BO in the list of buffers linearly. */
   for (i = cs->num_buffers - 1; i >= 0; i--) {
      if (cs->buffers[i].bo == bo) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming buffers A,B,C collide in the hash list,
          * the following sequence of buffers:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         cs->buffer_indices_hashlist[hash] = i;
         return i;
      }
   }
   return -1;
}
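
/* Note: the mask-based hash above assumes Elements(buffer_indices_hashlist)
 * is a power of two; "unique_id & (size - 1)" is only equivalent to
 * "unique_id % size" under that assumption. */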
static unsigned amdgpu_add_buffer(struct amdgpu_cs *cs,
                                  struct amdgpu_winsys_bo *bo,
                                  enum radeon_bo_usage usage,
                                  enum radeon_bo_domain domains,
                                  unsigned priority,
                                  enum radeon_bo_domain *added_domains)
{
   struct amdgpu_cs_buffer *buffer;
   unsigned hash = bo->unique_id & (Elements(cs->buffer_indices_hashlist)-1);
   int i = -1;

   assert(priority < 64);

   i = amdgpu_lookup_buffer(cs, bo);

   if (i >= 0) {
      buffer = &cs->buffers[i];
      buffer->priority_usage |= 1llu << priority;
      buffer->usage |= usage;
      *added_domains = domains & ~buffer->domains;
      buffer->domains |= domains;
      cs->flags[i] = MAX2(cs->flags[i], priority / 4);
      return i;
   }

   /* New buffer, check if the backing array is large enough. */
   if (cs->num_buffers >= cs->max_num_buffers) {
      uint32_t size;
      cs->max_num_buffers += 10;

      size = cs->max_num_buffers * sizeof(struct amdgpu_cs_buffer);
      cs->buffers = realloc(cs->buffers, size);

      size = cs->max_num_buffers * sizeof(amdgpu_bo_handle);
      cs->handles = realloc(cs->handles, size);

      cs->flags = realloc(cs->flags, cs->max_num_buffers);
   }

   /* Initialize the new buffer. */
   cs->buffers[cs->num_buffers].bo = NULL;
   amdgpu_winsys_bo_reference(&cs->buffers[cs->num_buffers].bo, bo);
   cs->handles[cs->num_buffers] = bo->bo;
   cs->flags[cs->num_buffers] = priority / 4;
   p_atomic_inc(&bo->num_cs_references);
   buffer = &cs->buffers[cs->num_buffers];
   buffer->bo = bo;
   buffer->priority_usage = 1llu << priority;
   buffer->usage = usage;
   buffer->domains = domains;

   cs->buffer_indices_hashlist[hash] = cs->num_buffers;

   *added_domains = domains;
   return cs->num_buffers++;
}
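
/* Priority bookkeeping: priority must be < 64 because priority_usage is a
 * 64-bit mask (1llu << priority), and the per-buffer flag byte stores
 * priority / 4, i.e. 16 coarse priority classes handed to the kernel via
 * the buffer list in amdgpu_cs_flush. */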
static unsigned amdgpu_cs_add_buffer(struct radeon_winsys_cs *rcs,
                                     struct pb_buffer *buf,
                                     enum radeon_bo_usage usage,
                                     enum radeon_bo_domain domains,
                                     enum radeon_bo_priority priority)
{
   /* Don't use the "domains" parameter. Amdgpu doesn't support changing
    * the buffer placement during command submission.
    */
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)buf;
   enum radeon_bo_domain added_domains;
   unsigned index = amdgpu_add_buffer(cs, bo, usage, bo->initial_domain,
                                      priority, &added_domains);

   if (added_domains & RADEON_DOMAIN_GTT)
      cs->used_gart += bo->base.size;
   if (added_domains & RADEON_DOMAIN_VRAM)
      cs->used_vram += bo->base.size;

   return index;
}
static int amdgpu_cs_lookup_buffer(struct radeon_winsys_cs *rcs,
                                   struct pb_buffer *buf)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return amdgpu_lookup_buffer(cs, (struct amdgpu_winsys_bo*)buf);
}
static boolean amdgpu_cs_validate(struct radeon_winsys_cs *rcs)
{
   return TRUE;
}
static boolean amdgpu_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   return (cs->used_gart + gtt) < cs->ctx->ws->info.gart_size * 0.7 &&
          (cs->used_vram + vram) < cs->ctx->ws->info.vram_size * 0.7;
}
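
/* The 0.7 factors are a heuristic: a CS is reported as "below the limit"
 * only while its projected usage stays under 70% of each heap, which keeps
 * some headroom free in both GART and VRAM. */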
static unsigned amdgpu_cs_get_buffer_list(struct radeon_winsys_cs *rcs,
                                          struct radeon_bo_list_item *list)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   int i;

   if (list) {
      for (i = 0; i < cs->num_buffers; i++) {
         pb_reference(&list[i].buf, &cs->buffers[i].bo->base);
         list[i].vm_address = cs->buffers[i].bo->va;
         list[i].priority_usage = cs->buffers[i].priority_usage;
      }
   }

   return cs->num_buffers;
}
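
/* Overview of amdgpu_cs_do_submission below: create a fence for the job,
 * gather fence dependencies from buffers last used on another context or
 * ring, point fence_info at the per-ring slot of the user fence BO (UVD
 * and VCE have no user fence), submit via amdgpu_cs_submit(), and on
 * success attach the new fence to every buffer in the list. */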
static void amdgpu_cs_do_submission(struct amdgpu_cs *cs,
                                    struct pipe_fence_handle **out_fence)
{
   struct amdgpu_winsys *ws = cs->ctx->ws;
   struct pipe_fence_handle *fence;
   int i, j, r;

   /* Create a fence. */
   fence = amdgpu_fence_create(cs->ctx,
                               cs->request.ip_type,
                               cs->request.ip_instance,
                               cs->request.ring);
   if (out_fence)
      amdgpu_fence_reference(out_fence, fence);

   cs->request.number_of_dependencies = 0;

   /* Since the kernel driver doesn't synchronize execution between different
    * rings automatically, we have to add fence dependencies manually. */
   pipe_mutex_lock(ws->bo_fence_lock);
   for (i = 0; i < cs->num_buffers; i++) {
      for (j = 0; j < RING_LAST; j++) {
         struct amdgpu_cs_fence *dep;
         unsigned idx;

         struct amdgpu_fence *bo_fence = (void *)cs->buffers[i].bo->fence[j];
         if (!bo_fence)
            continue;

         if (bo_fence->ctx == cs->ctx &&
             bo_fence->fence.ip_type == cs->request.ip_type &&
             bo_fence->fence.ip_instance == cs->request.ip_instance &&
             bo_fence->fence.ring == cs->request.ring)
            continue;

         if (amdgpu_fence_wait((void *)bo_fence, 0, false))
            continue;

         idx = cs->request.number_of_dependencies++;
         if (idx >= cs->max_dependencies) {
            unsigned size;

            cs->max_dependencies = idx + 8;
            size = cs->max_dependencies * sizeof(struct amdgpu_cs_fence);
            cs->request.dependencies = realloc(cs->request.dependencies, size);
         }

         dep = &cs->request.dependencies[idx];
         memcpy(dep, &bo_fence->fence, sizeof(*dep));
      }
   }

   cs->request.fence_info.handle = NULL;
   if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE) {
      cs->request.fence_info.handle = cs->ctx->user_fence_bo;
      cs->request.fence_info.offset = cs->base.ring_type;
   }

   r = amdgpu_cs_submit(cs->ctx->ctx, 0, &cs->request, 1);
   if (r) {
      if (r == -ENOMEM)
         fprintf(stderr, "amdgpu: Not enough memory for command submission.\n");
      else
         fprintf(stderr, "amdgpu: The CS has been rejected, "
                 "see dmesg for more information.\n");

      amdgpu_fence_signalled(fence);
   } else {
      uint64_t *user_fence = NULL;
      if (cs->request.ip_type != AMDGPU_HW_IP_UVD && cs->request.ip_type != AMDGPU_HW_IP_VCE)
         user_fence = cs->ctx->user_fence_cpu_address_base +
                      cs->request.fence_info.offset;
      amdgpu_fence_submitted(fence, &cs->request, user_fence);

      for (i = 0; i < cs->num_buffers; i++)
         amdgpu_fence_reference(&cs->buffers[i].bo->fence[cs->base.ring_type],
                                fence);
   }
   pipe_mutex_unlock(ws->bo_fence_lock);
   amdgpu_fence_reference(&fence, NULL);
}
static void amdgpu_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
   /* no-op */
}
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", FALSE)
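
/* These debug options are read once from the environment on first use, e.g.
 *    RADEON_NOOP=1    - build IBs normally but skip the actual submission
 *    RADEON_ALL_BOS=1 - submit with a buffer list containing every
 *                       allocated buffer instead of the per-CS list
 * Both are consulted in amdgpu_cs_flush below. */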
static void amdgpu_cs_flush(struct radeon_winsys_cs *rcs,
                            unsigned flags,
                            struct pipe_fence_handle **fence,
                            uint32_t cs_trace_id)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys *ws = cs->ctx->ws;

   switch (cs->base.ring_type) {
   case RING_DMA:
      /* pad DMA ring to 8 DWs */
      while (rcs->cdw & 7)
         OUT_CS(&cs->base, 0x00000000); /* NOP packet */
      break;
   case RING_GFX:
      /* pad GFX ring to 8 DWs to meet CP fetch alignment requirements */
      while (rcs->cdw & 7)
         OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
      break;
   case RING_UVD:
      while (rcs->cdw & 15)
         OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
      break;
   default:
      break;
   }

   if (rcs->cdw > rcs->max_dw) {
      fprintf(stderr, "amdgpu: command stream overflowed\n");
   }

   amdgpu_cs_add_buffer(rcs, (void*)cs->big_ib_winsys_buffer,
                        RADEON_USAGE_READ, 0, RADEON_PRIO_IB1);

   /* If the CS is not empty or overflowed.... */
   if (cs->base.cdw && cs->base.cdw <= cs->base.max_dw && !debug_get_option_noop()) {
      int r;

      /* Use a buffer list containing all allocated buffers if requested. */
      if (debug_get_option_all_bos()) {
         struct amdgpu_winsys_bo *bo;
         amdgpu_bo_handle *handles;
         unsigned num = 0;

         pipe_mutex_lock(ws->global_bo_list_lock);

         handles = malloc(sizeof(handles[0]) * ws->num_buffers);
         if (!handles) {
            pipe_mutex_unlock(ws->global_bo_list_lock);
            goto cleanup;
         }

         LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, global_list_item) {
            assert(num < ws->num_buffers);
            handles[num++] = bo->bo;
         }

         r = amdgpu_bo_list_create(ws->dev, ws->num_buffers,
                                   handles, NULL,
                                   &cs->request.resources);
         free(handles);
         pipe_mutex_unlock(ws->global_bo_list_lock);
      } else {
         r = amdgpu_bo_list_create(ws->dev, cs->num_buffers,
                                   cs->handles, cs->flags,
                                   &cs->request.resources);
      }

      if (r) {
         fprintf(stderr, "amdgpu: resource list creation failed (%d)\n", r);
         cs->request.resources = NULL;
         goto cleanup;
      }

      cs->ib.size = cs->base.cdw;
      cs->used_ib_space += cs->base.cdw * 4;

      amdgpu_cs_do_submission(cs, fence);

      /* Cleanup. */
      if (cs->request.resources)
         amdgpu_bo_list_destroy(cs->request.resources);
   }

cleanup:
   amdgpu_cs_context_cleanup(cs);
   amdgpu_get_new_ib(cs);

   ws->num_cs_flushes++;
}
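
/* Note: the cleanup label runs on both the success and failure paths, so a
 * flush always releases the per-CS buffer state and prepares a fresh IB
 * window via amdgpu_get_new_ib(), even when the submission was skipped or
 * rejected. */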
static void amdgpu_cs_destroy(struct radeon_winsys_cs *rcs)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);

   amdgpu_destroy_cs_context(cs);
   p_atomic_dec(&cs->ctx->ws->num_cs);
   pb_reference(&cs->big_ib_buffer, NULL);
   FREE(cs);
}
static boolean amdgpu_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct pb_buffer *_buf,
                                       enum radeon_bo_usage usage)
{
   struct amdgpu_cs *cs = amdgpu_cs(rcs);
   struct amdgpu_winsys_bo *bo = (struct amdgpu_winsys_bo*)_buf;

   return amdgpu_bo_is_referenced_by_cs_with_usage(cs, bo, usage);
}
void amdgpu_cs_init_functions(struct amdgpu_winsys *ws)
{
   ws->base.ctx_create = amdgpu_ctx_create;
   ws->base.ctx_destroy = amdgpu_ctx_destroy;
   ws->base.ctx_query_reset_status = amdgpu_ctx_query_reset_status;
   ws->base.cs_create = amdgpu_cs_create;
   ws->base.cs_destroy = amdgpu_cs_destroy;
   ws->base.cs_add_buffer = amdgpu_cs_add_buffer;
   ws->base.cs_lookup_buffer = amdgpu_cs_lookup_buffer;
   ws->base.cs_validate = amdgpu_cs_validate;
   ws->base.cs_memory_below_limit = amdgpu_cs_memory_below_limit;
   ws->base.cs_get_buffer_list = amdgpu_cs_get_buffer_list;
   ws->base.cs_flush = amdgpu_cs_flush;
   ws->base.cs_is_buffer_referenced = amdgpu_bo_is_referenced;
   ws->base.cs_sync_flush = amdgpu_cs_sync_flush;
   ws->base.fence_wait = amdgpu_fence_wait_rel_timeout;
   ws->base.fence_reference = amdgpu_fence_reference;
}