2 * Copyright © 2008 Jérôme Glisse
3 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
16 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
18 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
29 * Marek Olšák <maraeo@gmail.com>
31 * Based on work from libdrm_radeon by:
32 * Aapo Tahkola <aet@rasterburn.org>
33 * Nicolai Haehnle <prefect_@gmx.net>
34 * Jérôme Glisse <glisse@freedesktop.org>
38 This file replaces libdrm's radeon_cs_gem with our own implementation.
39 It's optimized specifically for Radeon DRM.
40 Reloc writes and space checking are faster and simpler than their
41 counterparts in libdrm (the time complexity of all the functions
42 is O(1) in nearly all scenarios, thanks to hashing).
46 cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
47 also adds the size of 'buf' to the used_gart and used_vram winsys variables
48 based on the domains, which are simply or'd for the accounting purposes.
49 The adding is skipped if the reloc is already present in the list, but it
50 accounts any newly-referenced domains.
52 cs_validate is then called, which just checks:
53 used_vram/gart < vram/gart_size * 0.8
54 The 0.8 number allows for some memory fragmentation. If the validation
55 fails, the pipe driver flushes CS and tries to do the validation again,
56 i.e. it validates only that one operation. If it fails again, it drops
57 the operation on the floor and prints some nasty message to stderr.
58 (done in the pipe driver)
60 cs_write_reloc(cs, buf) just writes a reloc that has been added using
61 cs_add_reloc. The read_domain and write_domain parameters have been removed,
62 because we already specify them in cs_add_reloc.
65 #include "radeon_drm_cs.h"
67 #include "util/u_memory.h"
75 * this are copy from radeon_drm, once an updated libdrm is released
76 * we should bump configure.ac requirement for it and remove the following
79 #ifndef RADEON_CHUNK_ID_FLAGS
80 #define RADEON_CHUNK_ID_FLAGS 0x03
82 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
83 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
86 #ifndef RADEON_CS_USE_VM
87 #define RADEON_CS_USE_VM 0x02
88 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
89 #define RADEON_CS_RING_GFX 0
90 #define RADEON_CS_RING_COMPUTE 1
93 #ifndef RADEON_CS_RING_DMA
94 #define RADEON_CS_RING_DMA 2
97 #ifndef RADEON_CS_RING_UVD
98 #define RADEON_CS_RING_UVD 3
101 #ifndef RADEON_CS_END_OF_FRAME
102 #define RADEON_CS_END_OF_FRAME 0x04
106 #define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
108 static boolean
radeon_init_cs_context(struct radeon_cs_context
*csc
,
109 struct radeon_drm_winsys
*ws
)
113 csc
->relocs_bo
= (struct radeon_bo
**)
114 CALLOC(1, csc
->nrelocs
* sizeof(struct radeon_bo
*));
115 if (!csc
->relocs_bo
) {
119 csc
->relocs
= (struct drm_radeon_cs_reloc
*)
120 CALLOC(1, csc
->nrelocs
* sizeof(struct drm_radeon_cs_reloc
));
122 FREE(csc
->relocs_bo
);
126 csc
->chunks
[0].chunk_id
= RADEON_CHUNK_ID_IB
;
127 csc
->chunks
[0].length_dw
= 0;
128 csc
->chunks
[0].chunk_data
= (uint64_t)(uintptr_t)csc
->buf
;
129 csc
->chunks
[1].chunk_id
= RADEON_CHUNK_ID_RELOCS
;
130 csc
->chunks
[1].length_dw
= 0;
131 csc
->chunks
[1].chunk_data
= (uint64_t)(uintptr_t)csc
->relocs
;
132 csc
->chunks
[2].chunk_id
= RADEON_CHUNK_ID_FLAGS
;
133 csc
->chunks
[2].length_dw
= 2;
134 csc
->chunks
[2].chunk_data
= (uint64_t)(uintptr_t)&csc
->flags
;
136 csc
->chunk_array
[0] = (uint64_t)(uintptr_t)&csc
->chunks
[0];
137 csc
->chunk_array
[1] = (uint64_t)(uintptr_t)&csc
->chunks
[1];
138 csc
->chunk_array
[2] = (uint64_t)(uintptr_t)&csc
->chunks
[2];
140 csc
->cs
.chunks
= (uint64_t)(uintptr_t)csc
->chunk_array
;
144 static void radeon_cs_context_cleanup(struct radeon_cs_context
*csc
)
148 for (i
= 0; i
< csc
->crelocs
; i
++) {
149 p_atomic_dec(&csc
->relocs_bo
[i
]->num_cs_references
);
150 radeon_bo_reference(&csc
->relocs_bo
[i
], NULL
);
154 csc
->validated_crelocs
= 0;
155 csc
->chunks
[0].length_dw
= 0;
156 csc
->chunks
[1].length_dw
= 0;
159 memset(csc
->is_handle_added
, 0, sizeof(csc
->is_handle_added
));
162 static void radeon_destroy_cs_context(struct radeon_cs_context
*csc
)
164 radeon_cs_context_cleanup(csc
);
165 FREE(csc
->relocs_bo
);
170 static struct radeon_winsys_cs
*radeon_drm_cs_create(struct radeon_winsys
*rws
,
171 enum ring_type ring_type
,
172 struct radeon_winsys_cs_handle
*trace_buf
)
174 struct radeon_drm_winsys
*ws
= radeon_drm_winsys(rws
);
175 struct radeon_drm_cs
*cs
;
177 cs
= CALLOC_STRUCT(radeon_drm_cs
);
181 pipe_semaphore_init(&cs
->flush_completed
, 0);
184 cs
->trace_buf
= (struct radeon_bo
*)trace_buf
;
186 if (!radeon_init_cs_context(&cs
->csc1
, cs
->ws
)) {
190 if (!radeon_init_cs_context(&cs
->csc2
, cs
->ws
)) {
191 radeon_destroy_cs_context(&cs
->csc1
);
196 /* Set the first command buffer as current. */
199 cs
->base
.buf
= cs
->csc
->buf
;
200 cs
->base
.ring_type
= ring_type
;
202 p_atomic_inc(&ws
->num_cs
);
/* Append one dword to the command stream and advance the write cursor. */
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
208 static INLINE
void update_reloc_domains(struct drm_radeon_cs_reloc
*reloc
,
209 enum radeon_bo_domain rd
,
210 enum radeon_bo_domain wd
,
211 enum radeon_bo_domain
*added_domains
)
213 *added_domains
= (rd
| wd
) & ~(reloc
->read_domains
| reloc
->write_domain
);
215 reloc
->read_domains
|= rd
;
216 reloc
->write_domain
|= wd
;
219 int radeon_get_reloc(struct radeon_cs_context
*csc
, struct radeon_bo
*bo
)
221 struct drm_radeon_cs_reloc
*reloc
;
223 unsigned hash
= bo
->handle
& (sizeof(csc
->is_handle_added
)-1);
225 if (csc
->is_handle_added
[hash
]) {
226 i
= csc
->reloc_indices_hashlist
[hash
];
227 reloc
= &csc
->relocs
[i
];
228 if (reloc
->handle
== bo
->handle
) {
232 /* Hash collision, look for the BO in the list of relocs linearly. */
233 for (i
= csc
->crelocs
; i
!= 0;) {
235 reloc
= &csc
->relocs
[i
];
236 if (reloc
->handle
== bo
->handle
) {
237 /* Put this reloc in the hash list.
238 * This will prevent additional hash collisions if there are
239 * several consecutive get_reloc calls for the same buffer.
241 * Example: Assuming buffers A,B,C collide in the hash list,
242 * the following sequence of relocs:
243 * AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
244 * will collide here: ^ and here: ^,
245 * meaning that we should get very few collisions in the end. */
246 csc
->reloc_indices_hashlist
[hash
] = i
;
247 /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
256 static unsigned radeon_add_reloc(struct radeon_drm_cs
*cs
,
257 struct radeon_bo
*bo
,
258 enum radeon_bo_usage usage
,
259 enum radeon_bo_domain domains
,
260 enum radeon_bo_domain
*added_domains
)
262 struct radeon_cs_context
*csc
= cs
->csc
;
263 struct drm_radeon_cs_reloc
*reloc
;
264 unsigned hash
= bo
->handle
& (sizeof(csc
->is_handle_added
)-1);
265 enum radeon_bo_domain rd
= usage
& RADEON_USAGE_READ
? domains
: 0;
266 enum radeon_bo_domain wd
= usage
& RADEON_USAGE_WRITE
? domains
: 0;
267 bool update_hash
= TRUE
;
271 if (csc
->is_handle_added
[hash
]) {
272 i
= csc
->reloc_indices_hashlist
[hash
];
273 reloc
= &csc
->relocs
[i
];
274 if (reloc
->handle
!= bo
->handle
) {
275 /* Hash collision, look for the BO in the list of relocs linearly. */
276 for (i
= csc
->crelocs
- 1; i
>= 0; i
--) {
277 reloc
= &csc
->relocs
[i
];
278 if (reloc
->handle
== bo
->handle
) {
279 /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
286 /* On DMA ring we need to emit as many relocation as there is use of the bo
287 * thus each time this function is call we should grow add again the bo to
288 * the relocation buffer
290 * Do not update the hash table if it's dma ring, so that first hash always point
291 * to first bo relocation which will the one used by the kernel. Following relocation
292 * will be ignore by the kernel memory placement (but still use by the kernel to
293 * update the cmd stream with proper buffer offset).
296 update_reloc_domains(reloc
, rd
, wd
, added_domains
);
297 if (cs
->base
.ring_type
!= RING_DMA
) {
298 csc
->reloc_indices_hashlist
[hash
] = i
;
304 /* New relocation, check if the backing array is large enough. */
305 if (csc
->crelocs
>= csc
->nrelocs
) {
309 size
= csc
->nrelocs
* sizeof(struct radeon_bo
*);
310 csc
->relocs_bo
= realloc(csc
->relocs_bo
, size
);
312 size
= csc
->nrelocs
* sizeof(struct drm_radeon_cs_reloc
);
313 csc
->relocs
= realloc(csc
->relocs
, size
);
315 csc
->chunks
[1].chunk_data
= (uint64_t)(uintptr_t)csc
->relocs
;
318 /* Initialize the new relocation. */
319 csc
->relocs_bo
[csc
->crelocs
] = NULL
;
320 radeon_bo_reference(&csc
->relocs_bo
[csc
->crelocs
], bo
);
321 p_atomic_inc(&bo
->num_cs_references
);
322 reloc
= &csc
->relocs
[csc
->crelocs
];
323 reloc
->handle
= bo
->handle
;
324 reloc
->read_domains
= rd
;
325 reloc
->write_domain
= wd
;
328 csc
->is_handle_added
[hash
] = TRUE
;
330 csc
->reloc_indices_hashlist
[hash
] = csc
->crelocs
;
333 csc
->chunks
[1].length_dw
+= RELOC_DWORDS
;
335 *added_domains
= rd
| wd
;
336 return csc
->crelocs
++;
339 static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs
*rcs
,
340 struct radeon_winsys_cs_handle
*buf
,
341 enum radeon_bo_usage usage
,
342 enum radeon_bo_domain domains
)
344 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
345 struct radeon_bo
*bo
= (struct radeon_bo
*)buf
;
346 enum radeon_bo_domain added_domains
;
347 unsigned index
= radeon_add_reloc(cs
, bo
, usage
, domains
, &added_domains
);
349 if (added_domains
& RADEON_DOMAIN_GTT
)
350 cs
->csc
->used_gart
+= bo
->base
.size
;
351 if (added_domains
& RADEON_DOMAIN_VRAM
)
352 cs
->csc
->used_vram
+= bo
->base
.size
;
357 static boolean
radeon_drm_cs_validate(struct radeon_winsys_cs
*rcs
)
359 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
361 cs
->csc
->used_gart
< cs
->ws
->info
.gart_size
* 0.8 &&
362 cs
->csc
->used_vram
< cs
->ws
->info
.vram_size
* 0.8;
365 cs
->csc
->validated_crelocs
= cs
->csc
->crelocs
;
367 /* Remove lately-added relocations. The validation failed with them
368 * and the CS is about to be flushed because of that. Keep only
369 * the already-validated relocations. */
372 for (i
= cs
->csc
->validated_crelocs
; i
< cs
->csc
->crelocs
; i
++) {
373 p_atomic_dec(&cs
->csc
->relocs_bo
[i
]->num_cs_references
);
374 radeon_bo_reference(&cs
->csc
->relocs_bo
[i
], NULL
);
376 cs
->csc
->crelocs
= cs
->csc
->validated_crelocs
;
378 /* Flush if there are any relocs. Clean up otherwise. */
379 if (cs
->csc
->crelocs
) {
380 cs
->flush_cs(cs
->flush_data
, RADEON_FLUSH_ASYNC
);
382 radeon_cs_context_cleanup(cs
->csc
);
384 assert(cs
->base
.cdw
== 0);
385 if (cs
->base
.cdw
!= 0) {
386 fprintf(stderr
, "radeon: Unexpected error in %s.\n", __func__
);
393 static boolean
radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs
*rcs
, uint64_t vram
, uint64_t gtt
)
395 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
397 (cs
->csc
->used_gart
+ gtt
) < cs
->ws
->info
.gart_size
* 0.7 &&
398 (cs
->csc
->used_vram
+ vram
) < cs
->ws
->info
.vram_size
* 0.7;
403 static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs
*rcs
,
404 struct radeon_winsys_cs_handle
*buf
)
406 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
407 struct radeon_bo
*bo
= (struct radeon_bo
*)buf
;
408 unsigned index
= radeon_get_reloc(cs
->csc
, bo
);
411 fprintf(stderr
, "radeon: Cannot get a relocation in %s.\n", __func__
);
415 OUT_CS(&cs
->base
, 0xc0001000);
416 OUT_CS(&cs
->base
, index
* RELOC_DWORDS
);
419 void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs
*cs
, struct radeon_cs_context
*csc
)
423 if (drmCommandWriteRead(csc
->fd
, DRM_RADEON_CS
,
424 &csc
->cs
, sizeof(struct drm_radeon_cs
))) {
425 if (debug_get_bool_option("RADEON_DUMP_CS", FALSE
)) {
428 fprintf(stderr
, "radeon: The kernel rejected CS, dumping...\n");
429 for (i
= 0; i
< csc
->chunks
[0].length_dw
; i
++) {
430 fprintf(stderr
, "0x%08X\n", csc
->buf
[i
]);
433 fprintf(stderr
, "radeon: The kernel rejected CS, "
434 "see dmesg for more information.\n");
439 radeon_dump_cs_on_lockup(cs
, csc
);
442 for (i
= 0; i
< csc
->crelocs
; i
++)
443 p_atomic_dec(&csc
->relocs_bo
[i
]->num_active_ioctls
);
445 radeon_cs_context_cleanup(csc
);
449 * Make sure previous submission of this cs are completed
451 void radeon_drm_cs_sync_flush(struct radeon_winsys_cs
*rcs
)
453 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
455 /* Wait for any pending ioctl to complete. */
456 if (cs
->ws
->thread
&& cs
->flush_started
) {
457 pipe_semaphore_wait(&cs
->flush_completed
);
458 cs
->flush_started
= 0;
/* RADEON_NOOP=1 drops all CS submissions on the floor (for debugging). */
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
464 static void radeon_drm_cs_flush(struct radeon_winsys_cs
*rcs
, unsigned flags
, uint32_t cs_trace_id
)
466 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
467 struct radeon_cs_context
*tmp
;
469 switch (cs
->base
.ring_type
) {
471 /* pad DMA ring to 8 DWs */
472 if (cs
->ws
->info
.chip_class
<= SI
) {
474 OUT_CS(&cs
->base
, 0xf0000000); /* NOP packet */
477 OUT_CS(&cs
->base
, 0x00000000); /* NOP packet */
481 /* pad DMA ring to 8 DWs to meet CP fetch alignment requirements
482 * r6xx, requires at least 4 dw alignment to avoid a hw bug.
484 if (flags
& RADEON_FLUSH_COMPUTE
) {
485 if (cs
->ws
->info
.chip_class
<= SI
) {
487 OUT_CS(&cs
->base
, 0x80000000); /* type2 nop packet */
490 OUT_CS(&cs
->base
, 0xffff1000); /* type3 nop packet */
494 OUT_CS(&cs
->base
, 0x80000000); /* type2 nop packet */
499 if (rcs
->cdw
> RADEON_MAX_CMDBUF_DWORDS
) {
500 fprintf(stderr
, "radeon: command stream overflowed\n");
503 radeon_drm_cs_sync_flush(rcs
);
505 /* Flip command streams. */
510 cs
->cst
->cs_trace_id
= cs_trace_id
;
512 /* If the CS is not empty or overflowed, emit it in a separate thread. */
513 if (cs
->base
.cdw
&& cs
->base
.cdw
<= RADEON_MAX_CMDBUF_DWORDS
&& !debug_get_option_noop()) {
514 unsigned i
, crelocs
= cs
->cst
->crelocs
;
516 cs
->cst
->chunks
[0].length_dw
= cs
->base
.cdw
;
518 for (i
= 0; i
< crelocs
; i
++) {
519 /* Update the number of active asynchronous CS ioctls for the buffer. */
520 p_atomic_inc(&cs
->cst
->relocs_bo
[i
]->num_active_ioctls
);
523 switch (cs
->base
.ring_type
) {
525 cs
->cst
->flags
[0] = 0;
526 cs
->cst
->flags
[1] = RADEON_CS_RING_DMA
;
527 cs
->cst
->cs
.num_chunks
= 3;
528 if (cs
->ws
->info
.r600_virtual_address
) {
529 cs
->cst
->flags
[0] |= RADEON_CS_USE_VM
;
534 cs
->cst
->flags
[0] = 0;
535 cs
->cst
->flags
[1] = RADEON_CS_RING_UVD
;
536 cs
->cst
->cs
.num_chunks
= 3;
541 cs
->cst
->flags
[0] = 0;
542 cs
->cst
->flags
[1] = RADEON_CS_RING_GFX
;
543 cs
->cst
->cs
.num_chunks
= 2;
544 if (flags
& RADEON_FLUSH_KEEP_TILING_FLAGS
) {
545 cs
->cst
->flags
[0] |= RADEON_CS_KEEP_TILING_FLAGS
;
546 cs
->cst
->cs
.num_chunks
= 3;
548 if (cs
->ws
->info
.r600_virtual_address
) {
549 cs
->cst
->flags
[0] |= RADEON_CS_USE_VM
;
550 cs
->cst
->cs
.num_chunks
= 3;
552 if (flags
& RADEON_FLUSH_END_OF_FRAME
) {
553 cs
->cst
->flags
[0] |= RADEON_CS_END_OF_FRAME
;
554 cs
->cst
->cs
.num_chunks
= 3;
556 if (flags
& RADEON_FLUSH_COMPUTE
) {
557 cs
->cst
->flags
[1] = RADEON_CS_RING_COMPUTE
;
558 cs
->cst
->cs
.num_chunks
= 3;
563 if (cs
->ws
->thread
&& (flags
& RADEON_FLUSH_ASYNC
)) {
564 cs
->flush_started
= 1;
565 radeon_drm_ws_queue_cs(cs
->ws
, cs
);
567 pipe_mutex_lock(cs
->ws
->cs_stack_lock
);
568 if (cs
->ws
->thread
) {
569 while (p_atomic_read(&cs
->ws
->ncs
)) {
570 pipe_condvar_wait(cs
->ws
->cs_queue_empty
, cs
->ws
->cs_stack_lock
);
573 pipe_mutex_unlock(cs
->ws
->cs_stack_lock
);
574 radeon_drm_cs_emit_ioctl_oneshot(cs
, cs
->cst
);
577 radeon_cs_context_cleanup(cs
->cst
);
580 /* Prepare a new CS. */
581 cs
->base
.buf
= cs
->csc
->buf
;
585 static void radeon_drm_cs_destroy(struct radeon_winsys_cs
*rcs
)
587 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
589 radeon_drm_cs_sync_flush(rcs
);
590 pipe_semaphore_destroy(&cs
->flush_completed
);
591 radeon_cs_context_cleanup(&cs
->csc1
);
592 radeon_cs_context_cleanup(&cs
->csc2
);
593 p_atomic_dec(&cs
->ws
->num_cs
);
594 radeon_destroy_cs_context(&cs
->csc1
);
595 radeon_destroy_cs_context(&cs
->csc2
);
599 static void radeon_drm_cs_set_flush(struct radeon_winsys_cs
*rcs
,
600 void (*flush
)(void *ctx
, unsigned flags
),
603 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
605 cs
->flush_cs
= flush
;
606 cs
->flush_data
= user
;
609 static boolean
radeon_bo_is_referenced(struct radeon_winsys_cs
*rcs
,
610 struct radeon_winsys_cs_handle
*_buf
,
611 enum radeon_bo_usage usage
)
613 struct radeon_drm_cs
*cs
= radeon_drm_cs(rcs
);
614 struct radeon_bo
*bo
= (struct radeon_bo
*)_buf
;
617 if (!bo
->num_cs_references
)
620 index
= radeon_get_reloc(cs
->csc
, bo
);
624 if ((usage
& RADEON_USAGE_WRITE
) && cs
->csc
->relocs
[index
].write_domain
)
626 if ((usage
& RADEON_USAGE_READ
) && cs
->csc
->relocs
[index
].read_domains
)
632 void radeon_drm_cs_init_functions(struct radeon_drm_winsys
*ws
)
634 ws
->base
.cs_create
= radeon_drm_cs_create
;
635 ws
->base
.cs_destroy
= radeon_drm_cs_destroy
;
636 ws
->base
.cs_add_reloc
= radeon_drm_cs_add_reloc
;
637 ws
->base
.cs_validate
= radeon_drm_cs_validate
;
638 ws
->base
.cs_memory_below_limit
= radeon_drm_cs_memory_below_limit
;
639 ws
->base
.cs_write_reloc
= radeon_drm_cs_write_reloc
;
640 ws
->base
.cs_flush
= radeon_drm_cs_flush
;
641 ws
->base
.cs_set_flush_callback
= radeon_drm_cs_set_flush
;
642 ws
->base
.cs_is_buffer_referenced
= radeon_bo_is_referenced
;
643 ws
->base
.cs_sync_flush
= radeon_drm_cs_sync_flush
;