radeon/winsys: add uvd ring support to winsys v3
[mesa.git] / src/gallium/winsys/radeon/drm/radeon_drm_cs.c
/*
 * Copyright © 2008 Jérôme Glisse
 * Copyright © 2010 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Marek Olšák <maraeo@gmail.com>
 *
 * Based on work from libdrm_radeon by:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */

/*
    This file replaces libdrm's radeon_cs_gem with our own implementation.
    It's optimized specifically for the Radeon DRM.
    Reloc writes and space checking are faster and simpler than their
    counterparts in libdrm (the time complexity of all the functions
    is O(1) in nearly all scenarios, thanks to hashing).

    It works like this:

    cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
    also adds the size of 'buf' to the used_gart and used_vram winsys variables
    based on the domains, which are simply or'd for accounting purposes.
    The addition is skipped if the reloc is already present in the list, but it
    still accounts for any newly-referenced domains.

    cs_validate is then called, which just checks:
        used_vram/gart < vram/gart_size * 0.8
    The 0.8 factor allows for some memory fragmentation. If the validation
    fails, the pipe driver flushes the CS and tries the validation again,
    i.e. it validates only that one operation. If it fails again, it drops
    the operation on the floor and prints a nasty message to stderr.
    (done in the pipe driver)

    cs_write_reloc(cs, buf) just writes a reloc that has been added using
    cs_add_reloc. The read_domain and write_domain parameters have been removed,
    because we already specify them in cs_add_reloc.
*/
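
/*
 * Illustrative usage sketch (not part of this file): how a pipe driver
 * typically drives the interface described above. The buffer 'buf' and the
 * exact usage/domain enum values are placeholders from radeon_winsys.h; the
 * entry points are the winsys vtable set up in radeon_drm_cs_init_functions
 * at the bottom of this file.
 *
 *    struct radeon_winsys_cs *cs = ws->cs_create(ws, RING_GFX);
 *
 *    ws->cs_add_reloc(cs, buf, RADEON_USAGE_READWRITE, RADEON_DOMAIN_VRAM);
 *    if (!ws->cs_validate(cs)) {
 *        // flush and retry just this operation; drop it if it fails again
 *    }
 *    // ... emit packets into cs->buf with OUT_CS ...
 *    ws->cs_write_reloc(cs, buf);
 *    ws->cs_flush(cs, 0);
 */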

#include "radeon_drm_cs.h"

#include "util/u_memory.h"

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>

/*
 * These defines are copied from radeon_drm.h. Once an updated libdrm is
 * released, we should bump the configure.ac requirement for it and remove
 * the following block.
 */
#ifndef RADEON_CHUNK_ID_FLAGS
#define RADEON_CHUNK_ID_FLAGS       0x03

/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#endif

#ifndef RADEON_CS_USE_VM
#define RADEON_CS_USE_VM            0x02
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX          0
#define RADEON_CS_RING_COMPUTE      1
#endif

#ifndef RADEON_CS_RING_DMA
#define RADEON_CS_RING_DMA          2
#endif

#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD          3
#endif

#ifndef RADEON_CS_END_OF_FRAME
#define RADEON_CS_END_OF_FRAME      0x04
#endif
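
/*
 * Illustrative sketch: how the two dwords of the flags chunk are filled for
 * a UVD submission under the ABI described by the defines above (the real
 * code lives in radeon_drm_cs_flush below).
 *
 *    uint32_t flags[2];
 *    flags[0] = 0;                  // no KEEP_TILING_FLAGS / USE_VM / END_OF_FRAME
 *    flags[1] = RADEON_CS_RING_UVD; // ring selector (second dword)
 *    // chunks[2].chunk_data points at flags; chunks[2].length_dw == 2
 */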


#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))

static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
                                      struct radeon_drm_winsys *ws)
{
    csc->fd = ws->fd;
    csc->nrelocs = 512;
    csc->relocs_bo = (struct radeon_bo**)
                     CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
    if (!csc->relocs_bo) {
        return FALSE;
    }

    csc->relocs = (struct drm_radeon_cs_reloc*)
                  CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
    if (!csc->relocs) {
        FREE(csc->relocs_bo);
        return FALSE;
    }

    csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csc->chunks[0].length_dw = 0;
    csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
    csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csc->chunks[1].length_dw = 0;
    csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
    csc->chunks[2].length_dw = 2;
    csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;

    csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
    csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
    csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];

    csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;
    return TRUE;
}

static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
    unsigned i;

    for (i = 0; i < csc->crelocs; i++) {
        p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
        radeon_bo_reference(&csc->relocs_bo[i], NULL);
    }

    csc->crelocs = 0;
    csc->validated_crelocs = 0;
    csc->chunks[0].length_dw = 0;
    csc->chunks[1].length_dw = 0;
    csc->used_gart = 0;
    csc->used_vram = 0;
    memset(csc->is_handle_added, 0, sizeof(csc->is_handle_added));
}

static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
    radeon_cs_context_cleanup(csc);
    FREE(csc->relocs_bo);
    FREE(csc->relocs);
}


static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws, enum ring_type ring_type)
{
    struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
    struct radeon_drm_cs *cs;

    cs = CALLOC_STRUCT(radeon_drm_cs);
    if (!cs) {
        return NULL;
    }
    pipe_semaphore_init(&cs->flush_completed, 0);

    cs->ws = ws;

    if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
        FREE(cs);
        return NULL;
    }
    if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
        radeon_destroy_cs_context(&cs->csc1);
        FREE(cs);
        return NULL;
    }

    /* Set the first command buffer as current. */
    cs->csc = &cs->csc1;
    cs->cst = &cs->csc2;
    cs->base.buf = cs->csc->buf;
    cs->base.ring_type = ring_type;

    p_atomic_inc(&ws->num_cs);
    return &cs->base;
}

#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)

static INLINE void update_reloc_domains(struct drm_radeon_cs_reloc *reloc,
                                        enum radeon_bo_domain rd,
                                        enum radeon_bo_domain wd,
                                        enum radeon_bo_domain *added_domains)
{
    *added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);

    reloc->read_domains |= rd;
    reloc->write_domain |= wd;
}
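
/*
 * Worked example (illustrative): if a reloc currently has
 * read_domains == RADEON_DOMAIN_GTT and write_domain == 0, and the BO is
 * added again with rd == wd == RADEON_DOMAIN_VRAM, then *added_domains is
 * RADEON_DOMAIN_VRAM (the only newly-referenced domain) and the reloc ends
 * up with read_domains == GTT|VRAM and write_domain == VRAM. The caller
 * charges only the newly-referenced domains to used_gart/used_vram.
 */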

int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
    struct drm_radeon_cs_reloc *reloc;
    unsigned i;
    unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);

    if (csc->is_handle_added[hash]) {
        i = csc->reloc_indices_hashlist[hash];
        reloc = &csc->relocs[i];
        if (reloc->handle == bo->handle) {
            return i;
        }

        /* Hash collision, look for the BO in the list of relocs linearly. */
        for (i = csc->crelocs; i != 0;) {
            --i;
            reloc = &csc->relocs[i];
            if (reloc->handle == bo->handle) {
                /* Put this reloc in the hash list.
                 * This will prevent additional hash collisions if there are
                 * several consecutive get_reloc calls for the same buffer.
                 *
                 * Example: Assuming buffers A,B,C collide in the hash list,
                 * the following sequence of relocs:
                 *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
                 * will collide here: ^ and here:   ^,
                 * meaning that we should get very few collisions in the end. */
                csc->reloc_indices_hashlist[hash] = i;
                /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
                return i;
            }
        }
    }

    return -1;
}
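
/*
 * Illustrative note: the hash is just the low bits of the GEM handle
 * (assuming the 256-entry table declared in radeon_drm_cs.h, the mask is
 * 0xff), so e.g. handles 0x103 and 0x203 both land in slot 0x03; the lookup
 * for the second one falls back to the linear search above and then repoints
 * the hash slot at its own reloc index.
 */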

static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
                                 struct radeon_bo *bo,
                                 enum radeon_bo_usage usage,
                                 enum radeon_bo_domain domains,
                                 enum radeon_bo_domain *added_domains)
{
    struct radeon_cs_context *csc = cs->csc;
    struct drm_radeon_cs_reloc *reloc;
    unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
    enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
    enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
    bool update_hash = TRUE;
    int i;

    *added_domains = 0;
    if (csc->is_handle_added[hash]) {
        i = csc->reloc_indices_hashlist[hash];
        reloc = &csc->relocs[i];
        if (reloc->handle != bo->handle) {
            /* Hash collision, look for the BO in the list of relocs linearly. */
            for (i = csc->crelocs - 1; i >= 0; i--) {
                reloc = &csc->relocs[i];
                if (reloc->handle == bo->handle) {
                    /*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
                    break;
                }
            }
        }

        if (i >= 0) {
            /* On the DMA ring we need to emit as many relocations as there
             * are uses of the BO, so each time this function is called we
             * must add the BO to the relocation buffer again.
             *
             * Do not update the hash table for the DMA ring, so that the
             * hash always points to the first relocation of the BO, which
             * is the one the kernel uses for memory placement. The following
             * relocations are ignored for placement, but the kernel still
             * uses them to patch the command stream with the proper buffer
             * offsets. For example, a DMA copy reading and writing the same
             * BO emits two relocs for it.
             */
            update_hash = FALSE;
            update_reloc_domains(reloc, rd, wd, added_domains);
            if (cs->base.ring_type != RING_DMA) {
                csc->reloc_indices_hashlist[hash] = i;
                return i;
            }
        }
    }

    /* New relocation, check if the backing array is large enough. */
    if (csc->crelocs >= csc->nrelocs) {
        uint32_t size;
        csc->nrelocs += 10;

        size = csc->nrelocs * sizeof(struct radeon_bo*);
        csc->relocs_bo = realloc(csc->relocs_bo, size);

        size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
        csc->relocs = realloc(csc->relocs, size);

        csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
    }

    /* Initialize the new relocation. */
    csc->relocs_bo[csc->crelocs] = NULL;
    radeon_bo_reference(&csc->relocs_bo[csc->crelocs], bo);
    p_atomic_inc(&bo->num_cs_references);
    reloc = &csc->relocs[csc->crelocs];
    reloc->handle = bo->handle;
    reloc->read_domains = rd;
    reloc->write_domain = wd;
    reloc->flags = 0;

    csc->is_handle_added[hash] = TRUE;
    if (update_hash) {
        csc->reloc_indices_hashlist[hash] = csc->crelocs;
    }

    csc->chunks[1].length_dw += RELOC_DWORDS;

    *added_domains = rd | wd;
    return csc->crelocs++;
}

static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
                                        struct radeon_winsys_cs_handle *buf,
                                        enum radeon_bo_usage usage,
                                        enum radeon_bo_domain domains)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    enum radeon_bo_domain added_domains;
    unsigned index = radeon_add_reloc(cs, bo, usage, domains, &added_domains);

    if (added_domains & RADEON_DOMAIN_GTT)
        cs->csc->used_gart += bo->base.size;
    if (added_domains & RADEON_DOMAIN_VRAM)
        cs->csc->used_vram += bo->base.size;

    return index;
}

static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    boolean status =
        cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
        cs->csc->used_vram < cs->ws->info.vram_size * 0.8;

    if (status) {
        cs->csc->validated_crelocs = cs->csc->crelocs;
    } else {
        /* Remove the recently-added relocations. The validation failed with
         * them and the CS is about to be flushed because of that. Keep only
         * the already-validated relocations. */
        unsigned i;

        for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
            p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
            radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
        }
        cs->csc->crelocs = cs->csc->validated_crelocs;

        /* Flush if there are any relocs. Clean up otherwise. */
        if (cs->csc->crelocs) {
            cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
        } else {
            radeon_cs_context_cleanup(cs->csc);

            assert(cs->base.cdw == 0);
            if (cs->base.cdw != 0) {
                fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
            }
        }
    }
    return status;
}

static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    boolean status =
        (cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
        (cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;

    return status;
}

static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs *rcs,
                                      struct radeon_winsys_cs_handle *buf)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)buf;
    int index = radeon_get_reloc(cs->csc, bo);

    if (index == -1) {
        fprintf(stderr, "radeon: Cannot get a relocation in %s.\n", __func__);
        return;
    }

    OUT_CS(&cs->base, 0xc0001000);
    OUT_CS(&cs->base, index * RELOC_DWORDS);
}

void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_cs_context *csc)
{
    unsigned i;

    if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
                            &csc->cs, sizeof(struct drm_radeon_cs))) {
        if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
            fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
            for (i = 0; i < csc->chunks[0].length_dw; i++) {
                fprintf(stderr, "0x%08X\n", csc->buf[i]);
            }
        } else {
            fprintf(stderr, "radeon: The kernel rejected CS, "
                    "see dmesg for more information.\n");
        }
    }

#if RADEON_CS_DUMP_ON_LOCKUP
    radeon_dump_cs_on_lockup(csc);
#endif

    for (i = 0; i < csc->crelocs; i++)
        p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);

    radeon_cs_context_cleanup(csc);
}

/*
 * Make sure any previous submission of this CS has completed.
 */
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    /* Wait for any pending ioctl to complete. */
    if (cs->ws->thread && cs->flush_started) {
        pipe_semaphore_wait(&cs->flush_completed);
        cs->flush_started = 0;
    }
}

DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)

static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_cs_context *tmp;

    if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
        fprintf(stderr, "radeon: command stream overflowed\n");
    }

    radeon_drm_cs_sync_flush(rcs);

    /* Flip command streams. */
    tmp = cs->csc;
    cs->csc = cs->cst;
    cs->cst = tmp;

    /* If the CS is not empty and has not overflowed, submit it
     * (asynchronously on the flush thread if requested). */
    if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
        unsigned i, crelocs = cs->cst->crelocs;

        cs->cst->chunks[0].length_dw = cs->base.cdw;

        for (i = 0; i < crelocs; i++) {
            /* Update the number of active asynchronous CS ioctls for the buffer. */
            p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
        }

        switch (cs->base.ring_type) {
        case RING_DMA:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_DMA;
            cs->cst->cs.num_chunks = 3;
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
            }
            break;

        case RING_UVD:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_UVD;
            cs->cst->cs.num_chunks = 3;
            break;

        default:
        case RING_GFX:
            cs->cst->flags[0] = 0;
            cs->cst->flags[1] = RADEON_CS_RING_GFX;
            cs->cst->cs.num_chunks = 2;
            if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
                cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
                cs->cst->cs.num_chunks = 3;
            }
            if (cs->ws->info.r600_virtual_address) {
                cs->cst->flags[0] |= RADEON_CS_USE_VM;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_END_OF_FRAME) {
                cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
                cs->cst->cs.num_chunks = 3;
            }
            if (flags & RADEON_FLUSH_COMPUTE) {
                cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
                cs->cst->cs.num_chunks = 3;
            }
            break;
        }

        if (cs->ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
            cs->flush_started = 1;
            radeon_drm_ws_queue_cs(cs->ws, cs);
        } else {
            pipe_mutex_lock(cs->ws->cs_stack_lock);
            if (cs->ws->thread) {
                while (p_atomic_read(&cs->ws->ncs)) {
                    pipe_condvar_wait(cs->ws->cs_queue_empty, cs->ws->cs_stack_lock);
                }
            }
            pipe_mutex_unlock(cs->ws->cs_stack_lock);
            radeon_drm_cs_emit_ioctl_oneshot(cs->cst);
        }
    } else {
        radeon_cs_context_cleanup(cs->cst);
    }

    /* Prepare a new CS. */
    cs->base.buf = cs->csc->buf;
    cs->base.cdw = 0;
}

static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    radeon_drm_cs_sync_flush(rcs);
    pipe_semaphore_destroy(&cs->flush_completed);
    radeon_cs_context_cleanup(&cs->csc1);
    radeon_cs_context_cleanup(&cs->csc2);
    p_atomic_dec(&cs->ws->num_cs);
    radeon_destroy_cs_context(&cs->csc1);
    radeon_destroy_cs_context(&cs->csc2);
    FREE(cs);
}

static void radeon_drm_cs_set_flush(struct radeon_winsys_cs *rcs,
                                    void (*flush)(void *ctx, unsigned flags),
                                    void *user)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    cs->flush_cs = flush;
    cs->flush_data = user;
}

static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
                                       struct radeon_winsys_cs_handle *_buf,
                                       enum radeon_bo_usage usage)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
    struct radeon_bo *bo = (struct radeon_bo*)_buf;
    int index;

    if (!bo->num_cs_references)
        return FALSE;

    index = radeon_get_reloc(cs->csc, bo);
    if (index == -1)
        return FALSE;

    if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
        return TRUE;
    if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
        return TRUE;

    return FALSE;
}

void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
    ws->base.cs_create = radeon_drm_cs_create;
    ws->base.cs_destroy = radeon_drm_cs_destroy;
    ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
    ws->base.cs_validate = radeon_drm_cs_validate;
    ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
    ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
    ws->base.cs_flush = radeon_drm_cs_flush;
    ws->base.cs_set_flush_callback = radeon_drm_cs_set_flush;
    ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
    ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
}