winsys/radeon: share winsys between different fd's
[mesa.git] / src / gallium / winsys / radeon / drm / radeon_drm_winsys.c
/*
 * Copyright © 2009 Corbin Simpson
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 *      Joakim Sindholt <opensource@zhasha.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"

#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"

#include <xf86drm.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * These defines are copied from radeon_drm.h. Once an updated libdrm is
 * released, we should bump the configure.ac requirement for it and remove
 * the following fallbacks.
 */
#ifndef RADEON_INFO_TILING_CONFIG
#define RADEON_INFO_TILING_CONFIG 6
#endif

#ifndef RADEON_INFO_WANT_HYPERZ
#define RADEON_INFO_WANT_HYPERZ 7
#endif

#ifndef RADEON_INFO_WANT_CMASK
#define RADEON_INFO_WANT_CMASK 8
#endif

#ifndef RADEON_INFO_CLOCK_CRYSTAL_FREQ
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 9
#endif

#ifndef RADEON_INFO_NUM_BACKENDS
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif

#ifndef RADEON_INFO_NUM_TILE_PIPES
#define RADEON_INFO_NUM_TILE_PIPES 0xb
#endif

#ifndef RADEON_INFO_BACKEND_MAP
#define RADEON_INFO_BACKEND_MAP 0xd
#endif

#ifndef RADEON_INFO_VA_START
/* Virtual address start; VAs below this value are reserved by the kernel. */
#define RADEON_INFO_VA_START 0x0e
/* Maximum size of an IB when using the virtual memory CS. */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
#endif

#ifndef RADEON_INFO_MAX_PIPES
#define RADEON_INFO_MAX_PIPES 0x10
#endif

#ifndef RADEON_INFO_TIMESTAMP
#define RADEON_INFO_TIMESTAMP 0x11
#endif

#ifndef RADEON_INFO_RING_WORKING
#define RADEON_INFO_RING_WORKING 0x15
#endif

#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD 3
#endif

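/* Maps a DRM fd (keyed by the underlying device, see hash_fd/compare_fd
 * below) to its radeon_drm_winsys, so that different fds opened on the same
 * GPU share a single winsys instance. */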
static struct util_hash_table *fd_tab = NULL;

/* Enable/disable feature access for one command stream.
 * If enable == TRUE, return TRUE on success.
 * Otherwise, return FALSE.
 *
 * We basically do the same thing the kernel does, because we have to deal
 * with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
                                    struct radeon_drm_cs **owner,
                                    pipe_mutex *mutex,
                                    unsigned request, const char *request_name,
                                    boolean enable)
{
    struct drm_radeon_info info;
    unsigned value = enable ? 1 : 0;

    memset(&info, 0, sizeof(info));

    pipe_mutex_lock(*mutex);

    /* Early exit if we are sure the request will fail. */
    if (enable) {
        if (*owner) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    } else {
        if (*owner != applier) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    }

    /* Pass through the request to the kernel. */
    info.value = (unsigned long)&value;
    info.request = request;
    if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
                            &info, sizeof(info)) != 0) {
        pipe_mutex_unlock(*mutex);
        return FALSE;
    }

    /* Update the rights in the winsys. */
    if (enable) {
        if (value) {
            *owner = applier;
            printf("radeon: Acquired access to %s.\n", request_name);
            pipe_mutex_unlock(*mutex);
            return TRUE;
        }
    } else {
        *owner = NULL;
        printf("radeon: Released access to %s.\n", request_name);
    }

    pipe_mutex_unlock(*mutex);
    return FALSE;
}

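/* Query a single 32-bit value from the kernel via the DRM_RADEON_INFO ioctl.
 * On failure, print an error (if errname is set) and return FALSE. */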
static boolean radeon_get_drm_value(int fd, unsigned request,
                                    const char *errname, uint32_t *out)
{
    struct drm_radeon_info info;
    int retval;

    memset(&info, 0, sizeof(info));

    info.value = (unsigned long)out;
    info.request = request;

    retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (retval) {
        if (errname) {
            fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
                    errname, retval);
        }
        return FALSE;
    }
    return TRUE;
}

/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
    struct drm_radeon_gem_info gem_info;
    int retval;
    drmVersionPtr version;

    memset(&gem_info, 0, sizeof(gem_info));

    /* We do things in a specific order here.
     *
     * DRM version first. We need to be sure we're running on a KMS chipset.
     * This is also for some features.
     *
     * Then, the PCI ID. This is essential and should return usable numbers
     * for all Radeons. If this fails, we probably got handed an FD for some
     * non-Radeon card.
     *
     * The GEM info is actually bogus on the kernel side, as well as our side
     * (see radeon_gem_info_ioctl in radeon_gem.c), but that's alright because
     * we don't actually use the info for anything yet.
     *
     * The GB and Z pipe requests should always succeed, but they might not
     * return sensible values for all chipsets; that's alright because the
     * pipe drivers already know that.
     */

    /* Get DRM version. */
    version = drmGetVersion(ws->fd);
    if (version->version_major != 2 ||
        version->version_minor < 3) {
        fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                "only compatible with 2.3.x (kernel 2.6.34) or later.\n",
                __FUNCTION__,
                version->version_major,
                version->version_minor,
                version->version_patchlevel);
        drmFreeVersion(version);
        return FALSE;
    }

    ws->info.drm_major = version->version_major;
    ws->info.drm_minor = version->version_minor;
    ws->info.drm_patchlevel = version->version_patchlevel;
    drmFreeVersion(version);

    /* Get PCI ID. */
    if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
                              &ws->info.pci_id))
        return FALSE;

    /* Check PCI ID. */
    switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

    default:
        fprintf(stderr, "radeon: Invalid PCI ID.\n");
        return FALSE;
    }

    switch (ws->info.family) {
    default:
    case CHIP_UNKNOWN:
        fprintf(stderr, "radeon: Unknown family.\n");
        return FALSE;
    case CHIP_R300:
    case CHIP_R350:
    case CHIP_RV350:
    case CHIP_RV370:
    case CHIP_RV380:
    case CHIP_RS400:
    case CHIP_RC410:
    case CHIP_RS480:
        ws->info.chip_class = R300;
        break;
    case CHIP_R420:     /* R4xx-based cores. */
    case CHIP_R423:
    case CHIP_R430:
    case CHIP_R480:
    case CHIP_R481:
    case CHIP_RV410:
    case CHIP_RS600:
    case CHIP_RS690:
    case CHIP_RS740:
        ws->info.chip_class = R400;
        break;
    case CHIP_RV515:    /* R5xx-based cores. */
    case CHIP_R520:
    case CHIP_RV530:
    case CHIP_R580:
    case CHIP_RV560:
    case CHIP_RV570:
        ws->info.chip_class = R500;
        break;
    case CHIP_R600:
    case CHIP_RV610:
    case CHIP_RV630:
    case CHIP_RV670:
    case CHIP_RV620:
    case CHIP_RV635:
    case CHIP_RS780:
    case CHIP_RS880:
        ws->info.chip_class = R600;
        break;
    case CHIP_RV770:
    case CHIP_RV730:
    case CHIP_RV710:
    case CHIP_RV740:
        ws->info.chip_class = R700;
        break;
    case CHIP_CEDAR:
    case CHIP_REDWOOD:
    case CHIP_JUNIPER:
    case CHIP_CYPRESS:
    case CHIP_HEMLOCK:
    case CHIP_PALM:
    case CHIP_SUMO:
    case CHIP_SUMO2:
    case CHIP_BARTS:
    case CHIP_TURKS:
    case CHIP_CAICOS:
        ws->info.chip_class = EVERGREEN;
        break;
    case CHIP_CAYMAN:
    case CHIP_ARUBA:
        ws->info.chip_class = CAYMAN;
        break;
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        ws->info.chip_class = SI;
        break;
    case CHIP_BONAIRE:
    case CHIP_KAVERI:
    case CHIP_KABINI:
        ws->info.chip_class = CIK;
        break;
    }

    /* Check for DMA. */
    ws->info.r600_has_dma = FALSE;
    if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
        ws->info.r600_has_dma = TRUE;
    }

    /* Check for UVD. */
    ws->info.has_uvd = FALSE;
    if (ws->info.drm_minor >= 32) {
        uint32_t value = RADEON_CS_RING_UVD;
        if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
                                 "UVD Ring working", &value))
            ws->info.has_uvd = value;
    }

    /* Get GEM info. */
    retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
                                 &gem_info, sizeof(gem_info));
    if (retval) {
        fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
                retval);
        return FALSE;
    }
    ws->info.gart_size = gem_info.gart_size;
    ws->info.vram_size = gem_info.vram_size;

    ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);

    /* Generation-specific queries. */
    if (ws->gen == DRV_R300) {
        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
                                  "GB pipe count",
                                  &ws->info.r300_num_gb_pipes))
            return FALSE;

        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
                                  "Z pipe count",
                                  &ws->info.r300_num_z_pipes))
            return FALSE;
    }
    else if (ws->gen >= DRV_R600) {
        if (ws->info.drm_minor >= 9 &&
            !radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
                                  "num backends",
                                  &ws->info.r600_num_backends))
            return FALSE;

        /* Get the GPU counter frequency; failure is not fatal. */
        radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
                             &ws->info.r600_clock_crystal_freq);

        radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
                             &ws->info.r600_tiling_config);

        if (ws->info.drm_minor >= 11) {
            radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
                                 &ws->info.r600_num_tile_pipes);

            if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
                                     &ws->info.r600_backend_map))
                ws->info.r600_backend_map_valid = TRUE;
        }

        ws->info.r600_virtual_address = FALSE;
        if (ws->info.drm_minor >= 13) {
            ws->info.r600_virtual_address = TRUE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
                                      &ws->info.r600_va_start))
                ws->info.r600_virtual_address = FALSE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
                                      &ws->info.r600_ib_vm_max_size))
                ws->info.r600_virtual_address = FALSE;
        }
        if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
            ws->info.r600_virtual_address = FALSE;
    }

    /* Get max pipes; this is only needed for compute shaders. All evergreen+
     * chips have at least 2 pipes, so we use 2 as a default. */
    ws->info.r600_max_pipes = 2;
    radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
                         &ws->info.r600_max_pipes);

    return TRUE;
}

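/* Drop one reference to the winsys. Only the last reference tears down the
 * submission thread, the buffer managers, the surface manager and the
 * fd_tab entry. */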
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    if (!pipe_reference(&ws->base.reference, NULL)) {
        return;
    }

    if (ws->thread) {
        ws->kill_thread = 1;
        pipe_semaphore_signal(&ws->cs_queued);
        pipe_thread_wait(ws->thread);
    }
    pipe_semaphore_destroy(&ws->cs_queued);

    pipe_mutex_destroy(ws->hyperz_owner_mutex);
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);
    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
    }
    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    FREE(rws);
}

static void radeon_query_info(struct radeon_winsys *rws,
                              struct radeon_info *info)
{
    *info = ((struct radeon_drm_winsys *)rws)->info;
}

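/* Rough usage sketch (hedged; the real caller is presumably the r300 driver,
 * given the RADEON_FID_R300_* ids): a context requests exclusive Hyper-Z
 * access on its CS with
 *
 *     ws->cs_request_feature(cs, RADEON_FID_R300_HYPERZ_ACCESS, TRUE);
 *
 * and releases it again by passing FALSE. */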
static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
                                         enum radeon_feature_id fid,
                                         boolean enable)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    switch (fid) {
    case RADEON_FID_R300_HYPERZ_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
                                    &cs->ws->hyperz_owner_mutex,
                                    RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
                                    enable);

    case RADEON_FID_R300_CMASK_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
                                    &cs->ws->cmask_owner_mutex,
                                    RADEON_INFO_WANT_CMASK, "AA optimizations",
                                    enable);
    }
    return FALSE;
}

static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_init(ws->surf_man, surf);
}

static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_best(ws->surf_man, surf);
}

static uint64_t radeon_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
    uint64_t ts = 0;

    switch (value) {
    case RADEON_REQUESTED_VRAM_MEMORY:
        return ws->allocated_vram;
    case RADEON_REQUESTED_GTT_MEMORY:
        return ws->allocated_gtt;
    case RADEON_BUFFER_WAIT_TIME_NS:
        return ws->buffer_wait_time;
    case RADEON_TIMESTAMP:
        if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
            assert(0);
            return 0;
        }

        radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
                             (uint32_t*)&ts);
        return ts;
    }
    return 0;
}

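/* Key fd_tab by the device the fd points at (st_dev/st_ino/st_rdev), not by
 * the fd number itself, so that two different fds opened on the same GPU
 * resolve to the same winsys. */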
static unsigned hash_fd(void *key)
{
    int fd = pointer_to_intptr(key);
    struct stat stat;
    fstat(fd, &stat);

    return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}

static int compare_fd(void *key1, void *key2)
{
    int fd1 = pointer_to_intptr(key1);
    int fd2 = pointer_to_intptr(key2);
    struct stat stat1, stat2;
    fstat(fd1, &stat1);
    fstat(fd2, &stat2);

    return stat1.st_dev != stat2.st_dev ||
           stat1.st_ino != stat2.st_ino ||
           stat1.st_rdev != stat2.st_rdev;
}

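/* Queue a command stream for the submission thread. If the stack is full
 * (RING_LAST entries already queued), busy-wait until the thread has made
 * room. */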
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
    pipe_mutex_lock(ws->cs_stack_lock);
    if (p_atomic_read(&ws->ncs) >= RING_LAST) {
        /* No room left for a flush. */
        pipe_mutex_unlock(ws->cs_stack_lock);
        goto retry;
    }
    ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
    p_atomic_inc(&ws->ncs);
    pipe_mutex_unlock(ws->cs_stack_lock);
    pipe_semaphore_signal(&ws->cs_queued);
}

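/* Submission thread: wait for queued command streams, submit each one with
 * radeon_drm_cs_emit_ioctl_oneshot, pop it off the stack and signal its
 * flush_completed semaphore. On shutdown, release anyone still waiting on a
 * queued CS. */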
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
    struct radeon_drm_cs *cs;
    unsigned i, empty_stack;

    while (1) {
        pipe_semaphore_wait(&ws->cs_queued);
        if (ws->kill_thread)
            break;
next:
        pipe_mutex_lock(ws->cs_stack_lock);
        cs = ws->cs_stack[0];
        pipe_mutex_unlock(ws->cs_stack_lock);

        if (cs) {
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);

            pipe_mutex_lock(ws->cs_stack_lock);
            for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
                ws->cs_stack[i - 1] = ws->cs_stack[i];
            }
            ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
            empty_stack = p_atomic_dec_zero(&ws->ncs);
            pipe_mutex_unlock(ws->cs_stack_lock);

            pipe_semaphore_signal(&cs->flush_completed);

            if (!empty_stack) {
                goto next;
            }
        }
    }
    pipe_mutex_lock(ws->cs_stack_lock);
    for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
        pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
        ws->cs_stack[i] = NULL;
    }
    p_atomic_set(&ws->ncs, 0);
    pipe_mutex_unlock(ws->cs_stack_lock);
    return NULL;
}

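/* The submission thread is enabled by default on multi-core machines; set
 * RADEON_THREAD=false in the environment to submit from the calling thread
 * instead. */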
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);

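/* Create a winsys for a radeon DRM fd, or return a new reference to an
 * existing winsys if fd_tab already has an entry for the same device.
 *
 * Rough usage sketch (hedged; the assumed caller is a target/loader that
 * already owns a DRM fd, not code from this file):
 *
 *     struct radeon_winsys *ws = radeon_drm_winsys_create(fd);
 *     if (!ws)
 *         return NULL;
 *     ...
 *     ws->destroy(ws);
 */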
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* Init the reference count. */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}