radeon/winsys: add VCE support v4
mesa.git: src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
/*
 * Copyright © 2009 Corbin Simpson
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 *      Joakim Sindholt <opensource@zhasha.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"

#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"

#include <xf86drm.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * These are copies from radeon_drm.h. Once an updated libdrm is released,
 * we should bump the configure.ac requirement for it and remove the
 * following defines.
 */
#ifndef RADEON_INFO_TILING_CONFIG
#define RADEON_INFO_TILING_CONFIG 6
#endif

#ifndef RADEON_INFO_WANT_HYPERZ
#define RADEON_INFO_WANT_HYPERZ 7
#endif

#ifndef RADEON_INFO_WANT_CMASK
#define RADEON_INFO_WANT_CMASK 8
#endif

#ifndef RADEON_INFO_CLOCK_CRYSTAL_FREQ
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 9
#endif

#ifndef RADEON_INFO_NUM_BACKENDS
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif

#ifndef RADEON_INFO_NUM_TILE_PIPES
#define RADEON_INFO_NUM_TILE_PIPES 0xb
#endif

#ifndef RADEON_INFO_BACKEND_MAP
#define RADEON_INFO_BACKEND_MAP 0xd
#endif

#ifndef RADEON_INFO_VA_START
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
#endif

#ifndef RADEON_INFO_MAX_PIPES
#define RADEON_INFO_MAX_PIPES 0x10
#endif

#ifndef RADEON_INFO_TIMESTAMP
#define RADEON_INFO_TIMESTAMP 0x11
#endif

#ifndef RADEON_INFO_RING_WORKING
#define RADEON_INFO_RING_WORKING 0x15
#endif

#ifndef RADEON_INFO_VCE_FW_VERSION
#define RADEON_INFO_VCE_FW_VERSION 0x1b
#endif

#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD 3
#endif

#ifndef RADEON_CS_RING_VCE
#define RADEON_CS_RING_VCE 4
#endif

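/* The ring IDs above double as the query argument for
 * RADEON_INFO_RING_WORKING: the caller writes the ring ID into the value
 * slot and the kernel overwrites it with a boolean. A minimal sketch of
 * probing a ring on an already-open DRM fd (illustrative only; the real
 * probing happens in do_winsys_init() below):
 *
 *    uint32_t working = RADEON_CS_RING_VCE;
 *    struct drm_radeon_info info = {0};
 *    info.request = RADEON_INFO_RING_WORKING;
 *    info.value = (unsigned long)&working;
 *    if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0 &&
 *        working) {
 *        // the VCE ring is available on this kernel
 *    }
 */
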
static struct util_hash_table *fd_tab = NULL;

/* Enable/disable feature access for one command stream.
 * If enable == TRUE, return TRUE on success.
 * Otherwise, return FALSE.
 *
 * We basically do the same thing the kernel does, because we have to deal
 * with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
                                    struct radeon_drm_cs **owner,
                                    pipe_mutex *mutex,
                                    unsigned request, const char *request_name,
                                    boolean enable)
{
    struct drm_radeon_info info;
    unsigned value = enable ? 1 : 0;

    memset(&info, 0, sizeof(info));

    pipe_mutex_lock(*mutex);

    /* Early exit if we are sure the request will fail. */
    if (enable) {
        if (*owner) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    } else {
        if (*owner != applier) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    }

    /* Pass through the request to the kernel. */
    info.value = (unsigned long)&value;
    info.request = request;
    if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
                            &info, sizeof(info)) != 0) {
        pipe_mutex_unlock(*mutex);
        return FALSE;
    }

    /* Update the rights in the winsys. */
    if (enable) {
        if (value) {
            *owner = applier;
            printf("radeon: Acquired access to %s.\n", request_name);
            pipe_mutex_unlock(*mutex);
            return TRUE;
        }
    } else {
        *owner = NULL;
        printf("radeon: Released access to %s.\n", request_name);
    }

    pipe_mutex_unlock(*mutex);
    return FALSE;
}

static boolean radeon_get_drm_value(int fd, unsigned request,
                                    const char *errname, uint32_t *out)
{
    struct drm_radeon_info info;
    int retval;

    memset(&info, 0, sizeof(info));

    info.value = (unsigned long)out;
    info.request = request;

    retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (retval) {
        if (errname) {
            fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
                    errname, retval);
        }
        return FALSE;
    }
    return TRUE;
}
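
/* A minimal usage sketch for radeon_get_drm_value(): the "value" slot of the
 * info ioctl carries a pointer, and the kernel writes the result back through
 * it. For example, querying the VCE firmware version (hypothetical caller and
 * fd; do_winsys_init() below does the real query):
 *
 *    uint32_t vce_fw = 0;
 *    if (radeon_get_drm_value(fd, RADEON_INFO_VCE_FW_VERSION,
 *                             "VCE FW version", &vce_fw))
 *        printf("VCE firmware: 0x%08x\n", vce_fw);
 */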

/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
    struct drm_radeon_gem_info gem_info;
    int retval;
    drmVersionPtr version;

    memset(&gem_info, 0, sizeof(gem_info));

    /* We do things in a specific order here.
     *
     * DRM version first. We need to be sure we're running on a KMS chipset.
     * Some features also depend on the DRM version.
     *
     * Then the PCI ID. This is essential and should return usable numbers
     * for all Radeons. If this fails, we probably got handed an FD for some
     * non-Radeon card.
     *
     * The GEM info is actually bogus on the kernel side, as well as on our
     * side (see radeon_gem_info_ioctl in radeon_gem.c), but that's alright
     * because we don't actually use the info for anything yet.
     *
     * The GB and Z pipe requests should always succeed, though they might
     * not return sensible values for all chipsets; that's alright because
     * the pipe drivers already know how to handle that.
     */

    /* Get DRM version. */
    version = drmGetVersion(ws->fd);
    if (version->version_major != 2 ||
        version->version_minor < 3) {
        fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                "only compatible with 2.3.x (kernel 2.6.34) or later.\n",
                __FUNCTION__,
                version->version_major,
                version->version_minor,
                version->version_patchlevel);
        drmFreeVersion(version);
        return FALSE;
    }

    ws->info.drm_major = version->version_major;
    ws->info.drm_minor = version->version_minor;
    ws->info.drm_patchlevel = version->version_patchlevel;
    drmFreeVersion(version);

    /* Get PCI ID. */
    if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
                              &ws->info.pci_id))
        return FALSE;

    /* Check PCI ID. */
    switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

    default:
        fprintf(stderr, "radeon: Invalid PCI ID.\n");
        return FALSE;
    }

    switch (ws->info.family) {
    default:
    case CHIP_UNKNOWN:
        fprintf(stderr, "radeon: Unknown family.\n");
        return FALSE;
    case CHIP_R300:
    case CHIP_R350:
    case CHIP_RV350:
    case CHIP_RV370:
    case CHIP_RV380:
    case CHIP_RS400:
    case CHIP_RC410:
    case CHIP_RS480:
        ws->info.chip_class = R300;
        break;
    case CHIP_R420:     /* R4xx-based cores. */
    case CHIP_R423:
    case CHIP_R430:
    case CHIP_R480:
    case CHIP_R481:
    case CHIP_RV410:
    case CHIP_RS600:
    case CHIP_RS690:
    case CHIP_RS740:
        ws->info.chip_class = R400;
        break;
    case CHIP_RV515:    /* R5xx-based cores. */
    case CHIP_R520:
    case CHIP_RV530:
    case CHIP_R580:
    case CHIP_RV560:
    case CHIP_RV570:
        ws->info.chip_class = R500;
        break;
    case CHIP_R600:
    case CHIP_RV610:
    case CHIP_RV630:
    case CHIP_RV670:
    case CHIP_RV620:
    case CHIP_RV635:
    case CHIP_RS780:
    case CHIP_RS880:
        ws->info.chip_class = R600;
        break;
    case CHIP_RV770:
    case CHIP_RV730:
    case CHIP_RV710:
    case CHIP_RV740:
        ws->info.chip_class = R700;
        break;
    case CHIP_CEDAR:
    case CHIP_REDWOOD:
    case CHIP_JUNIPER:
    case CHIP_CYPRESS:
    case CHIP_HEMLOCK:
    case CHIP_PALM:
    case CHIP_SUMO:
    case CHIP_SUMO2:
    case CHIP_BARTS:
    case CHIP_TURKS:
    case CHIP_CAICOS:
        ws->info.chip_class = EVERGREEN;
        break;
    case CHIP_CAYMAN:
    case CHIP_ARUBA:
        ws->info.chip_class = CAYMAN;
        break;
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        ws->info.chip_class = SI;
        break;
    case CHIP_BONAIRE:
    case CHIP_KAVERI:
    case CHIP_KABINI:
    case CHIP_HAWAII:
        ws->info.chip_class = CIK;
        break;
    }

    /* Check for dma */
    ws->info.r600_has_dma = FALSE;
    if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
        ws->info.r600_has_dma = TRUE;
    }

    /* Check for UVD and VCE */
    ws->info.has_uvd = FALSE;
    ws->info.vce_fw_version = 0x00000000;
    if (ws->info.drm_minor >= 32) {
        uint32_t value = RADEON_CS_RING_UVD;
        if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
                                 "UVD Ring working", &value))
            ws->info.has_uvd = value;

        value = RADEON_CS_RING_VCE;
        if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
                                 NULL, &value) && value) {

            if (radeon_get_drm_value(ws->fd, RADEON_INFO_VCE_FW_VERSION,
                                     "VCE FW version", &value))
                ws->info.vce_fw_version = value;
        }
    }
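
    /* The VCE detection above is what this patch adds: the ring is probed
     * first (a NULL errname keeps a missing ring silent), and only then is
     * the firmware version queried. A driver sitting on top of the winsys
     * would typically gate encode support on the reported version, roughly
     * like this (hypothetical caller, not part of this file):
     *
     *    struct radeon_info info;
     *    rws->query_info(rws, &info);
     *    if (info.vce_fw_version)
     *        printf("VCE firmware 0x%08x available\n", info.vce_fw_version);
     */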

    /* Get GEM info. */
    retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
                                 &gem_info, sizeof(gem_info));
    if (retval) {
        fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
                retval);
        return FALSE;
    }
    ws->info.gart_size = gem_info.gart_size;
    ws->info.vram_size = gem_info.vram_size;

    ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);

    /* Generation-specific queries. */
    if (ws->gen == DRV_R300) {
        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
                                  "GB pipe count",
                                  &ws->info.r300_num_gb_pipes))
            return FALSE;

        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
                                  "Z pipe count",
                                  &ws->info.r300_num_z_pipes))
            return FALSE;
    }
    else if (ws->gen >= DRV_R600) {
        if (ws->info.drm_minor >= 9 &&
            !radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
                                  "num backends",
                                  &ws->info.r600_num_backends))
            return FALSE;

        /* get the GPU counter frequency, failure is not fatal */
        radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
                             &ws->info.r600_clock_crystal_freq);

        radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
                             &ws->info.r600_tiling_config);

        if (ws->info.drm_minor >= 11) {
            radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
                                 &ws->info.r600_num_tile_pipes);

            if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
                                     &ws->info.r600_backend_map))
                ws->info.r600_backend_map_valid = TRUE;
        }

        ws->info.r600_virtual_address = FALSE;
        if (ws->info.drm_minor >= 13) {
            ws->info.r600_virtual_address = TRUE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
                                      &ws->info.r600_va_start))
                ws->info.r600_virtual_address = FALSE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
                                      &ws->info.r600_ib_vm_max_size))
                ws->info.r600_virtual_address = FALSE;
        }
        if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
            ws->info.r600_virtual_address = FALSE;
    }

    /* Get max pipes, this is only needed for compute shaders. All evergreen+
     * chips have at least 2 pipes, so we use 2 as a default. */
    ws->info.r600_max_pipes = 2;
    radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
                         &ws->info.r600_max_pipes);

    if (radeon_get_drm_value(ws->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, NULL,
                             ws->info.si_tile_mode_array)) {
        ws->info.si_tile_mode_array_valid = TRUE;
    }

    if (radeon_get_drm_value(ws->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, NULL,
                             ws->info.cik_macrotile_mode_array)) {
        ws->info.cik_macrotile_mode_array_valid = TRUE;
    }

    return TRUE;
}

static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    if (ws->thread) {
        ws->kill_thread = 1;
        pipe_semaphore_signal(&ws->cs_queued);
        pipe_thread_wait(ws->thread);
    }
    pipe_semaphore_destroy(&ws->cs_queued);

    pipe_mutex_destroy(ws->hyperz_owner_mutex);
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);
    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
    }
    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    FREE(rws);
}

static void radeon_query_info(struct radeon_winsys *rws,
                              struct radeon_info *info)
{
    *info = ((struct radeon_drm_winsys *)rws)->info;
}

static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
                                         enum radeon_feature_id fid,
                                         boolean enable)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    switch (fid) {
    case RADEON_FID_R300_HYPERZ_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
                                    &cs->ws->hyperz_owner_mutex,
                                    RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
                                    enable);

    case RADEON_FID_R300_CMASK_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
                                    &cs->ws->cmask_owner_mutex,
                                    RADEON_INFO_WANT_CMASK, "AA optimizations",
                                    enable);
    }
    return FALSE;
}

static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_init(ws->surf_man, surf);
}

static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_best(ws->surf_man, surf);
}

static uint64_t radeon_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
    uint64_t ts = 0;

    switch (value) {
    case RADEON_REQUESTED_VRAM_MEMORY:
        return ws->allocated_vram;
    case RADEON_REQUESTED_GTT_MEMORY:
        return ws->allocated_gtt;
    case RADEON_BUFFER_WAIT_TIME_NS:
        return ws->buffer_wait_time;
    case RADEON_TIMESTAMP:
        if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
            assert(0);
            return 0;
        }

        radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
                             (uint32_t*)&ts);
        return ts;
    }
    return 0;
}

static unsigned hash_fd(void *key)
{
    int fd = pointer_to_intptr(key);
    struct stat stat;
    fstat(fd, &stat);

    return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}

static int compare_fd(void *key1, void *key2)
{
    int fd1 = pointer_to_intptr(key1);
    int fd2 = pointer_to_intptr(key2);
    struct stat stat1, stat2;
    fstat(fd1, &stat1);
    fstat(fd2, &stat2);

    return stat1.st_dev != stat2.st_dev ||
           stat1.st_ino != stat2.st_ino ||
           stat1.st_rdev != stat2.st_rdev;
}
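
/* hash_fd()/compare_fd() identify a DRM device by its st_dev/st_ino/st_rdev
 * triple rather than by the file-descriptor number, so two different fds
 * opened on the same /dev/dri node map to the same winsys in fd_tab. A rough
 * sketch of the idea (hypothetical fds, illustrative only):
 *
 *    int a = open("/dev/dri/card0", O_RDWR);
 *    int b = open("/dev/dri/card0", O_RDWR);
 *    assert(a != b);
 *    assert(compare_fd(intptr_to_pointer(a),
 *                      intptr_to_pointer(b)) == 0);   // same device
 */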

void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
    pipe_mutex_lock(ws->cs_stack_lock);
    if (ws->ncs >= RING_LAST) {
        /* no room left for a flush */
        pipe_mutex_unlock(ws->cs_stack_lock);
        goto retry;
    }
    ws->cs_stack[ws->ncs++] = cs;
    pipe_mutex_unlock(ws->cs_stack_lock);
    pipe_semaphore_signal(&ws->cs_queued);
}
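
/* radeon_drm_ws_queue_cs() is the producer half of a small bounded queue:
 * a flushed CS is pushed onto cs_stack and cs_queued is signalled once per
 * push, while the thread routine below pops one entry per wait and submits
 * it. Roughly, assuming the flush-side caller lives in radeon_drm_cs.c
 * (illustrative sketch only):
 *
 *    // producer (CS flush):
 *    radeon_drm_ws_queue_cs(ws, cs);
 *    // ... later, for a synchronous flush:
 *    pipe_semaphore_wait(&cs->flush_completed);
 *
 *    // consumer (radeon_drm_cs_emit_ioctl below):
 *    pipe_semaphore_wait(&ws->cs_queued);
 *    // pop cs from ws->cs_stack under cs_stack_lock
 *    radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
 *    pipe_semaphore_signal(&cs->flush_completed);
 */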

static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
    struct radeon_drm_cs *cs;
    unsigned i;

    while (1) {
        pipe_semaphore_wait(&ws->cs_queued);
        if (ws->kill_thread)
            break;

        pipe_mutex_lock(ws->cs_stack_lock);
        cs = ws->cs_stack[0];
        for (i = 1; i < ws->ncs; i++)
            ws->cs_stack[i - 1] = ws->cs_stack[i];
        ws->cs_stack[--ws->ncs] = NULL;
        pipe_mutex_unlock(ws->cs_stack_lock);

        if (cs) {
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
            pipe_semaphore_signal(&cs->flush_completed);
        }
    }
    pipe_mutex_lock(ws->cs_stack_lock);
    for (i = 0; i < ws->ncs; i++) {
        pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
        ws->cs_stack[i] = NULL;
    }
    ws->ncs = 0;
    pipe_mutex_unlock(ws->cs_stack_lock);
    return 0;
}

DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
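
/* DEBUG_GET_ONCE_BOOL_OPTION defines debug_get_option_thread(), which reads
 * the RADEON_THREAD environment variable once and defaults to TRUE. As an
 * illustration, the submission thread can be disabled at run time with
 * something like:
 *
 *    RADEON_THREAD=false glxgears
 *
 * (example command only; any GL client works the same way.)
 */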

PUBLIC struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    ws->ncs = 0;
    pipe_semaphore_init(&ws->cs_queued, 0);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    /* Don't leave a dangling pointer behind: the winsys was already added
     * to fd_tab above. */
    util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}
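
/* A minimal usage sketch for the entry point above, assuming an already-open
 * DRM fd (the real caller is the gallium DRM target, not this file):
 *
 *    int fd = open("/dev/dri/card0", O_RDWR);
 *    struct radeon_winsys *rws = radeon_drm_winsys_create(fd);
 *    if (rws) {
 *        struct radeon_info info;
 *        rws->query_info(rws, &info);
 *        // ... create a pipe_screen on top of rws ...
 *        rws->destroy(rws);
 *    }
 */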