Remove useless checks for NULL before freeing
[mesa.git] / src / gallium / winsys / svga / drm / vmw_screen_ioctl.c
1 /**********************************************************
2 * Copyright 2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 /**
27 * @file
28 *
 * Wrappers for DRM ioctl functionality used by the rest of the vmw
30 * drm winsys.
31 *
32 * Based on svgaicd_escape.c
33 */
34
35
36 #include "svga_cmd.h"
37 #include "util/u_memory.h"
38 #include "util/u_math.h"
39 #include "svgadump/svga_dump.h"
40 #include "state_tracker/drm_driver.h"
41 #include "vmw_screen.h"
42 #include "vmw_context.h"
43 #include "vmw_fence.h"
44 #include "xf86drm.h"
45 #include "vmwgfx_drm.h"
46 #include "svga3d_caps.h"
47 #include "svga3d_reg.h"
48
49 #include "os/os_mman.h"
50
51 #include <errno.h>
52 #include <unistd.h>
53
/* Fallback cap for the largest single texture when the kernel does not
 * report DRM_VMW_PARAM_MAX_MOB_SIZE (see vmw_ioctl_init()).
 */
#define VMW_MAX_DEFAULT_TEXTURE_SIZE (128 * 1024 * 1024)

/**
 * A kernel-allocated DMA buffer (GMR region) as tracked by this winsys.
 * Created by vmw_ioctl_region_create() / vmw_ioctl_gb_surface_create()
 * and destroyed by vmw_ioctl_region_destroy(), which owns @handle and
 * any live CPU mapping in @data.
 */
struct vmw_region
{
   uint32_t handle;      /* Kernel buffer object handle. */
   uint64_t map_handle;  /* mmap() offset cookie returned by the kernel. */
   void *data;           /* CPU mapping, or NULL when not mapped. */
   uint32_t map_count;   /* Outstanding vmw_ioctl_region_map() calls. */
   int drm_fd;           /* File descriptor the region was created on. */
   uint32_t size;        /* Size of the buffer in bytes. */
};

/* XXX: This isn't a real hardware flag, but just a hack for kernel to
 * know about primary surfaces. In newer versions of the kernel
 * interface the driver uses a special field.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
72
/**
 * vmw_region_size - Return the size in bytes of a buffer region.
 */
uint32_t
vmw_region_size(struct vmw_region *region)
{
   return region->size;
}
78
79 uint32
80 vmw_ioctl_context_create(struct vmw_winsys_screen *vws)
81 {
82 struct drm_vmw_context_arg c_arg;
83 int ret;
84
85 VMW_FUNC;
86
87 ret = drmCommandRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_CONTEXT,
88 &c_arg, sizeof(c_arg));
89
90 if (ret)
91 return -1;
92
93 vmw_printf("Context id is %d\n", c_arg.cid);
94
95 return c_arg.cid;
96 }
97
98 void
99 vmw_ioctl_context_destroy(struct vmw_winsys_screen *vws, uint32 cid)
100 {
101 struct drm_vmw_context_arg c_arg;
102
103 VMW_FUNC;
104
105 memset(&c_arg, 0, sizeof(c_arg));
106 c_arg.cid = cid;
107
108 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_CONTEXT,
109 &c_arg, sizeof(c_arg));
110
111 }
112
/**
 * vmw_ioctl_surface_create - Create a (non guest-backed) kernel surface.
 *
 * Builds the per-face, per-miplevel size array expected by the kernel
 * and issues DRM_VMW_CREATE_SURFACE.
 *
 * Returns the new surface id, or (uint32)-1 on failure.
 */
uint32
vmw_ioctl_surface_create(struct vmw_winsys_screen *vws,
                         SVGA3dSurfaceFlags flags,
                         SVGA3dSurfaceFormat format,
                         unsigned usage,
                         SVGA3dSize size,
                         uint32_t numFaces, uint32_t numMipLevels)
{
   union drm_vmw_surface_create_arg s_arg;
   struct drm_vmw_surface_create_req *req = &s_arg.req;
   struct drm_vmw_surface_arg *rep = &s_arg.rep;
   struct drm_vmw_size sizes[DRM_VMW_MAX_SURFACE_FACES*
                             DRM_VMW_MAX_MIP_LEVELS];
   struct drm_vmw_size *cur_size;
   uint32_t iFace;
   uint32_t iMipLevel;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   memset(&s_arg, 0, sizeof(s_arg));
   /* The scanout hint is a fake surface flag (see the #define above).
    * Old kernels take it inline in req->flags; newer ones want it
    * stripped and signalled through req->scanout instead.
    */
   if (vws->use_old_scanout_flag &&
       (flags & SVGA3D_SURFACE_HINT_SCANOUT)) {
      req->flags = (uint32_t) flags;
      req->scanout = false;
   } else if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
      req->flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
      req->scanout = true;
   } else {
      req->flags = (uint32_t) flags;
      req->scanout = false;
   }
   req->format = (uint32_t) format;
   req->shareable = !!(usage & SVGA_SURFACE_USAGE_SHARED);

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
          DRM_VMW_MAX_MIP_LEVELS);
   /* Flatten the mip chains of all faces into one contiguous size
    * array, halving each dimension (clamped to 1) per mip level.
    */
   cur_size = sizes;
   for (iFace = 0; iFace < numFaces; ++iFace) {
      SVGA3dSize mipSize = size;

      req->mip_levels[iFace] = numMipLevels;
      for (iMipLevel = 0; iMipLevel < numMipLevels; ++iMipLevel) {
         cur_size->width = mipSize.width;
         cur_size->height = mipSize.height;
         cur_size->depth = mipSize.depth;
         mipSize.width = MAX2(mipSize.width >> 1, 1);
         mipSize.height = MAX2(mipSize.height >> 1, 1);
         mipSize.depth = MAX2(mipSize.depth >> 1, 1);
         cur_size++;
      }
   }
   /* Unused faces carry a zero mip count. */
   for (iFace = numFaces; iFace < SVGA3D_MAX_SURFACE_FACES; ++iFace) {
      req->mip_levels[iFace] = 0;
   }

   /* The kernel copies the size array from user space via this address. */
   req->size_addr = (unsigned long)&sizes;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SURFACE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      return -1;

   vmw_printf("Surface id is %d\n", rep->sid);

   return rep->sid;
}
181
182
/**
 * vmw_ioctl_gb_surface_create - Create a guest-backed kernel surface.
 *
 * If @buffer_handle is nonzero the surface is backed by that existing
 * buffer; otherwise the kernel allocates a backup buffer
 * (drm_vmw_surface_flag_create_buffer is always set).
 *
 * If @p_region is non-NULL, on success it is set to a newly allocated
 * struct vmw_region describing the backup buffer; the caller owns it.
 *
 * Returns the surface handle, or SVGA3D_INVALID_ID on failure.
 */
uint32
vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
                            SVGA3dSurfaceFlags flags,
                            SVGA3dSurfaceFormat format,
                            unsigned usage,
                            SVGA3dSize size,
                            uint32_t numFaces,
                            uint32_t numMipLevels,
                            uint32_t buffer_handle,
                            struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_create_arg s_arg;
   struct drm_vmw_gb_surface_create_req *req = &s_arg.req;
   struct drm_vmw_gb_surface_create_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   /* Allocate the region up front so a late failure path is simple. */
   if (p_region) {
      region = CALLOC_STRUCT(vmw_region);
      if (!region)
         return SVGA3D_INVALID_ID;
   }

   memset(&s_arg, 0, sizeof(s_arg));
   /* Strip the fake scanout hint and pass it via the drm surface flags. */
   if (flags & SVGA3D_SURFACE_HINT_SCANOUT) {
      req->svga3d_flags = (uint32_t) (flags & ~SVGA3D_SURFACE_HINT_SCANOUT);
      req->drm_surface_flags = drm_vmw_surface_flag_scanout;
   } else {
      req->svga3d_flags = (uint32_t) flags;
   }
   req->format = (uint32_t) format;
   if (usage & SVGA_SURFACE_USAGE_SHARED)
      req->drm_surface_flags |= drm_vmw_surface_flag_shareable;
   req->drm_surface_flags |= drm_vmw_surface_flag_create_buffer;

   assert(numFaces * numMipLevels < DRM_VMW_MAX_SURFACE_FACES*
          DRM_VMW_MAX_MIP_LEVELS);
   req->base_size.width = size.width;
   req->base_size.height = size.height;
   req->base_size.depth = size.depth;
   req->mip_levels = numMipLevels;
   req->multisample_count = 0;
   req->autogen_filter = SVGA3D_TEX_FILTER_NONE;
   if (buffer_handle)
      req->buffer_handle = buffer_handle;
   else
      req->buffer_handle = SVGA3D_INVALID_ID;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_CREATE,
                             &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_create;

   /* Hand the backup-buffer description to the caller. */
   if (p_region) {
      region->handle = rep->buffer_handle;
      region->map_handle = rep->buffer_map_handle;
      region->drm_fd = vws->ioctl.drm_fd;
      region->size = rep->backup_size;
      *p_region = region;
   }

   vmw_printf("Surface id is %d\n", rep->sid);
   return rep->handle;

out_fail_create:
   /* region is NULL here when p_region was NULL; FREE(NULL) is a no-op. */
   FREE(region);
   return SVGA3D_INVALID_ID;
}
254
255 /**
256 * vmw_ioctl_surface_req - Fill in a struct surface_req
257 *
258 * @vws: Winsys screen
259 * @whandle: Surface handle
260 * @req: The struct surface req to fill in
261 * @needs_unref: This call takes a kernel surface reference that needs to
262 * be unreferenced.
263 *
264 * Returns 0 on success, negative error type otherwise.
265 * Fills in the surface_req structure according to handle type and kernel
266 * capabilities.
267 */
268 static int
269 vmw_ioctl_surface_req(const struct vmw_winsys_screen *vws,
270 const struct winsys_handle *whandle,
271 struct drm_vmw_surface_arg *req,
272 boolean *needs_unref)
273 {
274 int ret;
275
276 switch(whandle->type) {
277 case DRM_API_HANDLE_TYPE_SHARED:
278 case DRM_API_HANDLE_TYPE_KMS:
279 *needs_unref = FALSE;
280 req->handle_type = DRM_VMW_HANDLE_LEGACY;
281 req->sid = whandle->handle;
282 break;
283 case DRM_API_HANDLE_TYPE_FD:
284 if (!vws->ioctl.have_drm_2_6) {
285 uint32_t handle;
286
287 ret = drmPrimeFDToHandle(vws->ioctl.drm_fd, whandle->handle, &handle);
288 if (ret) {
289 vmw_error("Failed to get handle from prime fd %d.\n",
290 (int) whandle->handle);
291 return -EINVAL;
292 }
293
294 *needs_unref = TRUE;
295 req->handle_type = DRM_VMW_HANDLE_LEGACY;
296 req->sid = handle;
297 } else {
298 *needs_unref = FALSE;
299 req->handle_type = DRM_VMW_HANDLE_PRIME;
300 req->sid = whandle->handle;
301 }
302 break;
303 default:
304 vmw_error("Attempt to import unsupported handle type %d.\n",
305 whandle->type);
306 return -EINVAL;
307 }
308
309 return 0;
310 }
311
312 /**
313 * vmw_ioctl_gb_surface_ref - Put a reference on a guest-backed surface and
314 * get surface information
315 *
316 * @vws: Screen to register the reference on
317 * @handle: Kernel handle of the guest-backed surface
318 * @flags: flags used when the surface was created
319 * @format: Format used when the surface was created
320 * @numMipLevels: Number of mipmap levels of the surface
321 * @p_region: On successful return points to a newly allocated
322 * struct vmw_region holding a reference to the surface backup buffer.
323 *
324 * Returns 0 on success, a system error on failure.
325 */
int
vmw_ioctl_gb_surface_ref(struct vmw_winsys_screen *vws,
                         const struct winsys_handle *whandle,
                         SVGA3dSurfaceFlags *flags,
                         SVGA3dSurfaceFormat *format,
                         uint32_t *numMipLevels,
                         uint32_t *handle,
                         struct vmw_region **p_region)
{
   union drm_vmw_gb_surface_reference_arg s_arg;
   struct drm_vmw_surface_arg *req = &s_arg.req;
   struct drm_vmw_gb_surface_ref_rep *rep = &s_arg.rep;
   struct vmw_region *region = NULL;
   boolean needs_unref = FALSE;
   int ret;

   vmw_printf("%s flags %d format %d\n", __FUNCTION__, flags, format);

   assert(p_region != NULL);
   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      return -ENOMEM;

   memset(&s_arg, 0, sizeof(s_arg));
   /* Translate the winsys handle into a surface request; this may take
    * an extra kernel reference (needs_unref) on pre-2.6 prime imports.
    */
   ret = vmw_ioctl_surface_req(vws, whandle, req, &needs_unref);
   if (ret)
      goto out_fail_req;

   /* Publish the (possibly converted) sid so the error path below can
    * drop the reference vmw_ioctl_surface_req may have taken.
    */
   *handle = req->sid;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GB_SURFACE_REF,
                             &s_arg, sizeof(s_arg));

   if (ret)
      goto out_fail_ref;

   /* Wrap the surface's backup buffer in a region owned by the caller. */
   region->handle = rep->crep.buffer_handle;
   region->map_handle = rep->crep.buffer_map_handle;
   region->drm_fd = vws->ioctl.drm_fd;
   region->size = rep->crep.backup_size;
   *p_region = region;

   *handle = rep->crep.handle;
   *flags = rep->creq.svga3d_flags;
   *format = rep->creq.format;
   *numMipLevels = rep->creq.mip_levels;

   /* Drop the temporary reference taken during handle conversion; the
    * GB_SURFACE_REF above holds its own reference for the caller.
    */
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);

   return 0;
out_fail_ref:
   if (needs_unref)
      vmw_ioctl_surface_destroy(vws, *handle);
out_fail_req:
   FREE(region);
   return ret;
}
383
384 void
385 vmw_ioctl_surface_destroy(struct vmw_winsys_screen *vws, uint32 sid)
386 {
387 struct drm_vmw_surface_arg s_arg;
388
389 VMW_FUNC;
390
391 memset(&s_arg, 0, sizeof(s_arg));
392 s_arg.sid = sid;
393
394 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SURFACE,
395 &s_arg, sizeof(s_arg));
396 }
397
/**
 * vmw_ioctl_command - Submit a command buffer to the kernel.
 *
 * @cid: Context id (unused directly here; commands carry their context).
 * @throttle_us: Requested command throttling in microseconds.
 * @commands: Pointer to the command buffer.
 * @size: Size of the command buffer in bytes.
 * @pfence: If non-NULL, receives a new fence for this submission, or
 * NULL when the kernel already synced or fence creation failed.
 */
void
vmw_ioctl_command(struct vmw_winsys_screen *vws, int32_t cid,
                  uint32_t throttle_us, void *commands, uint32_t size,
                  struct pipe_fence_handle **pfence)
{
   struct drm_vmw_execbuf_arg arg;
   struct drm_vmw_fence_rep rep;
   int ret;

#ifdef DEBUG
   {
      /* Read the dump/skip debug options only on the first call. */
      static boolean firsttime = TRUE;
      static boolean debug = FALSE;
      static boolean skip = FALSE;
      if (firsttime) {
         debug = debug_get_bool_option("SVGA_DUMP_CMD", FALSE);
         skip = debug_get_bool_option("SVGA_SKIP_CMD", FALSE);
      }
      if (debug) {
         VMW_FUNC;
         svga_dump_commands(commands, size);
      }
      firsttime = FALSE;
      /* SVGA_SKIP_CMD: submit a zero-sized buffer instead of the commands. */
      if (skip) {
         size = 0;
      }
   }
#endif

   memset(&arg, 0, sizeof(arg));
   memset(&rep, 0, sizeof(rep));

   /* Pre-set an error so that if the kernel never fills in the fence
    * rep (or no fence was requested) we treat it as "no fence".
    */
   rep.error = -EFAULT;
   if (pfence)
      arg.fence_rep = (unsigned long)&rep;
   arg.commands = (unsigned long)commands;
   arg.command_size = size;
   arg.throttle_us = throttle_us;
   arg.version = DRM_VMW_EXECBUF_VERSION;

   /* Retry if the ioctl was interrupted by a signal. */
   do {
       ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
   } while(ret == -ERESTART);
   if (ret) {
      vmw_error("%s error %s.\n", __FUNCTION__, strerror(-ret));
   }

   if (rep.error) {

      /*
       * Kernel has already synced, or caller requested no fence.
       */
      if (pfence)
         *pfence = NULL;
   } else {
      if (pfence) {
         /* Record the seqnos the kernel reports as already passed. */
         vmw_fences_signal(vws->fence_ops, rep.passed_seqno, rep.seqno,
                           TRUE);

         *pfence = vmw_fence_create(vws->fence_ops, rep.handle,
                                    rep.seqno, rep.mask);
         if (*pfence == NULL) {
            /*
             * Fence creation failed. Need to sync.
             */
            (void) vmw_ioctl_fence_finish(vws, rep.handle, rep.mask);
            vmw_ioctl_fence_unref(vws, rep.handle);
         }
      }
   }
}
469
470
/**
 * vmw_ioctl_region_create - Allocate a kernel DMA buffer of @size bytes.
 *
 * Returns a newly allocated struct vmw_region (caller owns it; release
 * with vmw_ioctl_region_destroy()), or NULL on failure.
 */
struct vmw_region *
vmw_ioctl_region_create(struct vmw_winsys_screen *vws, uint32_t size)
{
   struct vmw_region *region;
   union drm_vmw_alloc_dmabuf_arg arg;
   struct drm_vmw_alloc_dmabuf_req *req = &arg.req;
   struct drm_vmw_dmabuf_rep *rep = &arg.rep;
   int ret;

   vmw_printf("%s: size = %u\n", __FUNCTION__, size);

   region = CALLOC_STRUCT(vmw_region);
   if (!region)
      goto out_err1;

   memset(&arg, 0, sizeof(arg));
   req->size = size;
   /* Retry if the ioctl was interrupted by a signal. */
   do {
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_ALLOC_DMABUF, &arg,
			        sizeof(arg));
   } while (ret == -ERESTART);

   if (ret) {
      vmw_error("IOCTL failed %d: %s\n", ret, strerror(-ret));
      goto out_err1;
   }

   region->data = NULL;
   region->handle = rep->handle;
   region->map_handle = rep->map_handle;
   region->map_count = 0;
   region->size = size;
   region->drm_fd = vws->ioctl.drm_fd;

   /* NOTE(review): struct vmw_region defined above has no "ptr" member;
    * this trace presumably only compiles when vmw_printf expands to
    * nothing — confirm and fix the fields if vmw_printf is ever real.
    */
   vmw_printf("   gmrId = %u, offset = %u\n",
              region->ptr.gmrId, region->ptr.offset);

   return region;

out_err1:
   /* FREE(NULL) is a no-op, so this covers the allocation failure too. */
   FREE(region);
   return NULL;
}
514
/**
 * vmw_ioctl_region_destroy - Unmap, release and free a buffer region.
 *
 * Tears down any live CPU mapping, drops the kernel buffer reference
 * and frees the struct vmw_region itself.
 */
void
vmw_ioctl_region_destroy(struct vmw_region *region)
{
   struct drm_vmw_unref_dmabuf_arg arg;

   /* NOTE(review): region->ptr does not exist in struct vmw_region;
    * assumed dead code behind a no-op vmw_printf — verify.
    */
   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);

   /* Unmap regardless of map_count; outstanding maps become invalid. */
   if (region->data) {
      os_munmap(region->data, region->size);
      region->data = NULL;
   }

   memset(&arg, 0, sizeof(arg));
   arg.handle = region->handle;
   drmCommandWrite(region->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, sizeof(arg));

   FREE(region);
}
534
535 SVGAGuestPtr
536 vmw_ioctl_region_ptr(struct vmw_region *region)
537 {
538 SVGAGuestPtr ptr = {region->handle, 0};
539 return ptr;
540 }
541
/**
 * vmw_ioctl_region_map - Map a region into the CPU address space.
 *
 * The mapping is created lazily on first call and cached; subsequent
 * calls only bump map_count. Returns NULL if mmap fails.
 */
void *
vmw_ioctl_region_map(struct vmw_region *region)
{
   void *map;

   /* NOTE(review): region->ptr does not exist in struct vmw_region;
    * assumed dead code behind a no-op vmw_printf — verify.
    */
   vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
              region->ptr.gmrId, region->ptr.offset);

   if (region->data == NULL) {
      /* map_handle is the kernel-supplied fake mmap offset for this bo. */
      map = os_mmap(NULL, region->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    region->drm_fd, region->map_handle);
      if (map == MAP_FAILED) {
	 vmw_error("%s: Map failed.\n", __FUNCTION__);
	 return NULL;
      }

      region->data = map;
   }

   ++region->map_count;

   return region->data;
}
565
566 void
567 vmw_ioctl_region_unmap(struct vmw_region *region)
568 {
569 vmw_printf("%s: gmrId = %u, offset = %u\n", __FUNCTION__,
570 region->ptr.gmrId, region->ptr.offset);
571 --region->map_count;
572 }
573
574 /**
575 * vmw_ioctl_syncforcpu - Synchronize a buffer object for CPU usage
576 *
577 * @region: Pointer to a struct vmw_region representing the buffer object.
578 * @dont_block: Dont wait for GPU idle, but rather return -EBUSY if the
579 * GPU is busy with the buffer object.
580 * @readonly: Hint that the CPU access is read-only.
581 * @allow_cs: Allow concurrent command submission while the buffer is
582 * synchronized for CPU. If FALSE command submissions referencing the
583 * buffer will block until a corresponding call to vmw_ioctl_releasefromcpu.
584 *
585 * This function idles any GPU activities touching the buffer and blocks
586 * command submission of commands referencing the buffer, even from
587 * other processes.
588 */
589 int
590 vmw_ioctl_syncforcpu(struct vmw_region *region,
591 boolean dont_block,
592 boolean readonly,
593 boolean allow_cs)
594 {
595 struct drm_vmw_synccpu_arg arg;
596
597 memset(&arg, 0, sizeof(arg));
598 arg.op = drm_vmw_synccpu_grab;
599 arg.handle = region->handle;
600 arg.flags = drm_vmw_synccpu_read;
601 if (!readonly)
602 arg.flags |= drm_vmw_synccpu_write;
603 if (dont_block)
604 arg.flags |= drm_vmw_synccpu_dontblock;
605 if (allow_cs)
606 arg.flags |= drm_vmw_synccpu_allow_cs;
607
608 return drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
609 }
610
611 /**
612 * vmw_ioctl_releasefromcpu - Undo a previous syncforcpu.
613 *
614 * @region: Pointer to a struct vmw_region representing the buffer object.
615 * @readonly: Should hold the same value as the matching syncforcpu call.
616 * @allow_cs: Should hold the same value as the matching syncforcpu call.
617 */
618 void
619 vmw_ioctl_releasefromcpu(struct vmw_region *region,
620 boolean readonly,
621 boolean allow_cs)
622 {
623 struct drm_vmw_synccpu_arg arg;
624
625 memset(&arg, 0, sizeof(arg));
626 arg.op = drm_vmw_synccpu_release;
627 arg.handle = region->handle;
628 arg.flags = drm_vmw_synccpu_read;
629 if (!readonly)
630 arg.flags |= drm_vmw_synccpu_write;
631 if (allow_cs)
632 arg.flags |= drm_vmw_synccpu_allow_cs;
633
634 (void) drmCommandWrite(region->drm_fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
635 }
636
637 void
638 vmw_ioctl_fence_unref(struct vmw_winsys_screen *vws,
639 uint32_t handle)
640 {
641 struct drm_vmw_fence_arg arg;
642 int ret;
643
644 memset(&arg, 0, sizeof(arg));
645 arg.handle = handle;
646
647 ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_FENCE_UNREF,
648 &arg, sizeof(arg));
649 if (ret != 0)
650 vmw_error("%s Failed\n", __FUNCTION__);
651 }
652
653 static INLINE uint32_t
654 vmw_drm_fence_flags(uint32_t flags)
655 {
656 uint32_t dflags = 0;
657
658 if (flags & SVGA_FENCE_FLAG_EXEC)
659 dflags |= DRM_VMW_FENCE_FLAG_EXEC;
660 if (flags & SVGA_FENCE_FLAG_QUERY)
661 dflags |= DRM_VMW_FENCE_FLAG_QUERY;
662
663 return dflags;
664 }
665
666
667 int
668 vmw_ioctl_fence_signalled(struct vmw_winsys_screen *vws,
669 uint32_t handle,
670 uint32_t flags)
671 {
672 struct drm_vmw_fence_signaled_arg arg;
673 uint32_t vflags = vmw_drm_fence_flags(flags);
674 int ret;
675
676 memset(&arg, 0, sizeof(arg));
677 arg.handle = handle;
678 arg.flags = vflags;
679
680 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_SIGNALED,
681 &arg, sizeof(arg));
682
683 if (ret != 0)
684 return ret;
685
686 vmw_fences_signal(vws->fence_ops, arg.passed_seqno, 0, FALSE);
687
688 return (arg.signaled) ? 0 : -1;
689 }
690
691
692
/**
 * vmw_ioctl_fence_finish - Wait (up to 10 seconds) for a fence to signal.
 *
 * NOTE(review): always returns 0, even when the wait ioctl fails — the
 * error is only logged. Callers therefore treat this as best-effort;
 * confirm before changing the return to propagate @ret.
 */
int
vmw_ioctl_fence_finish(struct vmw_winsys_screen *vws,
                       uint32_t handle,
                       uint32_t flags)
{
   struct drm_vmw_fence_wait_arg arg;
   uint32_t vflags = vmw_drm_fence_flags(flags);
   int ret;

   memset(&arg, 0, sizeof(arg));

   arg.handle = handle;
   arg.timeout_us = 10*1000000;   /* 10 second timeout */
   arg.lazy = 0;                  /* busy-wait rather than lazy IRQ wait */
   arg.flags = vflags;

   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_FENCE_WAIT,
			     &arg, sizeof(arg));

   if (ret != 0)
      vmw_error("%s Failed\n", __FUNCTION__);

   return 0;
}
717
718 uint32
719 vmw_ioctl_shader_create(struct vmw_winsys_screen *vws,
720 SVGA3dShaderType type,
721 uint32 code_len)
722 {
723 struct drm_vmw_shader_create_arg sh_arg;
724 int ret;
725
726 VMW_FUNC;
727
728 memset(&sh_arg, 0, sizeof(sh_arg));
729
730 sh_arg.size = code_len;
731 sh_arg.buffer_handle = SVGA3D_INVALID_ID;
732 sh_arg.shader_handle = SVGA3D_INVALID_ID;
733 switch (type) {
734 case SVGA3D_SHADERTYPE_VS:
735 sh_arg.shader_type = drm_vmw_shader_type_vs;
736 break;
737 case SVGA3D_SHADERTYPE_PS:
738 sh_arg.shader_type = drm_vmw_shader_type_ps;
739 break;
740 default:
741 assert(!"Invalid shader type.");
742 break;
743 }
744
745 ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_CREATE_SHADER,
746 &sh_arg, sizeof(sh_arg));
747
748 if (ret)
749 return SVGA3D_INVALID_ID;
750
751 return sh_arg.shader_handle;
752 }
753
754 void
755 vmw_ioctl_shader_destroy(struct vmw_winsys_screen *vws, uint32 shid)
756 {
757 struct drm_vmw_shader_arg sh_arg;
758
759 VMW_FUNC;
760
761 memset(&sh_arg, 0, sizeof(sh_arg));
762 sh_arg.handle = shid;
763
764 (void)drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_UNREF_SHADER,
765 &sh_arg, sizeof(sh_arg));
766
767 }
768
/**
 * vmw_ioctl_parse_caps - Populate vws->ioctl.cap_3d from a caps buffer.
 *
 * For guest-backed-object kernels the buffer is a flat array indexed by
 * devcap id. For older kernels it is an SVGA3D caps-record block that
 * must be searched for the newest DEVCAPS record.
 *
 * Returns 0 on success, -1 if no devcaps record is found.
 */
static int
vmw_ioctl_parse_caps(struct vmw_winsys_screen *vws,
                     const uint32_t *cap_buffer)
{
   int i;

   if (vws->base.have_gb_objects) {
      /* Flat layout: cap_buffer[i] is the value of devcap i. */
      for (i = 0; i < vws->ioctl.num_cap_3d; ++i) {
         vws->ioctl.cap_3d[i].has_cap = TRUE;
         vws->ioctl.cap_3d[i].result.u = cap_buffer[i];
      }
      return 0;
   } else {
      const uint32 *capsBlock;
      const SVGA3dCapsRecord *capsRecord = NULL;
      uint32 offset;
      const SVGA3dCapPair *capArray;
      int numCaps, index;

      /*
       * Search linearly through the caps block records for the specified type.
       */
      capsBlock = cap_buffer;
      /* Each record starts with its length in uint32s; a zero length
       * terminates the block. Keep the DEVCAPS record with the highest
       * type, i.e. the newest format present.
       */
      for (offset = 0; capsBlock[offset] != 0; offset += capsBlock[offset]) {
         const SVGA3dCapsRecord *record;
         assert(offset < SVGA_FIFO_3D_CAPS_SIZE);
         record = (const SVGA3dCapsRecord *) (capsBlock + offset);
         if ((record->header.type >= SVGA3DCAPS_RECORD_DEVCAPS_MIN) &&
             (record->header.type <= SVGA3DCAPS_RECORD_DEVCAPS_MAX) &&
             (!capsRecord || (record->header.type > capsRecord->header.type))) {
            capsRecord = record;
         }
      }

      if(!capsRecord)
         return -1;

      /*
       * Calculate the number of caps from the size of the record.
       */
      capArray = (const SVGA3dCapPair *) capsRecord->data;
      numCaps = (int) ((capsRecord->header.length * sizeof(uint32) -
                        sizeof capsRecord->header) / (2 * sizeof(uint32)));

      /* Each pair is (devcap index, value); ignore indices newer than
       * this build knows about.
       */
      for (i = 0; i < numCaps; i++) {
         index = capArray[i][0];
         if (index < vws->ioctl.num_cap_3d) {
            vws->ioctl.cap_3d[index].has_cap = TRUE;
            vws->ioctl.cap_3d[index].result.u = capArray[i][1];
         } else {
            debug_printf("Unknown devcaps seen: %d\n", index);
         }
      }
   }
   return 0;
}
825
/**
 * vmw_ioctl_init - Query kernel parameters and 3D capabilities.
 *
 * Determines the drm interface version, whether 3D and guest-backed
 * objects are available, memory limits, and fills in vws->ioctl.cap_3d.
 * Uses a goto cleanup ladder; on failure all partial allocations are
 * released and FALSE is returned.
 */
boolean
vmw_ioctl_init(struct vmw_winsys_screen *vws)
{
   struct drm_vmw_getparam_arg gp_arg;
   struct drm_vmw_get_3d_cap_arg cap_arg;
   unsigned int size;
   int ret;
   uint32_t *cap_buffer;
   drmVersionPtr version;
   boolean have_drm_2_5;

   VMW_FUNC;

   version = drmGetVersion(vws->ioctl.drm_fd);
   if (!version)
      goto out_no_version;

   /* Feature gates: > 2.4 and > 2.5 respectively. */
   have_drm_2_5 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 4);
   vws->ioctl.have_drm_2_6 = version->version_major > 2 ||
      (version->version_major == 2 && version->version_minor > 5);

   /* 3D must be enabled in the VM for this winsys to be usable at all. */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_3D;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret || gp_arg.value == 0) {
      vmw_error("No 3D enabled (%i, %s).\n", ret, strerror(-ret));
      goto out_no_3d;
   }

   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_FIFO_HW_VERSION;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
			     &gp_arg, sizeof(gp_arg));
   if (ret) {
      vmw_error("Failed to get fifo hw version (%i, %s).\n",
                ret, strerror(-ret));
      goto out_no_3d;
   }
   vws->ioctl.hwversion = gp_arg.value;

   /* Guest-backed objects: probe the HW caps bit; absence of the param
    * (old kernel) simply means no GB objects.
    */
   memset(&gp_arg, 0, sizeof(gp_arg));
   gp_arg.param = DRM_VMW_PARAM_HW_CAPS;
   ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                             &gp_arg, sizeof(gp_arg));
   if (ret)
      vws->base.have_gb_objects = FALSE;
   else
      vws->base.have_gb_objects =
         !!(gp_arg.value & (uint64_t) SVGA_CAP_GBOBJECTS);

   /* GB objects require at least drm 2.5. */
   if (vws->base.have_gb_objects && !have_drm_2_5)
      goto out_no_3d;

   if (vws->base.have_gb_objects) {
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_3D_CAPS_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret)
         size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
      else
         size = gp_arg.value;

      /* NOTE(review): this inner have_gb_objects test is always true
       * inside this branch; the else arm looks unreachable — verify.
       */
      if (vws->base.have_gb_objects)
         vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
      else
         vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;


      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_MEMORY;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));
      if (ret) {
         /* Just guess a large enough value. */
         vws->ioctl.max_mob_memory = 256*1024*1024;
      } else {
         vws->ioctl.max_mob_memory = gp_arg.value;
      }

      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_MOB_SIZE;
      ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                &gp_arg, sizeof(gp_arg));

      if (ret || gp_arg.value == 0) {
	   vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;
      } else {
	   vws->ioctl.max_texture_size = gp_arg.value;
      }

      /* Never early flush surfaces, mobs do accounting. */
      vws->ioctl.max_surface_memory = -1;
   } else {
      vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;

      /* MAX_SURF_MEMORY only exists on drm >= 2.5; short-circuit below
       * guarantees ret is not read when the ioctl was never issued.
       */
      memset(&gp_arg, 0, sizeof(gp_arg));
      gp_arg.param = DRM_VMW_PARAM_MAX_SURF_MEMORY;
      if (have_drm_2_5)
         ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_GET_PARAM,
                                   &gp_arg, sizeof(gp_arg));
      if (!have_drm_2_5 || ret) {
         /* Just guess a large enough value, around 800mb. */
         vws->ioctl.max_surface_memory = 0x30000000;
      } else {
         vws->ioctl.max_surface_memory = gp_arg.value;
      }

      vws->ioctl.max_texture_size = VMW_MAX_DEFAULT_TEXTURE_SIZE;

      size = SVGA_FIFO_3D_CAPS_SIZE * sizeof(uint32_t);
   }

   /* Fetch the raw caps buffer from the kernel and parse it. */
   cap_buffer = calloc(1, size);
   if (!cap_buffer) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_3d;
   }

   vws->ioctl.cap_3d = calloc(vws->ioctl.num_cap_3d, 
			      sizeof(*vws->ioctl.cap_3d));
   if (!vws->ioctl.cap_3d) {
      debug_printf("Failed alloc fifo 3D caps buffer.\n");
      goto out_no_caparray;
   }
      
   memset(&cap_arg, 0, sizeof(cap_arg));
   cap_arg.buffer = (uint64_t) (unsigned long) (cap_buffer);
   cap_arg.max_size = size;

   ret = drmCommandWrite(vws->ioctl.drm_fd, DRM_VMW_GET_3D_CAP,
			 &cap_arg, sizeof(cap_arg));

   if (ret) {
      debug_printf("Failed to get 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }

   ret = vmw_ioctl_parse_caps(vws, cap_buffer);
   if (ret) {
      debug_printf("Failed to parse 3D capabilities"
		   " (%i, %s).\n", ret, strerror(-ret));
      goto out_no_caps;
   }
   free(cap_buffer);
   drmFreeVersion(version);
   vmw_printf("%s OK\n", __FUNCTION__);
   return TRUE;
  out_no_caps:
   free(vws->ioctl.cap_3d);
  out_no_caparray:
   free(cap_buffer);
  out_no_3d:
   drmFreeVersion(version);
  out_no_version:
   vws->ioctl.num_cap_3d = 0;
   debug_printf("%s Failed\n", __FUNCTION__);
   return FALSE;
}
988
989
990
/**
 * vmw_ioctl_cleanup - Counterpart to vmw_ioctl_init.
 *
 * Currently nothing to release here beyond the trace; the cap_3d array
 * is freed elsewhere.
 */
void
vmw_ioctl_cleanup(struct vmw_winsys_screen *vws)
{
   VMW_FUNC;
}