/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include <sys/ioctl.h>
25 #include <sys/types.h>
32 #include "anv_private.h"
33 #include "common/gen_defines.h"
/* Issue an ioctl, transparently retrying when the call is interrupted by a
 * signal (EINTR) or told to try again (EAGAIN).
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
48 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
50 * Return gem handle, or 0 on failure. Gem handles are never 0.
53 anv_gem_create(struct anv_device
*device
, uint64_t size
)
55 struct drm_i915_gem_create gem_create
= {
59 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
);
61 /* FIXME: What do we do if this fails? */
65 return gem_create
.handle
;
69 anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
)
71 struct drm_gem_close close
= {
75 anv_ioctl(device
->fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
79 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
82 anv_gem_mmap(struct anv_device
*device
, uint32_t gem_handle
,
83 uint64_t offset
, uint64_t size
, uint32_t flags
)
85 struct drm_i915_gem_mmap gem_mmap
= {
92 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_MMAP
, &gem_mmap
);
96 VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap
.addr_ptr
, gem_mmap
.size
, 0, 1));
97 return (void *)(uintptr_t) gem_mmap
.addr_ptr
;
/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
111 anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
)
113 struct drm_i915_gem_userptr userptr
= {
114 .user_ptr
= (__u64
)((unsigned long) mem
),
119 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_USERPTR
, &userptr
);
123 return userptr
.handle
;
127 anv_gem_set_caching(struct anv_device
*device
,
128 uint32_t gem_handle
, uint32_t caching
)
130 struct drm_i915_gem_caching gem_caching
= {
131 .handle
= gem_handle
,
135 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_CACHING
, &gem_caching
);
139 anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
140 uint32_t read_domains
, uint32_t write_domain
)
142 struct drm_i915_gem_set_domain gem_set_domain
= {
143 .handle
= gem_handle
,
144 .read_domains
= read_domains
,
145 .write_domain
= write_domain
,
148 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_DOMAIN
, &gem_set_domain
);
152 * Returns 0, 1, or negative to indicate error
155 anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
)
157 struct drm_i915_gem_busy busy
= {
158 .handle
= gem_handle
,
161 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_BUSY
, &busy
);
165 return busy
.busy
!= 0;
169 * On error, \a timeout_ns holds the remaining time.
172 anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
)
174 struct drm_i915_gem_wait wait
= {
175 .bo_handle
= gem_handle
,
176 .timeout_ns
= *timeout_ns
,
180 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_WAIT
, &wait
);
181 *timeout_ns
= wait
.timeout_ns
;
187 anv_gem_execbuffer(struct anv_device
*device
,
188 struct drm_i915_gem_execbuffer2
*execbuf
)
190 if (execbuf
->flags
& I915_EXEC_FENCE_OUT
)
191 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
, execbuf
);
193 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2
, execbuf
);
196 /** Return -1 on error. */
198 anv_gem_get_tiling(struct anv_device
*device
, uint32_t gem_handle
)
200 struct drm_i915_gem_get_tiling get_tiling
= {
201 .handle
= gem_handle
,
204 if (anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_GET_TILING
, &get_tiling
)) {
205 assert(!"Failed to get BO tiling");
209 return get_tiling
.tiling_mode
;
213 anv_gem_set_tiling(struct anv_device
*device
,
214 uint32_t gem_handle
, uint32_t stride
, uint32_t tiling
)
218 /* set_tiling overwrites the input on the error path, so we have to open
222 struct drm_i915_gem_set_tiling set_tiling
= {
223 .handle
= gem_handle
,
224 .tiling_mode
= tiling
,
228 ret
= ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
229 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
235 anv_gem_get_param(int fd
, uint32_t param
)
239 drm_i915_getparam_t gp
= {
244 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
);
252 anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
)
254 struct drm_gem_close close
;
257 struct drm_i915_gem_create gem_create
= {
261 if (anv_ioctl(fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
)) {
262 assert(!"Failed to create GEM BO");
266 bool swizzled
= false;
268 /* set_tiling overwrites the input on the error path, so we have to open
272 struct drm_i915_gem_set_tiling set_tiling
= {
273 .handle
= gem_create
.handle
,
274 .tiling_mode
= tiling
,
275 .stride
= tiling
== I915_TILING_X
? 512 : 128,
278 ret
= ioctl(fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
279 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
282 assert(!"Failed to set BO tiling");
283 goto close_and_return
;
286 struct drm_i915_gem_get_tiling get_tiling
= {
287 .handle
= gem_create
.handle
,
290 if (anv_ioctl(fd
, DRM_IOCTL_I915_GEM_GET_TILING
, &get_tiling
)) {
291 assert(!"Failed to get BO tiling");
292 goto close_and_return
;
295 swizzled
= get_tiling
.swizzle_mode
!= I915_BIT_6_SWIZZLE_NONE
;
299 memset(&close
, 0, sizeof(close
));
300 close
.handle
= gem_create
.handle
;
301 anv_ioctl(fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
307 anv_gem_set_context_priority(struct anv_device
*device
,
310 return anv_gem_set_context_param(device
->fd
, device
->context_id
,
311 I915_CONTEXT_PARAM_PRIORITY
,
316 anv_gem_has_context_priority(int fd
)
318 return !anv_gem_set_context_param(fd
, 0, I915_CONTEXT_PARAM_PRIORITY
,
319 GEN_CONTEXT_MEDIUM_PRIORITY
);
323 anv_gem_create_context(struct anv_device
*device
)
325 struct drm_i915_gem_context_create create
= { 0 };
327 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_CREATE
, &create
);
331 return create
.ctx_id
;
335 anv_gem_destroy_context(struct anv_device
*device
, int context
)
337 struct drm_i915_gem_context_destroy destroy
= {
341 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY
, &destroy
);
345 anv_gem_set_context_param(int fd
, int context
, uint32_t param
, uint64_t value
)
347 struct drm_i915_gem_context_param p
= {
354 if (anv_ioctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM
, &p
))
360 anv_gem_get_context_param(int fd
, int context
, uint32_t param
, uint64_t *value
)
362 struct drm_i915_gem_context_param gp
= {
367 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
, &gp
);
376 anv_gem_get_aperture(int fd
, uint64_t *size
)
378 struct drm_i915_gem_get_aperture aperture
= { 0 };
380 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_GET_APERTURE
, &aperture
);
384 *size
= aperture
.aper_available_size
;
390 anv_gem_supports_48b_addresses(int fd
)
392 struct drm_i915_gem_exec_object2 obj
= {
393 .flags
= EXEC_OBJECT_SUPPORTS_48B_ADDRESS
,
396 struct drm_i915_gem_execbuffer2 execbuf
= {
397 .buffers_ptr
= (uintptr_t)&obj
,
402 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2
, &execbuf
);
404 return ret
== -1 && errno
== ENOENT
;
408 anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
409 uint32_t *active
, uint32_t *pending
)
411 struct drm_i915_reset_stats stats
= {
412 .ctx_id
= device
->context_id
,
415 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GET_RESET_STATS
, &stats
);
417 *active
= stats
.batch_active
;
418 *pending
= stats
.batch_pending
;
425 anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
)
427 struct drm_prime_handle args
= {
428 .handle
= gem_handle
,
429 .flags
= DRM_CLOEXEC
,
432 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_PRIME_HANDLE_TO_FD
, &args
);
440 anv_gem_fd_to_handle(struct anv_device
*device
, int fd
)
442 struct drm_prime_handle args
= {
446 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_PRIME_FD_TO_HANDLE
, &args
);
#ifndef SYNC_IOC_MAGIC
/* duplicated from linux/sync_file.h to avoid build-time dependency
 * on new (v4.7) kernel headers. Once distro's are mostly using
 * something newer than v4.7 drop this and #include <linux/sync_file.h>
 * instead. Layout must match the kernel UAPI struct exactly.
 */
struct sync_merge_data {
   char  name[32];
   __s32 fd2;
   __s32 fence;
   __u32 flags;
   __u32 pad;
};

#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif
472 anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
)
474 const char name
[] = "anv merge fence";
475 struct sync_merge_data args
= {
479 memcpy(args
.name
, name
, sizeof(name
));
481 int ret
= anv_ioctl(fd1
, SYNC_IOC_MERGE
, &args
);
489 anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
)
491 struct drm_syncobj_create args
= {
495 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_CREATE
, &args
);
503 anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
)
505 struct drm_syncobj_destroy args
= {
509 anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_DESTROY
, &args
);
513 anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
)
515 struct drm_syncobj_handle args
= {
519 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
527 anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
)
529 struct drm_syncobj_handle args
= {
533 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE
, &args
);
541 anv_gem_syncobj_export_sync_file(struct anv_device
*device
, uint32_t handle
)
543 struct drm_syncobj_handle args
= {
545 .flags
= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
,
548 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
556 anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
557 uint32_t handle
, int fd
)
559 struct drm_syncobj_handle args
= {
562 .flags
= DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
,
565 return anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE
, &args
);
569 anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
)
571 struct drm_syncobj_array args
= {
572 .handles
= (uint64_t)(uintptr_t)&handle
,
576 anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_RESET
, &args
);
580 anv_gem_supports_syncobj_wait(int fd
)
584 struct drm_syncobj_create create
= {
587 ret
= anv_ioctl(fd
, DRM_IOCTL_SYNCOBJ_CREATE
, &create
);
591 uint32_t syncobj
= create
.handle
;
593 struct drm_syncobj_wait wait
= {
594 .handles
= (uint64_t)(uintptr_t)&create
,
597 .flags
= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
,
599 ret
= anv_ioctl(fd
, DRM_IOCTL_SYNCOBJ_WAIT
, &wait
);
601 struct drm_syncobj_destroy destroy
= {
604 anv_ioctl(fd
, DRM_IOCTL_SYNCOBJ_DESTROY
, &destroy
);
606 /* If it timed out, then we have the ioctl and it supports the
607 * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
609 return ret
== -1 && errno
== ETIME
;
613 anv_gem_syncobj_wait(struct anv_device
*device
,
614 uint32_t *handles
, uint32_t num_handles
,
615 int64_t abs_timeout_ns
, bool wait_all
)
617 struct drm_syncobj_wait args
= {
618 .handles
= (uint64_t)(uintptr_t)handles
,
619 .count_handles
= num_handles
,
620 .timeout_nsec
= abs_timeout_ns
,
621 .flags
= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
,
625 args
.flags
|= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
;
627 return anv_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_WAIT
, &args
);