/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "anv_private.h"
/**
 * Wrapper around ioctl() that transparently retries the call when it was
 * interrupted by a signal (EINTR) or the kernel asked us to retry (EAGAIN).
 *
 * Returns the ioctl's return value: 0 on success, -1 on (non-retryable)
 * failure with errno set.
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
46 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
48 * Return gem handle, or 0 on failure. Gem handles are never 0.
51 anv_gem_create(struct anv_device
*device
, uint64_t size
)
53 struct drm_i915_gem_create gem_create
= {
57 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
);
59 /* FIXME: What do we do if this fails? */
63 return gem_create
.handle
;
67 anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
)
69 struct drm_gem_close close
= {
73 anv_ioctl(device
->fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
77 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
80 anv_gem_mmap(struct anv_device
*device
, uint32_t gem_handle
,
81 uint64_t offset
, uint64_t size
, uint32_t flags
)
83 struct drm_i915_gem_mmap gem_mmap
= {
90 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_MMAP
, &gem_mmap
);
94 VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap
.addr_ptr
, gem_mmap
.size
, 0, 1));
95 return (void *)(uintptr_t) gem_mmap
.addr_ptr
;
/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid.  Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
109 anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
)
111 struct drm_i915_gem_userptr userptr
= {
112 .user_ptr
= (__u64
)((unsigned long) mem
),
117 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_USERPTR
, &userptr
);
121 return userptr
.handle
;
125 anv_gem_set_caching(struct anv_device
*device
,
126 uint32_t gem_handle
, uint32_t caching
)
128 struct drm_i915_gem_caching gem_caching
= {
129 .handle
= gem_handle
,
133 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_CACHING
, &gem_caching
);
137 anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
138 uint32_t read_domains
, uint32_t write_domain
)
140 struct drm_i915_gem_set_domain gem_set_domain
= {
141 .handle
= gem_handle
,
142 .read_domains
= read_domains
,
143 .write_domain
= write_domain
,
146 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_DOMAIN
, &gem_set_domain
);
150 * Returns 0, 1, or negative to indicate error
153 anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
)
155 struct drm_i915_gem_busy busy
= {
156 .handle
= gem_handle
,
159 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_BUSY
, &busy
);
163 return busy
.busy
!= 0;
167 * On error, \a timeout_ns holds the remaining time.
170 anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
)
172 struct drm_i915_gem_wait wait
= {
173 .bo_handle
= gem_handle
,
174 .timeout_ns
= *timeout_ns
,
178 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_WAIT
, &wait
);
179 *timeout_ns
= wait
.timeout_ns
;
185 anv_gem_execbuffer(struct anv_device
*device
,
186 struct drm_i915_gem_execbuffer2
*execbuf
)
188 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2
, execbuf
);
192 anv_gem_set_tiling(struct anv_device
*device
,
193 uint32_t gem_handle
, uint32_t stride
, uint32_t tiling
)
197 /* set_tiling overwrites the input on the error path, so we have to open
201 struct drm_i915_gem_set_tiling set_tiling
= {
202 .handle
= gem_handle
,
203 .tiling_mode
= tiling
,
207 ret
= ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
208 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
214 anv_gem_get_param(int fd
, uint32_t param
)
218 drm_i915_getparam_t gp
= {
223 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
);
231 anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
)
233 struct drm_gem_close close
;
236 struct drm_i915_gem_create gem_create
= {
240 if (anv_ioctl(fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
)) {
241 assert(!"Failed to create GEM BO");
245 bool swizzled
= false;
247 /* set_tiling overwrites the input on the error path, so we have to open
251 struct drm_i915_gem_set_tiling set_tiling
= {
252 .handle
= gem_create
.handle
,
253 .tiling_mode
= tiling
,
254 .stride
= tiling
== I915_TILING_X
? 512 : 128,
257 ret
= ioctl(fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
258 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
261 assert(!"Failed to set BO tiling");
262 goto close_and_return
;
265 struct drm_i915_gem_get_tiling get_tiling
= {
266 .handle
= gem_create
.handle
,
269 if (anv_ioctl(fd
, DRM_IOCTL_I915_GEM_GET_TILING
, &get_tiling
)) {
270 assert(!"Failed to get BO tiling");
271 goto close_and_return
;
274 swizzled
= get_tiling
.swizzle_mode
!= I915_BIT_6_SWIZZLE_NONE
;
278 memset(&close
, 0, sizeof(close
));
279 close
.handle
= gem_create
.handle
;
280 anv_ioctl(fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
286 anv_gem_create_context(struct anv_device
*device
)
288 struct drm_i915_gem_context_create create
= { 0 };
290 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_CREATE
, &create
);
294 return create
.ctx_id
;
298 anv_gem_destroy_context(struct anv_device
*device
, int context
)
300 struct drm_i915_gem_context_destroy destroy
= {
304 return anv_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY
, &destroy
);
308 anv_gem_get_context_param(int fd
, int context
, uint32_t param
, uint64_t *value
)
310 struct drm_i915_gem_context_param gp
= {
315 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
, &gp
);
324 anv_gem_get_aperture(int fd
, uint64_t *size
)
326 struct drm_i915_gem_get_aperture aperture
= { 0 };
328 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_GET_APERTURE
, &aperture
);
332 *size
= aperture
.aper_available_size
;
338 anv_gem_supports_48b_addresses(int fd
)
340 struct drm_i915_gem_exec_object2 obj
= {
341 .flags
= EXEC_OBJECT_SUPPORTS_48B_ADDRESS
,
344 struct drm_i915_gem_execbuffer2 execbuf
= {
345 .buffers_ptr
= (uintptr_t)&obj
,
350 int ret
= anv_ioctl(fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2
, &execbuf
);
352 return ret
== -1 && errno
== ENOENT
;
356 anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
357 uint32_t *active
, uint32_t *pending
)
359 struct drm_i915_reset_stats stats
= {
360 .ctx_id
= device
->context_id
,
363 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_I915_GET_RESET_STATS
, &stats
);
365 *active
= stats
.batch_active
;
366 *pending
= stats
.batch_pending
;
373 anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
)
375 struct drm_prime_handle args
= {
376 .handle
= gem_handle
,
377 .flags
= DRM_CLOEXEC
,
380 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_PRIME_HANDLE_TO_FD
, &args
);
388 anv_gem_fd_to_handle(struct anv_device
*device
, int fd
)
390 struct drm_prime_handle args
= {
394 int ret
= anv_ioctl(device
->fd
, DRM_IOCTL_PRIME_FD_TO_HANDLE
, &args
);