/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "anv_private.h"
#include "common/gen_defines.h"
#include "common/gen_gem.h"
#include "drm-uapi/sync_file.h"
38 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
40 * Return gem handle, or 0 on failure. Gem handles are never 0.
43 anv_gem_create(struct anv_device
*device
, uint64_t size
)
45 struct drm_i915_gem_create gem_create
= {
49 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
);
51 /* FIXME: What do we do if this fails? */
55 return gem_create
.handle
;
59 anv_gem_close(struct anv_device
*device
, uint32_t gem_handle
)
61 struct drm_gem_close close
= {
65 gen_ioctl(device
->fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
69 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
72 anv_gem_mmap_offset(struct anv_device
*device
, uint32_t gem_handle
,
73 uint64_t offset
, uint64_t size
, uint32_t flags
)
75 struct drm_i915_gem_mmap_offset gem_mmap
= {
77 .flags
= (flags
& I915_MMAP_WC
) ?
78 I915_MMAP_OFFSET_WC
: I915_MMAP_OFFSET_WB
,
82 /* Get the fake offset back */
83 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_MMAP_OFFSET
, &gem_mmap
);
88 void *map
= mmap(NULL
, size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
89 device
->fd
, gem_mmap
.offset
);
94 anv_gem_mmap_legacy(struct anv_device
*device
, uint32_t gem_handle
,
95 uint64_t offset
, uint64_t size
, uint32_t flags
)
97 struct drm_i915_gem_mmap gem_mmap
= {
104 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_MMAP
, &gem_mmap
);
108 VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap
.addr_ptr
, gem_mmap
.size
, 0, 1));
109 return (void *)(uintptr_t) gem_mmap
.addr_ptr
;
113 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
116 anv_gem_mmap(struct anv_device
*device
, uint32_t gem_handle
,
117 uint64_t offset
, uint64_t size
, uint32_t flags
)
119 if (device
->physical
->has_mmap_offset
)
120 return anv_gem_mmap_offset(device
, gem_handle
, offset
, size
, flags
);
122 return anv_gem_mmap_legacy(device
, gem_handle
, offset
, size
, flags
);
125 /* This is just a wrapper around munmap, but it also notifies valgrind that
126 * this map is no longer valid. Pair this with anv_gem_mmap().
129 anv_gem_munmap(struct anv_device
*device
, void *p
, uint64_t size
)
131 if (!device
->physical
->has_mmap_offset
)
132 VG(VALGRIND_FREELIKE_BLOCK(p
, 0));
137 anv_gem_userptr(struct anv_device
*device
, void *mem
, size_t size
)
139 struct drm_i915_gem_userptr userptr
= {
140 .user_ptr
= (__u64
)((unsigned long) mem
),
145 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_USERPTR
, &userptr
);
149 return userptr
.handle
;
153 anv_gem_set_caching(struct anv_device
*device
,
154 uint32_t gem_handle
, uint32_t caching
)
156 struct drm_i915_gem_caching gem_caching
= {
157 .handle
= gem_handle
,
161 return gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_CACHING
, &gem_caching
);
165 anv_gem_set_domain(struct anv_device
*device
, uint32_t gem_handle
,
166 uint32_t read_domains
, uint32_t write_domain
)
168 struct drm_i915_gem_set_domain gem_set_domain
= {
169 .handle
= gem_handle
,
170 .read_domains
= read_domains
,
171 .write_domain
= write_domain
,
174 return gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_DOMAIN
, &gem_set_domain
);
178 * Returns 0, 1, or negative to indicate error
181 anv_gem_busy(struct anv_device
*device
, uint32_t gem_handle
)
183 struct drm_i915_gem_busy busy
= {
184 .handle
= gem_handle
,
187 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_BUSY
, &busy
);
191 return busy
.busy
!= 0;
195 * On error, \a timeout_ns holds the remaining time.
198 anv_gem_wait(struct anv_device
*device
, uint32_t gem_handle
, int64_t *timeout_ns
)
200 struct drm_i915_gem_wait wait
= {
201 .bo_handle
= gem_handle
,
202 .timeout_ns
= *timeout_ns
,
206 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_WAIT
, &wait
);
207 *timeout_ns
= wait
.timeout_ns
;
213 anv_gem_execbuffer(struct anv_device
*device
,
214 struct drm_i915_gem_execbuffer2
*execbuf
)
216 if (execbuf
->flags
& I915_EXEC_FENCE_OUT
)
217 return gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
, execbuf
);
219 return gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_EXECBUFFER2
, execbuf
);
222 /** Return -1 on error. */
224 anv_gem_get_tiling(struct anv_device
*device
, uint32_t gem_handle
)
226 struct drm_i915_gem_get_tiling get_tiling
= {
227 .handle
= gem_handle
,
230 /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
231 * anymore, so we will need another way to get the tiling. Apparently this
232 * is only used in Android code, so we may need some other way to
233 * communicate the tiling mode.
235 if (gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_GET_TILING
, &get_tiling
)) {
236 assert(!"Failed to get BO tiling");
240 return get_tiling
.tiling_mode
;
244 anv_gem_set_tiling(struct anv_device
*device
,
245 uint32_t gem_handle
, uint32_t stride
, uint32_t tiling
)
249 /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
250 * nothing needs to be done.
252 if (!device
->info
.has_tiling_uapi
)
255 /* set_tiling overwrites the input on the error path, so we have to open
259 struct drm_i915_gem_set_tiling set_tiling
= {
260 .handle
= gem_handle
,
261 .tiling_mode
= tiling
,
265 ret
= ioctl(device
->fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
266 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
272 anv_gem_get_param(int fd
, uint32_t param
)
276 drm_i915_getparam_t gp
= {
281 int ret
= gen_ioctl(fd
, DRM_IOCTL_I915_GETPARAM
, &gp
);
289 anv_gem_get_bit6_swizzle(int fd
, uint32_t tiling
)
291 struct drm_gem_close close
;
294 struct drm_i915_gem_create gem_create
= {
298 if (gen_ioctl(fd
, DRM_IOCTL_I915_GEM_CREATE
, &gem_create
)) {
299 assert(!"Failed to create GEM BO");
303 bool swizzled
= false;
305 /* set_tiling overwrites the input on the error path, so we have to open
309 struct drm_i915_gem_set_tiling set_tiling
= {
310 .handle
= gem_create
.handle
,
311 .tiling_mode
= tiling
,
312 .stride
= tiling
== I915_TILING_X
? 512 : 128,
315 ret
= ioctl(fd
, DRM_IOCTL_I915_GEM_SET_TILING
, &set_tiling
);
316 } while (ret
== -1 && (errno
== EINTR
|| errno
== EAGAIN
));
319 assert(!"Failed to set BO tiling");
320 goto close_and_return
;
323 struct drm_i915_gem_get_tiling get_tiling
= {
324 .handle
= gem_create
.handle
,
327 if (gen_ioctl(fd
, DRM_IOCTL_I915_GEM_GET_TILING
, &get_tiling
)) {
328 assert(!"Failed to get BO tiling");
329 goto close_and_return
;
332 swizzled
= get_tiling
.swizzle_mode
!= I915_BIT_6_SWIZZLE_NONE
;
336 memset(&close
, 0, sizeof(close
));
337 close
.handle
= gem_create
.handle
;
338 gen_ioctl(fd
, DRM_IOCTL_GEM_CLOSE
, &close
);
344 anv_gem_has_context_priority(int fd
)
346 return !anv_gem_set_context_param(fd
, 0, I915_CONTEXT_PARAM_PRIORITY
,
347 GEN_CONTEXT_MEDIUM_PRIORITY
);
351 anv_gem_create_context(struct anv_device
*device
)
353 struct drm_i915_gem_context_create create
= { 0 };
355 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_CREATE
, &create
);
359 return create
.ctx_id
;
363 anv_gem_destroy_context(struct anv_device
*device
, int context
)
365 struct drm_i915_gem_context_destroy destroy
= {
369 return gen_ioctl(device
->fd
, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY
, &destroy
);
373 anv_gem_set_context_param(int fd
, int context
, uint32_t param
, uint64_t value
)
375 struct drm_i915_gem_context_param p
= {
382 if (gen_ioctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM
, &p
))
388 anv_gem_get_context_param(int fd
, int context
, uint32_t param
, uint64_t *value
)
390 struct drm_i915_gem_context_param gp
= {
395 int ret
= gen_ioctl(fd
, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
, &gp
);
404 anv_gem_gpu_get_reset_stats(struct anv_device
*device
,
405 uint32_t *active
, uint32_t *pending
)
407 struct drm_i915_reset_stats stats
= {
408 .ctx_id
= device
->context_id
,
411 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_I915_GET_RESET_STATS
, &stats
);
413 *active
= stats
.batch_active
;
414 *pending
= stats
.batch_pending
;
421 anv_gem_handle_to_fd(struct anv_device
*device
, uint32_t gem_handle
)
423 struct drm_prime_handle args
= {
424 .handle
= gem_handle
,
425 .flags
= DRM_CLOEXEC
,
428 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_PRIME_HANDLE_TO_FD
, &args
);
436 anv_gem_fd_to_handle(struct anv_device
*device
, int fd
)
438 struct drm_prime_handle args
= {
442 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_PRIME_FD_TO_HANDLE
, &args
);
450 anv_gem_reg_read(int fd
, uint32_t offset
, uint64_t *result
)
452 struct drm_i915_reg_read args
= {
456 int ret
= gen_ioctl(fd
, DRM_IOCTL_I915_REG_READ
, &args
);
463 anv_gem_sync_file_merge(struct anv_device
*device
, int fd1
, int fd2
)
465 struct sync_merge_data args
= {
466 .name
= "anv merge fence",
471 int ret
= gen_ioctl(fd1
, SYNC_IOC_MERGE
, &args
);
479 anv_gem_syncobj_create(struct anv_device
*device
, uint32_t flags
)
481 struct drm_syncobj_create args
= {
485 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_CREATE
, &args
);
493 anv_gem_syncobj_destroy(struct anv_device
*device
, uint32_t handle
)
495 struct drm_syncobj_destroy args
= {
499 gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_DESTROY
, &args
);
503 anv_gem_syncobj_handle_to_fd(struct anv_device
*device
, uint32_t handle
)
505 struct drm_syncobj_handle args
= {
509 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
517 anv_gem_syncobj_fd_to_handle(struct anv_device
*device
, int fd
)
519 struct drm_syncobj_handle args
= {
523 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE
, &args
);
531 anv_gem_syncobj_export_sync_file(struct anv_device
*device
, uint32_t handle
)
533 struct drm_syncobj_handle args
= {
535 .flags
= DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE
,
538 int ret
= gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD
, &args
);
546 anv_gem_syncobj_import_sync_file(struct anv_device
*device
,
547 uint32_t handle
, int fd
)
549 struct drm_syncobj_handle args
= {
552 .flags
= DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE
,
555 return gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE
, &args
);
559 anv_gem_syncobj_reset(struct anv_device
*device
, uint32_t handle
)
561 struct drm_syncobj_array args
= {
562 .handles
= (uint64_t)(uintptr_t)&handle
,
566 gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_RESET
, &args
);
/** Thin wrapper; defers the syncobj-wait capability check to common code. */
bool
anv_gem_supports_syncobj_wait(int fd)
{
   return gen_gem_supports_syncobj_wait(fd);
}
576 anv_gem_syncobj_wait(struct anv_device
*device
,
577 uint32_t *handles
, uint32_t num_handles
,
578 int64_t abs_timeout_ns
, bool wait_all
)
580 struct drm_syncobj_wait args
= {
581 .handles
= (uint64_t)(uintptr_t)handles
,
582 .count_handles
= num_handles
,
583 .timeout_nsec
= abs_timeout_ns
,
584 .flags
= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
,
588 args
.flags
|= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL
;
590 return gen_ioctl(device
->fd
, DRM_IOCTL_SYNCOBJ_WAIT
, &args
);