* IN THE SOFTWARE.
*/
-#define _DEFAULT_SOURCE
-
#include <sys/ioctl.h>
+#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include "anv_private.h"
-
-static int
-anv_ioctl(int fd, unsigned long request, void *arg)
-{
- int ret;
-
- do {
- ret = ioctl(fd, request, arg);
- } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
-
- return ret;
-}
+#include "common/gen_defines.h"
+#include "common/gen_gem.h"
+#include "drm-uapi/sync_file.h"
/**
* Wrapper around DRM_IOCTL_I915_GEM_CREATE.
* Return gem handle, or 0 on failure. Gem handles are never 0.
*/
uint32_t
-anv_gem_create(struct anv_device *device, size_t size)
+anv_gem_create(struct anv_device *device, uint64_t size)
{
struct drm_i915_gem_create gem_create = {
.size = size,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
if (ret != 0) {
/* FIXME: What do we do if this fails? */
return 0;
.handle = gem_handle,
};
- anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ gen_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}
/**
- * Wrapper around DRM_IOCTL_I915_GEM_MMAP.
+ * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
*/
-void*
-anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
- uint64_t offset, uint64_t size, uint32_t flags)
+static void*
+anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
+ uint64_t offset, uint64_t size, uint32_t flags)
+{
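+ /* Translate the legacy I915_MMAP_WC flag into the equivalent mmap_offset
+ * caching mode; write-back is the default.
+ */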
+ struct drm_i915_gem_mmap_offset gem_mmap = {
+ .handle = gem_handle,
+ .flags = (flags & I915_MMAP_WC) ?
+ I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
+ };
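+ /* The mmap_offset uAPI cannot map at an offset into the BO; the fake
+ * offset it returns always maps the whole object, hence the assert below.
+ */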
+ assert(offset == 0);
+
+ /* Get the fake offset back */
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
+ if (ret != 0)
+ return MAP_FAILED;
+
+ /* And map it */
+ void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ device->fd, gem_mmap.offset);
+ return map;
+}
+
+static void*
+anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
+ uint64_t offset, uint64_t size, uint32_t flags)
{
struct drm_i915_gem_mmap gem_mmap = {
.handle = gem_handle,
.flags = flags,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
- if (ret != 0) {
- /* FIXME: Is NULL the right error return? Cf MAP_INVALID */
- return NULL;
- }
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
+ if (ret != 0)
+ return MAP_FAILED;
VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
return (void *)(uintptr_t) gem_mmap.addr_ptr;
}
+/**
+ * Wrapper around DRM_IOCTL_I915_GEM_MMAP or DRM_IOCTL_I915_GEM_MMAP_OFFSET,
+ * depending on what the kernel supports. Returns MAP_FAILED on error.
+ */
+void*
+anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
+ uint64_t offset, uint64_t size, uint32_t flags)
+{
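+ /* Prefer the mmap_offset uAPI when the kernel advertises it; older kernels
+ * only provide the legacy MMAP ioctl.
+ */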
+ if (device->physical->has_mmap_offset)
+ return anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
+ else
+ return anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);
+}
+
/* This is just a wrapper around munmap, but it also notifies valgrind that
* this map is no longer valid. Pair this with anv_gem_mmap().
*/
void
-anv_gem_munmap(void *p, uint64_t size)
+anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
- VG(VALGRIND_FREELIKE_BLOCK(p, 0));
+ if (!device->physical->has_mmap_offset)
+ VG(VALGRIND_FREELIKE_BLOCK(p, 0));
munmap(p, size);
}
.flags = 0,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
if (ret == -1)
return 0;
.caching = caching,
};
- return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
+ return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}
int
.write_domain = write_domain,
};
- return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
+ return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
+}
+
+/**
+ * Returns 1 if the BO is still busy, 0 if it is idle, or a negative value on
+ * error.
+ */
+int
+anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
+{
+ struct drm_i915_gem_busy busy = {
+ .handle = gem_handle,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+ if (ret < 0)
+ return ret;
+
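+ /* busy.busy is a bitmask of engines still using the BO; any non-zero value
+ * means the BO is busy.
+ */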
+ return busy.busy != 0;
}
/**
.flags = 0,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
*timeout_ns = wait.timeout_ns;
return ret;
anv_gem_execbuffer(struct anv_device *device,
struct drm_i915_gem_execbuffer2 *execbuf)
{
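+ /* The _WR (read-write) variant is required when an output fence is
+ * requested, so the kernel can write the resulting sync_file fd back into
+ * the execbuf struct.
+ */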
- return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
+ if (execbuf->flags & I915_EXEC_FENCE_OUT)
+ return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
+ else
+ return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
+}
+
+/** Return the BO tiling mode, or -1 on error. */
+int
+anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
+{
+ struct drm_i915_gem_get_tiling get_tiling = {
+ .handle = gem_handle,
+ };
+
+ /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
+ * anymore, so we will need another way to get the tiling. Apparently this
+ * is only used in Android code, so we may need some other way to
+ * communicate the tiling mode.
+ */
+ if (gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
+ assert(!"Failed to get BO tiling");
+ return -1;
+ }
+
+ return get_tiling.tiling_mode;
}
int
{
int ret;
+ /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
+ * nothing needs to be done.
+ */
+ if (!device->info.has_tiling_uapi)
+ return 0;
+
/* set_tiling overwrites the input on the error path, so we have to open
- * code anv_ioctl.
+ * code gen_ioctl.
*/
do {
struct drm_i915_gem_set_tiling set_tiling = {
.value = &tmp,
};
- int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret == 0)
return tmp;
.size = 4096,
};
- if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
+ if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
assert(!"Failed to create GEM BO");
return false;
}
bool swizzled = false;
/* set_tiling overwrites the input on the error path, so we have to open
- * code anv_ioctl.
+ * code gen_ioctl.
*/
do {
struct drm_i915_gem_set_tiling set_tiling = {
.handle = gem_create.handle,
};
- if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
+ if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
assert(!"Failed to get BO tiling");
goto close_and_return;
}
memset(&close, 0, sizeof(close));
close.handle = gem_create.handle;
- anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
+ gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
return swizzled;
}
+bool
+anv_gem_has_context_priority(int fd)
+{
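+ /* Probe for priority support by setting the default (medium) priority on
+ * the default context; this should always succeed when the kernel
+ * implements I915_CONTEXT_PARAM_PRIORITY.
+ */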
+ return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
+ GEN_CONTEXT_MEDIUM_PRIORITY);
+}
+
int
anv_gem_create_context(struct anv_device *device)
{
struct drm_i915_gem_context_create create = { 0 };
- int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret == -1)
return -1;
.ctx_id = context,
};
- return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
+ return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}
int
-anv_gem_get_aperture(int fd, uint64_t *size)
+anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
- struct drm_i915_gem_get_aperture aperture = { 0 };
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = context,
+ .param = param,
+ .value = value,
+ };
+ int err = 0;
- int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
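+ /* Return -errno rather than just -1 so callers can tell permission
+ * failures (raising priority requires privilege) apart from other errors.
+ */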
+ if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
+ err = -errno;
+ return err;
+}
+
+int
+anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
+{
+ struct drm_i915_gem_context_param gp = {
+ .ctx_id = context,
+ .param = param,
+ };
+
+ int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
if (ret == -1)
return -1;
- *size = aperture.aper_available_size;
-
+ *value = gp.value;
return 0;
}
+int
+anv_gem_gpu_get_reset_stats(struct anv_device *device,
+ uint32_t *active, uint32_t *pending)
+{
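+ /* batch_active counts batches from this context that were executing when a
+ * GPU hang was detected; batch_pending counts batches that were queued but
+ * had not started.
+ */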
+ struct drm_i915_reset_stats stats = {
+ .ctx_id = device->context_id,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
+ if (ret == 0) {
+ *active = stats.batch_active;
+ *pending = stats.batch_pending;
+ }
+
+ return ret;
+}
+
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
.flags = DRM_CLOEXEC,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
if (ret == -1)
return -1;
.fd = fd,
};
- int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+ if (ret == -1)
+ return 0;
+
+ return args.handle;
+}
+
+int
+anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
+{
+ struct drm_i915_reg_read args = {
+ .offset = offset
+ };
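+ /* Only a small whitelist of registers can be read this way; anv uses it
+ * for the command streamer timestamp.
+ */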
+
+ int ret = gen_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);
+
+ *result = args.val;
+ return ret;
+}
+
+int
+anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
+{
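+ /* SYNC_IOC_MERGE is issued on fd1; the kernel merges it with args.fd2 and
+ * returns a new sync_file fd in args.fence.
+ */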
+ struct sync_merge_data args = {
+ .name = "anv merge fence",
+ .fd2 = fd2,
+ .fence = -1,
+ };
+
+ int ret = gen_ioctl(fd1, SYNC_IOC_MERGE, &args);
if (ret == -1)
+ return -1;
+
+ return args.fence;
+}
+
+uint32_t
+anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
+{
+ struct drm_syncobj_create args = {
+ .flags = flags,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
+ if (ret)
return 0;
return args.handle;
}
+
+void
+anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
+{
+ struct drm_syncobj_destroy args = {
+ .handle = handle,
+ };
+
+ gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
+}
+
+int
+anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
+{
+ struct drm_syncobj_handle args = {
+ .handle = handle,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
+ if (ret)
+ return -1;
+
+ return args.fd;
+}
+
+uint32_t
+anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
+{
+ struct drm_syncobj_handle args = {
+ .fd = fd,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
+ if (ret)
+ return 0;
+
+ return args.handle;
+}
+
+int
+anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
+{
+ struct drm_syncobj_handle args = {
+ .handle = handle,
+ .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
+ };
+
+ int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
+ if (ret)
+ return -1;
+
+ return args.fd;
+}
+
+int
+anv_gem_syncobj_import_sync_file(struct anv_device *device,
+ uint32_t handle, int fd)
+{
+ struct drm_syncobj_handle args = {
+ .handle = handle,
+ .fd = fd,
+ .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
+ };
+
+ return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
+}
+
+void
+anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
+{
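+ /* The ioctl takes a userspace pointer to an array of handles, passed as a
+ * u64.
+ */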
+ struct drm_syncobj_array args = {
+ .handles = (uint64_t)(uintptr_t)&handle,
+ .count_handles = 1,
+ };
+
+ gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
+}
+
+bool
+anv_gem_supports_syncobj_wait(int fd)
+{
+ return gen_gem_supports_syncobj_wait(fd);
+}
+
+int
+anv_gem_syncobj_wait(struct anv_device *device,
+ uint32_t *handles, uint32_t num_handles,
+ int64_t abs_timeout_ns, bool wait_all)
+{
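+ /* WAIT_FOR_SUBMIT makes the kernel wait for a fence to be attached to each
+ * syncobj instead of returning EINVAL when a binary syncobj has no fence
+ * yet.
+ */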
+ struct drm_syncobj_wait args = {
+ .handles = (uint64_t)(uintptr_t)handles,
+ .count_handles = num_handles,
+ .timeout_nsec = abs_timeout_ns,
+ .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ };
+
+ if (wait_all)
+ args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
+
+ return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
+}