'tu_descriptor_set.c',
'tu_descriptor_set.h',
'tu_formats.c',
+ 'tu_gem.c',
'tu_image.c',
'tu_meta_blit.c',
'tu_meta_buffer.c',
#include <fcntl.h>
#include <stdbool.h>
#include <string.h>
+#include <sys/mman.h>
#include <sys/sysinfo.h>
#include <unistd.h>
#include <xf86drm.h>
+#include <msm_drm.h>
static int
tu_device_get_cache_uuid(uint16_t family, void *uuid)
{
stub();
}
+VkResult
+tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
+{
+ /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
+ * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
+ */
+ uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
+ if (!gem_handle)
+ goto fail_new;
+
+ /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
+ * want immediate backing pages because vkAllocateMemory and friends must
+ * not lazily fail.
+ *
+ * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
+ * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
+ * maybe I misunderstand.
+ */
+
+ /* TODO: Do we need 'offset' if we have 'iova'? */
+ uint64_t offset = tu_gem_info_offset(dev, gem_handle);
+ if (offset == UINT64_MAX)
+ goto fail_info;
+
+ uint64_t iova = tu_gem_info_iova(dev, gem_handle);
+ if (iova == UINT64_MAX)
+ goto fail_info;
+
+ *bo = (struct tu_bo) {
+ .gem_handle = gem_handle,
+ .size = size,
+ .offset = offset,
+ .iova = iova,
+ };
+
+ return VK_SUCCESS;
+
+fail_info:
+ tu_gem_close(dev, gem_handle);
+fail_new:
+ return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+}
+
+VkResult
+tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
+{
+ if (bo->map)
+ return VK_SUCCESS;
+
+ /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
+ void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ dev->physical_device->local_fd, bo->offset);
+ if (map == MAP_FAILED)
+ return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
+
+ bo->map = map;
+ return VK_SUCCESS;
+}
+
+void
+tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
+{
+ assert(bo->gem_handle);
+
+ if (bo->map)
+ munmap(bo->map, bo->size);
+
+ tu_gem_close(dev, bo->gem_handle);
+}
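
A minimal usage sketch of the new BO API, assuming an initialized
struct tu_device *dev (error handling abbreviated):

   struct tu_bo bo;

   /* Allocate 4 KiB; the GEM_INFO calls inside force backing pages now. */
   VkResult result = tu_bo_init_new(dev, &bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* CPU-map the buffer through the DRM fd, then fill it. */
   result = tu_bo_map(dev, &bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(dev, &bo);
      return result;
   }
   memset(bo.map, 0, bo.size);

   /* bo.iova is the GPU virtual address to reference in command streams. */

   /* Unmap and close the GEM handle. */
   tu_bo_finish(dev, &bo);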
+
static VkResult
tu_physical_device_init(struct tu_physical_device *device,
struct tu_instance *instance,
VkDeviceMemory *pMem)
{
struct tu_device_memory *mem;
+ VkResult result;
assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
if (mem == NULL)
return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- mem->bo = fd_bo_new(device->physical_device->drm_device, pAllocateInfo->allocationSize,
- DRM_FREEDRENO_GEM_CACHE_WCOMBINE |
- DRM_FREEDRENO_GEM_TYPE_KMEM);
- if (!mem->bo) {
+ result = tu_bo_init_new(device, &mem->bo, pAllocateInfo->allocationSize);
+ if (result != VK_SUCCESS) {
vk_free2(&device->alloc, pAllocator, mem);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return result;
}
+
mem->size = pAllocateInfo->allocationSize;
mem->type_index = pAllocateInfo->memoryTypeIndex;
if (mem == NULL)
return;
- if (mem->bo)
- fd_bo_del(mem->bo);
-
+ tu_bo_finish(device, &mem->bo);
vk_free2(&device->alloc, pAllocator, mem);
}
{
TU_FROM_HANDLE(tu_device, device, _device);
TU_FROM_HANDLE(tu_device_memory, mem, _memory);
+ VkResult result;
if (mem == NULL) {
*ppData = NULL;
if (mem->user_ptr) {
*ppData = mem->user_ptr;
} else if (!mem->map) {
- *ppData = mem->map = fd_bo_map(mem->bo);
+ result = tu_bo_map(device, &mem->bo);
+ if (result != VK_SUCCESS)
+ return result;
+ *ppData = mem->map = mem->bo.map;
} else
*ppData = mem->map;
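
The caller-facing sequence that exercises this path is the standard Vulkan
mapping flow; a sketch, with `vertices` as a hypothetical payload:

   void *data;
   VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &data);
   if (res == VK_SUCCESS) {
      memcpy(data, vertices, sizeof(vertices));
      vkUnmapMemory(device, memory);
   }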
--- /dev/null
+/*
+ * Copyright © 2018 Google, Inc.
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+
+#include <msm_drm.h>
+
+#include "tu_private.h"
+
+/* Wrap ioctl(2) and restart it if interrupted by a signal. This matches
+ * the behavior of libdrm's drmIoctl(), which also retries on EINTR and
+ * EAGAIN.
+ */
+static int
+tu_ioctl(int fd, unsigned long request, void *arg)
+{
+ int ret;
+
+ do {
+ ret = ioctl(fd, request, arg);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ return ret;
+}
+
+/**
+ * Return gem handle on success. Return 0 on failure.
+ */
+uint32_t
+tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags)
+{
+ struct drm_msm_gem_new req = {
+ .size = size,
+ .flags = flags,
+ };
+
+ int ret = tu_ioctl(dev->physical_device->local_fd, DRM_IOCTL_MSM_GEM_NEW, &req);
+ if (ret)
+ return 0;
+
+ return req.handle;
+}
+
+void
+tu_gem_close(struct tu_device *dev, uint32_t gem_handle)
+{
+ struct drm_gem_close req = {
+ .handle = gem_handle,
+ };
+
+ tu_ioctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
+}
+
+/** Return UINT64_MAX on error. */
+static uint64_t
+tu_gem_info(struct tu_device *dev, uint32_t gem_handle, uint32_t flags)
+{
+ struct drm_msm_gem_info req = {
+ .handle = gem_handle,
+ .flags = flags,
+ };
+
+ int ret = tu_ioctl(dev->physical_device->local_fd, DRM_IOCTL_MSM_GEM_INFO, &req);
+ if (ret)
+ return UINT64_MAX;
+
+ return req.offset;
+}
+
+/** Return UINT64_MAX on error. */
+uint64_t
+tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle)
+{
+ return tu_gem_info(dev, gem_handle, 0);
+}
+
+/** Return UINT64_MAX on error. */
+uint64_t
+tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle)
+{
+ return tu_gem_info(dev, gem_handle, MSM_INFO_IOVA);
+}
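
A note on the TODO in tu_bo_init_new about 'offset' versus 'iova': the two
GEM_INFO queries return addresses in different spaces, so one cannot simply
replace the other. A sketch, assuming a `gem_handle` and `size` obtained from
tu_gem_new:

   /* 'offset' is only a cookie for mmap(2) on the DRM fd (CPU access). */
   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
   void *cpu_ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        dev->physical_device->local_fd, offset);

   /* 'iova' is the GPU virtual address emitted into the command stream;
    * the CPU never dereferences it.
    */
   uint64_t iova = tu_gem_info_iova(dev, gem_handle);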
struct tu_bo_list bo_list;
};
+struct tu_bo
+{
+ uint32_t gem_handle;
+ uint64_t size;
+ uint64_t offset;
+ uint64_t iova;
+ void *map;
+};
+
+VkResult
+tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
+void
+tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
+VkResult
+tu_bo_map(struct tu_device *dev, struct tu_bo *bo);
+
struct tu_device_memory
{
- struct fd_bo *bo;
+ struct tu_bo bo;
VkDeviceSize size;
/* for dedicated allocations */
struct radeon_winsys_sem;
+uint32_t
+tu_gem_new(struct tu_device *dev, uint64_t size, uint32_t flags);
+void
+tu_gem_close(struct tu_device *dev, uint32_t gem_handle);
+uint64_t
+tu_gem_info_offset(struct tu_device *dev, uint32_t gem_handle);
+uint64_t
+tu_gem_info_iova(struct tu_device *dev, uint32_t gem_handle);
+
#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType) \
\
static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \