turnip: add tu_cs_add_bo
[mesa.git] / src / freedreno / vulkan / tu_device.c
index 67c00c866beccd6de0ddf9c451ec2a537e93e644..d5533028a6216095c4414b40a7ac276647404eee 100644 (file)
@@ -28,7 +28,7 @@
 #include "tu_private.h"
 
 #include <fcntl.h>
-#include <msm_drm.h>
+#include <libsync.h>
 #include <stdbool.h>
 #include <string.h>
 #include <sys/mman.h>
@@ -42,6 +42,8 @@
 #include "vk_format.h"
 #include "vk_util.h"
 
+#include "drm/msm_drm.h"
+
 static int
 tu_device_get_cache_uuid(uint16_t family, void *uuid)
 {
@@ -62,13 +64,13 @@ static void
 tu_get_driver_uuid(void *uuid)
 {
    memset(uuid, 0, VK_UUID_SIZE);
+   snprintf(uuid, VK_UUID_SIZE, "freedreno");
 }
 
 static void
 tu_get_device_uuid(void *uuid)
 {
-   tu_use_args(uuid);
-   tu_stub();
+   memset(uuid, 0, VK_UUID_SIZE);
 }
 
 VkResult
@@ -81,20 +83,6 @@ tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
    if (!gem_handle)
       goto fail_new;
 
-   /* Calling DRM_MSM_GEM_INFO forces the kernel to allocate backing pages. We
-    * want immediate backing pages because vkAllocateMemory and friends must
-    * not lazily fail.
-    *
-    * TODO(chadv): Must we really call DRM_MSM_GEM_INFO to acquire backing
-    * pages? I infer so from reading comments in msm_bo.c:bo_allocate(), but
-    * maybe I misunderstand.
-    */
-
-   /* TODO: Do we need 'offset' if we have 'iova'? */
-   uint64_t offset = tu_gem_info_offset(dev, gem_handle);
-   if (!offset)
-      goto fail_info;
-
    uint64_t iova = tu_gem_info_iova(dev, gem_handle);
    if (!iova)
       goto fail_info;
@@ -102,7 +90,6 @@ tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
    *bo = (struct tu_bo) {
       .gem_handle = gem_handle,
       .size = size,
-      .offset = offset,
       .iova = iova,
    };
 
@@ -120,9 +107,13 @@ tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
    if (bo->map)
       return VK_SUCCESS;
 
+   /* mmap()ing the DRM fd at the kernel-reported offset maps the BO. */
+   uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
+   if (!offset)
+      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+
    /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
    void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
-                    dev->physical_device->local_fd, bo->offset);
+                    dev->physical_device->local_fd, offset);
    if (map == MAP_FAILED)
       return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
 
@@ -151,7 +142,6 @@ tu_physical_device_init(struct tu_physical_device *device,
    drmVersionPtr version;
    int fd;
    int master_fd = -1;
-   uint64_t val;
 
    fd = open(path, O_RDWR | O_CLOEXEC);
    if (fd < 0) {
@@ -213,32 +203,21 @@ tu_physical_device_init(struct tu_physical_device *device,
    device->master_fd = master_fd;
    device->local_fd = fd;
 
-   device->drm_device = fd_device_new_dup(fd);
-   if (!device->drm_device) {
-      if (instance->debug_flags & TU_DEBUG_STARTUP)
-         tu_logi("Could not create the libdrm device");
-      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
-                         "could not create the libdrm device");
-      goto fail;
-   }
-
-   if (tu_drm_query_param(device, MSM_PARAM_GPU_ID, &val)) {
+   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
       if (instance->debug_flags & TU_DEBUG_STARTUP)
          tu_logi("Could not query the GPU ID");
       result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                          "could not get GPU ID");
       goto fail;
    }
-   device->gpu_id = val;
 
-   if (tu_drm_query_param(device, MSM_PARAM_GMEM_SIZE, &val)) {
+   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
       if (instance->debug_flags & TU_DEBUG_STARTUP)
          tu_logi("Could not query the GMEM size");
       result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                          "could not get GMEM size");
       goto fail;
    }
-   device->gmem_size = val;
 
    memset(device->name, 0, sizeof(device->name));
    sprintf(device->name, "FD%d", device->gpu_id);
@@ -281,8 +260,6 @@ tu_physical_device_init(struct tu_physical_device *device,
    return VK_SUCCESS;
 
 fail:
-   if (device->drm_device)
-      fd_device_del(device->drm_device);
    close(fd);
    if (master_fd != -1)
       close(master_fd);
@@ -949,7 +926,7 @@ tu_GetPhysicalDeviceMemoryProperties2(
       physicalDevice, &pMemoryProperties->memoryProperties);
 }
 
-static int
+static VkResult
 tu_queue_init(struct tu_device *device,
               struct tu_queue *queue,
               uint32_t queue_family_index,
@@ -962,12 +939,22 @@ tu_queue_init(struct tu_device *device,
    queue->queue_idx = idx;
    queue->flags = flags;
 
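+   /* Each Vulkan queue gets its own MSM submitqueue in the kernel. */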
+   int ret = tu_drm_submitqueue_new(device, 0, &queue->msm_queue_id);
+   if (ret)
+      return VK_ERROR_INITIALIZATION_FAILED;
+
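+   /* No submission fence is pending yet. */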
+   queue->submit_fence_fd = -1;
+
    return VK_SUCCESS;
 }
 
 static void
 tu_queue_finish(struct tu_queue *queue)
 {
+   if (queue->submit_fence_fd >= 0) {
+      close(queue->submit_fence_fd);
+   }
+   tu_drm_submitqueue_close(queue->device, queue->msm_queue_id);
 }
 
 static int
@@ -1170,12 +1157,93 @@ tu_QueueSubmit(VkQueue _queue,
                const VkSubmitInfo *pSubmits,
                VkFence _fence)
 {
+   TU_FROM_HANDLE(tu_queue, queue, _queue);
+
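+   /* Each VkSubmitInfo is flushed to the kernel as one DRM_MSM_GEM_SUBMIT ioctl. */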
+   for (uint32_t i = 0; i < submitCount; ++i) {
+      const VkSubmitInfo *submit = pSubmits + i;
+      const bool last_submit = (i == submitCount - 1);
+      struct tu_bo_list bo_list;
+      tu_bo_list_init(&bo_list);
+
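+      /* First pass: count the IB entries referenced by this submit's command buffers. */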
+      uint32_t entry_count = 0;
+      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
+         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
+         entry_count += cmdbuf->cs.entry_count;
+      }
+
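+      /* Second pass: describe each IB entry and register its backing BO in the BO list. */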
+      struct drm_msm_gem_submit_cmd cmds[entry_count];
+      uint32_t entry_idx = 0;
+      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
+         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
+         struct tu_cs *cs = &cmdbuf->cs;
+         for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
+            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
+            cmds[entry_idx].submit_idx =
+               tu_bo_list_add(&bo_list, cs->entries[k].bo);
+            cmds[entry_idx].submit_offset = cs->entries[k].offset;
+            cmds[entry_idx].size = cs->entries[k].size;
+            cmds[entry_idx].pad = 0;
+            cmds[entry_idx].nr_relocs = 0;
+            cmds[entry_idx].relocs = 0;
+         }
+      }
+
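+      /* Every referenced BO is conservatively flagged for both read and write access. */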
+      struct drm_msm_gem_submit_bo bos[bo_list.count];
+      for (unsigned k = 0; k < bo_list.count; ++k) {
+         bos[k].flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE;
+         bos[k].handle = bo_list.handles[k];
+         bos[k].presumed = 0;
+      }
+
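+      /* Request an out-fence fd only for the last submit; submits on a queue
+       * execute in order, so waiting on that fence covers the earlier ones too.
+       */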
+      uint32_t flags = MSM_PIPE_3D0;
+      if (last_submit) {
+         flags |= MSM_SUBMIT_FENCE_FD_OUT;
+      }
+
+      struct drm_msm_gem_submit req = {
+         .flags = flags,
+         .queueid = queue->msm_queue_id,
+         .bos = (uint64_t)(uintptr_t)bos,
+         .nr_bos = bo_list.count,
+         .cmds = (uint64_t)(uintptr_t)cmds,
+         .nr_cmds = entry_count,
+      };
+
+      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
+                                    DRM_MSM_GEM_SUBMIT,
+                                    &req, sizeof(req));
+      if (ret) {
+         fprintf(stderr, "submit failed: %s\n", strerror(errno));
+         abort();
+      }
+
+      tu_bo_list_destroy(&bo_list);
+
+      if (last_submit) {
+         /* no need to merge fences as queue execution is serialized */
+         if (queue->submit_fence_fd >= 0) {
+            close(queue->submit_fence_fd);
+         }
+         queue->submit_fence_fd = req.fence_fd;
+      }
+   }
    return VK_SUCCESS;
 }
 
 VkResult
 tu_QueueWaitIdle(VkQueue _queue)
 {
+   TU_FROM_HANDLE(tu_queue, queue, _queue);
+
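+   /* Wait (timeout of -1 means forever) on the out-fence of the last
+    * submission; earlier submissions on this queue have completed by then.
+    */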
+   if (queue->submit_fence_fd >= 0) {
+      int ret = sync_wait(queue->submit_fence_fd, -1);
+      if (ret)
+         tu_loge("sync_wait on fence fd %d failed", queue->submit_fence_fd);
+
+      close(queue->submit_fence_fd);
+      queue->submit_fence_fd = -1;
+   }
+
    return VK_SUCCESS;
 }