/*
 * Copyright © 2018 Google, Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xf86drm.h>

#include "drm-uapi/msm_drm.h"

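/* Thin wrapper around DRM_MSM_GET_PARAM: on success, returns 0 and stores
 * the queried value in *value; on failure, returns the drmCommandWriteRead()
 * error code.
 */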
static int
tu_drm_get_param(const struct tu_physical_device *dev,
                 uint32_t param,
                 uint64_t *value)
{
   /* Technically this requires a pipe, but the kernel only supports one pipe
    * anyway at the time of writing and most of these are clearly pipe
    * independent. */
   struct drm_msm_param req = {
      .pipe = MSM_PIPE_3D0,
      .param = param,
   };

   int ret = drmCommandWriteRead(dev->local_fd, DRM_MSM_GET_PARAM, &req,
                                 sizeof(req));
   if (ret)
      return ret;

   *value = req.value;
   return 0;
}

static int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id)
{
   uint64_t value;
   int ret = tu_drm_get_param(dev, MSM_PARAM_GPU_ID, &value);
   if (ret)
      return ret;

   *id = value;
   return 0;
}

static int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size)
{
   uint64_t value;
   int ret = tu_drm_get_param(dev, MSM_PARAM_GMEM_SIZE, &value);
   if (ret)
      return ret;

   *size = value;
   return 0;
}

static int
tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base)
{
   return tu_drm_get_param(dev, MSM_PARAM_GMEM_BASE, base);
}

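/* Create an msm submitqueue with the requested priority; on success the new
 * queue id is written to *queue_id and 0 is returned.
 */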
int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id)
{
   struct drm_msm_submitqueue req = {
      .flags = 0,
      .prio = priority,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
   if (ret)
      return ret;

   *queue_id = req.id;
   return 0;
}

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
{
   drmCommandWrite(dev->physical_device->local_fd, DRM_MSM_SUBMITQUEUE_CLOSE,
                   &queue_id, sizeof(uint32_t));
}

static void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle)
{
   struct drm_gem_close req = {
      .handle = gem_handle,
   };

   drmIoctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
}

/** Helper for DRM_MSM_GEM_INFO, returns 0 on error. */
static uint64_t
tu_gem_info(const struct tu_device *dev, uint32_t gem_handle, uint32_t info)
{
   struct drm_msm_gem_info req = {
      .handle = gem_handle,
      .info = info,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_GEM_INFO, &req, sizeof(req));
   if (ret < 0)
      return 0;

   return req.value;
}

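/* Fill *bo from a freshly created GEM handle. Takes ownership of the handle:
 * if the iova query fails, the handle is closed here so callers don't leak
 * it.
 */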
static VkResult
tu_bo_init(struct tu_device *dev,
           struct tu_bo *bo,
           uint32_t gem_handle,
           uint64_t size)
{
   uint64_t iova = tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
   if (!iova) {
      tu_gem_close(dev, gem_handle);
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
   }

   *bo = (struct tu_bo) {
      .gem_handle = gem_handle,
      .size = size,
      .iova = iova,
   };

   return VK_SUCCESS;
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
{
   /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
    * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
    */
   struct drm_msm_gem_new req = {
      .size = size,
      .flags = MSM_BO_WC,
   };

   int ret = drmCommandWriteRead(dev->physical_device->local_fd,
                                 DRM_MSM_GEM_NEW, &req, sizeof(req));
   if (ret)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   return tu_bo_init(dev, bo, req.handle, size);
}

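/* Import a dma-buf fd as a tu_bo. The fd's real size is validated with
 * lseek() before the handle import; ownership of prime_fd stays with the
 * caller.
 */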
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int prime_fd)
{
   /* lseek() to get the real size */
   off_t real_size = lseek(prime_fd, 0, SEEK_END);
   lseek(prime_fd, 0, SEEK_SET);
   if (real_size < 0 || (uint64_t) real_size < size)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   uint32_t gem_handle;
   int ret = drmPrimeFDToHandle(dev->physical_device->local_fd, prime_fd,
                                &gem_handle);
   if (ret)
      return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);

   return tu_bo_init(dev, bo, gem_handle, size);
}

int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   int prime_fd;
   int ret = drmPrimeHandleToFD(dev->physical_device->local_fd,
                                bo->gem_handle, DRM_CLOEXEC, &prime_fd);

   return ret == 0 ? prime_fd : -1;
}

VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
{
   if (bo->map)
      return VK_SUCCESS;

   uint64_t offset = tu_gem_info(dev, bo->gem_handle, MSM_INFO_GET_OFFSET);
   if (!offset)
      return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

   /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
   void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    dev->physical_device->local_fd, offset);
   if (map == MAP_FAILED)
      return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);

   bo->map = map;
   return VK_SUCCESS;
}

void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
{
   assert(bo->gem_handle);

   if (bo->map)
      munmap(bo->map, bo->size);

   tu_gem_close(dev, bo->gem_handle);
}

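/* Illustrative sketch, not driver code: the expected lifecycle of a tu_bo
 * using the helpers above. `tu_bo_example_lifecycle` is a hypothetical name
 * and `example_dev` stands in for an already initialized tu_device; the
 * function is kept static inline so it compiles away when unreferenced.
 */
static inline VkResult
tu_bo_example_lifecycle(struct tu_device *example_dev)
{
   struct tu_bo bo;

   /* Allocate a 4 KiB write-combined buffer object. */
   VkResult result = tu_bo_init_new(example_dev, &bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Map it for CPU access; tu_bo_map() is a no-op once bo.map is set. */
   result = tu_bo_map(example_dev, &bo);
   if (result != VK_SUCCESS) {
      tu_bo_finish(example_dev, &bo);
      return result;
   }

   *(uint32_t *) bo.map = 0; /* CPU writes go through the WC mapping. */

   /* Unmap and close the GEM handle. */
   tu_bo_finish(example_dev, &bo);
   return VK_SUCCESS;
}
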
static VkResult
tu_drm_device_init(struct tu_physical_device *device,
                   struct tu_instance *instance,
                   drmDevicePtr drm_device)
{
   const char *path = drm_device->nodes[DRM_NODE_RENDER];
   VkResult result = VK_SUCCESS;
   drmVersionPtr version;
   int fd;
   int master_fd = -1;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0) {
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to open device %s", path);
   }

   /* Version 1.3 added MSM_INFO_IOVA. */
   const int min_version_major = 1;
   const int min_version_minor = 3;

   version = drmGetVersion(fd);
   if (!version) {
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "failed to query kernel driver version for device %s",
                       path);
   }

   if (strcmp(version->name, "msm")) {
      drmFreeVersion(version);
      close(fd);
      return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                       "device %s does not use the msm kernel driver", path);
   }

   if (version->version_major != min_version_major ||
       version->version_minor < min_version_minor) {
      result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
                         "kernel driver for device %s has version %d.%d, "
                         "but Vulkan requires version >= %d.%d",
                         path, version->version_major, version->version_minor,
                         min_version_major, min_version_minor);
      drmFreeVersion(version);
      close(fd);
      return result;
   }

   device->msm_major_version = version->version_major;
   device->msm_minor_version = version->version_minor;

   drmFreeVersion(version);

   if (instance->debug_flags & TU_DEBUG_STARTUP)
      tu_logi("Found compatible device '%s'.", path);

   vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
   device->instance = instance;
   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   if (instance->enabled_extensions.KHR_display) {
      master_fd =
         open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
      if (master_fd >= 0) {
         /* TODO: free master_fd if accel is not working? */
      }
   }

   device->master_fd = master_fd;
   device->local_fd = fd;

   if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GPU ID");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GPU ID");
      goto fail;
   }

   if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM size");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM size");
      goto fail;
   }

   if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
      if (instance->debug_flags & TU_DEBUG_STARTUP)
         tu_logi("Could not query the GMEM base");
      result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
                         "could not get GMEM base");
      goto fail;
   }

   return tu_physical_device_init(device, instance);

fail:
   close(fd);
   if (master_fd != -1)
      close(master_fd);
   return result;
}

VkResult
tu_enumerate_devices(struct tu_instance *instance)
{
   /* TODO: Check for more devices? */
   drmDevicePtr devices[8];
   VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
   int max_devices;

   instance->physical_device_count = 0;

   max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));

   if (instance->debug_flags & TU_DEBUG_STARTUP) {
      if (max_devices < 0)
         tu_logi("drmGetDevices2 returned error: %s\n",
                 strerror(-max_devices));
      else
         tu_logi("Found %d drm nodes", max_devices);
   }

   if (max_devices < 1)
      return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);

   for (unsigned i = 0; i < (unsigned) max_devices; i++) {
      if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
          devices[i]->bustype == DRM_BUS_PLATFORM) {

         result = tu_drm_device_init(
            instance->physical_devices + instance->physical_device_count,
            instance, devices[i]);
         if (result == VK_SUCCESS)
            ++instance->physical_device_count;
         else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
            break;
      }
   }
   drmFreeDevices(devices, max_devices);

   return result;
}

// Queue semaphore functions

static void
tu_semaphore_part_destroy(struct tu_device *device,
                          struct tu_semaphore_part *part)
{
   switch (part->kind) {
   case TU_SEMAPHORE_NONE:
      break;
   case TU_SEMAPHORE_SYNCOBJ:
      drmSyncobjDestroy(device->physical_device->local_fd, part->syncobj);
      break;
   }
   part->kind = TU_SEMAPHORE_NONE;
}

static void
tu_semaphore_remove_temp(struct tu_device *device,
                         struct tu_semaphore *sem)
{
   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      tu_semaphore_part_destroy(device, &sem->temporary);
   }
}

static VkResult
tu_get_semaphore_syncobjs(const VkSemaphore *sems,
                          uint32_t sem_count,
                          bool wait,
                          struct drm_msm_gem_submit_syncobj **out,
                          uint32_t *out_count)
{
   uint32_t syncobj_count = 0;
   struct drm_msm_gem_submit_syncobj *syncobjs;

   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ)
         ++syncobj_count;
   }

   *out_count = syncobj_count;
   if (!syncobj_count)
      return VK_SUCCESS;

   *out = syncobjs = calloc(syncobj_count, sizeof(*syncobjs));
   if (!syncobjs)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   for (uint32_t i = 0, j = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);

      struct tu_semaphore_part *part =
         sem->temporary.kind != TU_SEMAPHORE_NONE ?
            &sem->temporary : &sem->permanent;

      if (part->kind == TU_SEMAPHORE_SYNCOBJ) {
         syncobjs[j].handle = part->syncobj;
         syncobjs[j].flags = wait ? MSM_SUBMIT_SYNCOBJ_RESET : 0;
         ++j;
      }
   }

   return VK_SUCCESS;
}

static void
tu_semaphores_remove_temp(struct tu_device *device,
                          const VkSemaphore *sems,
                          uint32_t sem_count)
{
   for (uint32_t i = 0; i < sem_count; ++i) {
      TU_FROM_HANDLE(tu_semaphore, sem, sems[i]);
      tu_semaphore_remove_temp(device, sem);
   }
}

VkResult
tu_CreateSemaphore(VkDevice _device,
                   const VkSemaphoreCreateInfo *pCreateInfo,
                   const VkAllocationCallbacks *pAllocator,
                   VkSemaphore *pSemaphore)
{
   TU_FROM_HANDLE(tu_device, device, _device);

   struct tu_semaphore *sem =
      vk_object_alloc(&device->vk, pAllocator, sizeof(*sem),
                      VK_OBJECT_TYPE_SEMAPHORE);
   if (!sem)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   const VkExportSemaphoreCreateInfo *export =
      vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
   VkExternalSemaphoreHandleTypeFlags handleTypes =
      export ? export->handleTypes : 0;

   sem->permanent.kind = TU_SEMAPHORE_NONE;
   sem->temporary.kind = TU_SEMAPHORE_NONE;

   if (handleTypes) {
      if (drmSyncobjCreate(device->physical_device->local_fd, 0,
                           &sem->permanent.syncobj) < 0) {
         vk_free2(&device->vk.alloc, pAllocator, sem);
         return VK_ERROR_OUT_OF_HOST_MEMORY;
      }
      sem->permanent.kind = TU_SEMAPHORE_SYNCOBJ;
   }

   *pSemaphore = tu_semaphore_to_handle(sem);
   return VK_SUCCESS;
}

void
tu_DestroySemaphore(VkDevice _device,
                    VkSemaphore _semaphore,
                    const VkAllocationCallbacks *pAllocator)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, _semaphore);
   if (!_semaphore)
      return;

   tu_semaphore_part_destroy(device, &sem->permanent);
   tu_semaphore_part_destroy(device, &sem->temporary);

   vk_object_free(&device->vk, pAllocator, sem);
}

VkResult
tu_ImportSemaphoreFdKHR(VkDevice _device,
                        const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
   int ret;
   struct tu_semaphore_part *dst = NULL;

   if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
      dst = &sem->temporary;
   } else {
      dst = &sem->permanent;
   }

   uint32_t syncobj = dst->kind == TU_SEMAPHORE_SYNCOBJ ? dst->syncobj : 0;

   switch (pImportSemaphoreFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT: {
      uint32_t old_syncobj = syncobj;
      ret = drmSyncobjFDToHandle(device->physical_device->local_fd,
                                 pImportSemaphoreFdInfo->fd, &syncobj);
      if (!ret) {
         close(pImportSemaphoreFdInfo->fd);
         if (old_syncobj)
            drmSyncobjDestroy(device->physical_device->local_fd, old_syncobj);
      }
      break;
   }
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT: {
      if (!syncobj) {
         ret = drmSyncobjCreate(device->physical_device->local_fd, 0,
                                &syncobj);
         if (ret)
            break;
      }
      if (pImportSemaphoreFdInfo->fd == -1) {
         ret = drmSyncobjSignal(device->physical_device->local_fd,
                                &syncobj, 1);
      } else {
         ret = drmSyncobjImportSyncFile(device->physical_device->local_fd,
                                        syncobj, pImportSemaphoreFdInfo->fd);
      }
      if (!ret)
         close(pImportSemaphoreFdInfo->fd);
      break;
   }
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret)
      return VK_ERROR_INVALID_EXTERNAL_HANDLE;

   dst->syncobj = syncobj;
   dst->kind = TU_SEMAPHORE_SYNCOBJ;

   return VK_SUCCESS;
}

VkResult
tu_GetSemaphoreFdKHR(VkDevice _device,
                     const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
                     int *pFd)
{
   TU_FROM_HANDLE(tu_device, device, _device);
   TU_FROM_HANDLE(tu_semaphore, sem, pGetFdInfo->semaphore);
   int ret;
   uint32_t syncobj_handle;

   if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
      assert(sem->temporary.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->temporary.syncobj;
   } else {
      assert(sem->permanent.kind == TU_SEMAPHORE_SYNCOBJ);
      syncobj_handle = sem->permanent.syncobj;
   }

   switch (pGetFdInfo->handleType) {
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT:
      ret = drmSyncobjHandleToFD(device->physical_device->local_fd,
                                 syncobj_handle, pFd);
      break;
   case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT:
      ret = drmSyncobjExportSyncFile(device->physical_device->local_fd,
                                     syncobj_handle, pFd);
      if (!ret) {
         if (sem->temporary.kind != TU_SEMAPHORE_NONE) {
            tu_semaphore_part_destroy(device, &sem->temporary);
         } else {
            drmSyncobjReset(device->physical_device->local_fd,
                            &syncobj_handle, 1);
         }
      }
      break;
   default:
      unreachable("Unhandled semaphore handle type");
   }

   if (ret)
      return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
   return VK_SUCCESS;
}

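/* Syncobj-backed semaphores need both generic syncobj support
 * (DRM_CAP_SYNCOBJ) and a new enough msm driver; the 1.6 check below
 * matches the msm version that accepts syncobj arrays in the submit ioctl.
 */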
static bool tu_has_syncobj(struct tu_physical_device *pdev)
{
   uint64_t value;
   if (drmGetCap(pdev->local_fd, DRM_CAP_SYNCOBJ, &value))
      return false;
   return value && pdev->msm_major_version == 1 &&
          pdev->msm_minor_version >= 6;
}

void
tu_GetPhysicalDeviceExternalSemaphoreProperties(
   VkPhysicalDevice physicalDevice,
   const VkPhysicalDeviceExternalSemaphoreInfo *pExternalSemaphoreInfo,
   VkExternalSemaphoreProperties *pExternalSemaphoreProperties)
{
   TU_FROM_HANDLE(tu_physical_device, pdev, physicalDevice);

   if (tu_has_syncobj(pdev) &&
       (pExternalSemaphoreInfo->handleType ==
           VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT ||
        pExternalSemaphoreInfo->handleType ==
           VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT)) {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT |
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->compatibleHandleTypes =
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT |
         VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
      pExternalSemaphoreProperties->externalSemaphoreFeatures =
         VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT |
         VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT;
   } else {
      pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
      pExternalSemaphoreProperties->compatibleHandleTypes = 0;
      pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
   }
}

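/* Each VkSubmitInfo is flattened into a single DRM_MSM_GEM_SUBMIT ioctl:
 * every command buffer's stream entries become drm_msm_gem_submit_cmd
 * entries, wait/signal semaphores are passed as syncobj arrays, and a fence
 * fd is requested for the last submit only.
 */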
VkResult
tu_QueueSubmit(VkQueue _queue,
               uint32_t submitCount,
               const VkSubmitInfo *pSubmits,
               VkFence _fence)
{
   TU_FROM_HANDLE(tu_queue, queue, _queue);
   VkResult result;

   for (uint32_t i = 0; i < submitCount; ++i) {
      const VkSubmitInfo *submit = pSubmits + i;
      const bool last_submit = (i == submitCount - 1);
      struct drm_msm_gem_submit_syncobj *in_syncobjs = NULL,
                                        *out_syncobjs = NULL;
      uint32_t nr_in_syncobjs, nr_out_syncobjs;
      struct tu_bo_list bo_list;
      tu_bo_list_init(&bo_list);

      result = tu_get_semaphore_syncobjs(pSubmits[i].pWaitSemaphores,
                                         pSubmits[i].waitSemaphoreCount,
                                         false, &in_syncobjs, &nr_in_syncobjs);
      if (result != VK_SUCCESS) {
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      result = tu_get_semaphore_syncobjs(pSubmits[i].pSignalSemaphores,
                                         pSubmits[i].signalSemaphoreCount,
                                         false, &out_syncobjs, &nr_out_syncobjs);
      if (result != VK_SUCCESS) {
         free(in_syncobjs);
         return tu_device_set_lost(queue->device,
                                   "failed to allocate space for semaphore submission\n");
      }

      uint32_t entry_count = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         entry_count += cmdbuf->cs.entry_count;
      }

      struct drm_msm_gem_submit_cmd cmds[entry_count];
      uint32_t entry_idx = 0;
      for (uint32_t j = 0; j < submit->commandBufferCount; ++j) {
         TU_FROM_HANDLE(tu_cmd_buffer, cmdbuf, submit->pCommandBuffers[j]);
         struct tu_cs *cs = &cmdbuf->cs;
         for (unsigned k = 0; k < cs->entry_count; ++k, ++entry_idx) {
            cmds[entry_idx].type = MSM_SUBMIT_CMD_BUF;
            cmds[entry_idx].submit_idx =
               tu_bo_list_add(&bo_list, cs->entries[k].bo,
                              MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_DUMP);
            cmds[entry_idx].submit_offset = cs->entries[k].offset;
            cmds[entry_idx].size = cs->entries[k].size;
            cmds[entry_idx].pad = 0;
            cmds[entry_idx].nr_relocs = 0;
            cmds[entry_idx].relocs = 0;
         }

         tu_bo_list_merge(&bo_list, &cmdbuf->bo_list);
      }

      uint32_t flags = MSM_PIPE_3D0;
      if (nr_in_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_IN;
      }
      if (nr_out_syncobjs) {
         flags |= MSM_SUBMIT_SYNCOBJ_OUT;
      }
      if (last_submit) {
         flags |= MSM_SUBMIT_FENCE_FD_OUT;
      }

      struct drm_msm_gem_submit req = {
         .flags = flags,
         .queueid = queue->msm_queue_id,
         .bos = (uint64_t)(uintptr_t) bo_list.bo_infos,
         .nr_bos = bo_list.count,
         .cmds = (uint64_t)(uintptr_t) cmds,
         .nr_cmds = entry_count,
         .in_syncobjs = (uint64_t)(uintptr_t) in_syncobjs,
         .out_syncobjs = (uint64_t)(uintptr_t) out_syncobjs,
         .nr_in_syncobjs = nr_in_syncobjs,
         .nr_out_syncobjs = nr_out_syncobjs,
         .syncobj_stride = sizeof(struct drm_msm_gem_submit_syncobj),
      };

      int ret = drmCommandWriteRead(queue->device->physical_device->local_fd,
                                    DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
      if (ret) {
         free(in_syncobjs);
         free(out_syncobjs);
         return tu_device_set_lost(queue->device, "submit failed: %s\n",
                                   strerror(errno));
      }

      tu_bo_list_destroy(&bo_list);
      free(in_syncobjs);
      free(out_syncobjs);

      tu_semaphores_remove_temp(queue->device, pSubmits[i].pWaitSemaphores,
                                pSubmits[i].waitSemaphoreCount);
      if (last_submit) {
         /* no need to merge fences as queue execution is serialized */
         tu_fence_update_fd(&queue->submit_fence, req.fence_fd);
      }
   }

   if (_fence != VK_NULL_HANDLE) {
      TU_FROM_HANDLE(tu_fence, fence, _fence);
      tu_fence_copy(fence, &queue->submit_fence);
   }

   return VK_SUCCESS;
}