2 * Copyright © 2018 Google, Inc.
3 * Copyright © 2015 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
25 #include "tu_private.h"
30 #include <sys/ioctl.h>
34 #include "drm-uapi/msm_drm.h"
37 tu_drm_get_param(const struct tu_physical_device
*dev
,
41 /* Technically this requires a pipe, but the kernel only supports one pipe
42 * anyway at the time of writing and most of these are clearly pipe
44 struct drm_msm_param req
= {
49 int ret
= drmCommandWriteRead(dev
->local_fd
, DRM_MSM_GET_PARAM
, &req
,
60 tu_drm_get_gpu_id(const struct tu_physical_device
*dev
, uint32_t *id
)
63 int ret
= tu_drm_get_param(dev
, MSM_PARAM_GPU_ID
, &value
);
72 tu_drm_get_gmem_size(const struct tu_physical_device
*dev
, uint32_t *size
)
75 int ret
= tu_drm_get_param(dev
, MSM_PARAM_GMEM_SIZE
, &value
);
84 tu_drm_get_gmem_base(const struct tu_physical_device
*dev
, uint64_t *base
)
86 return tu_drm_get_param(dev
, MSM_PARAM_GMEM_BASE
, base
);
90 tu_drm_submitqueue_new(const struct tu_device
*dev
,
94 struct drm_msm_submitqueue req
= {
99 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
100 DRM_MSM_SUBMITQUEUE_NEW
, &req
, sizeof(req
));
109 tu_drm_submitqueue_close(const struct tu_device
*dev
, uint32_t queue_id
)
111 drmCommandWrite(dev
->physical_device
->local_fd
, DRM_MSM_SUBMITQUEUE_CLOSE
,
112 &queue_id
, sizeof(uint32_t));
116 * Return gem handle on success. Return 0 on failure.
119 tu_gem_new(const struct tu_device
*dev
, uint64_t size
, uint32_t flags
)
121 struct drm_msm_gem_new req
= {
126 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
127 DRM_MSM_GEM_NEW
, &req
, sizeof(req
));
135 tu_gem_import_dmabuf(const struct tu_device
*dev
, int prime_fd
, uint64_t size
)
137 /* lseek() to get the real size */
138 off_t real_size
= lseek(prime_fd
, 0, SEEK_END
);
139 lseek(prime_fd
, 0, SEEK_SET
);
140 if (real_size
< 0 || (uint64_t) real_size
< size
)
144 int ret
= drmPrimeFDToHandle(dev
->physical_device
->local_fd
, prime_fd
,
153 tu_gem_export_dmabuf(const struct tu_device
*dev
, uint32_t gem_handle
)
156 int ret
= drmPrimeHandleToFD(dev
->physical_device
->local_fd
, gem_handle
,
157 DRM_CLOEXEC
, &prime_fd
);
159 return ret
== 0 ? prime_fd
: -1;
163 tu_gem_close(const struct tu_device
*dev
, uint32_t gem_handle
)
165 struct drm_gem_close req
= {
166 .handle
= gem_handle
,
169 drmIoctl(dev
->physical_device
->local_fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
172 /** Helper for DRM_MSM_GEM_INFO, returns 0 on error. */
174 tu_gem_info(const struct tu_device
*dev
, uint32_t gem_handle
, uint32_t info
)
176 struct drm_msm_gem_info req
= {
177 .handle
= gem_handle
,
181 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
182 DRM_MSM_GEM_INFO
, &req
, sizeof(req
));
189 /** Returns the offset for CPU-side mmap of the gem handle.
191 * Returns 0 on error (an invalid mmap offset in the DRM UBI).
194 tu_gem_info_offset(const struct tu_device
*dev
, uint32_t gem_handle
)
196 return tu_gem_info(dev
, gem_handle
, MSM_INFO_GET_OFFSET
);
199 /** Returns the the iova of the BO in GPU memory.
201 * Returns 0 on error (an invalid iova in the MSM DRM UABI).
204 tu_gem_info_iova(const struct tu_device
*dev
, uint32_t gem_handle
)
206 return tu_gem_info(dev
, gem_handle
, MSM_INFO_GET_IOVA
);
210 tu_bo_init(struct tu_device
*dev
,
215 uint64_t iova
= tu_gem_info_iova(dev
, gem_handle
);
217 return VK_ERROR_OUT_OF_DEVICE_MEMORY
;
219 *bo
= (struct tu_bo
) {
220 .gem_handle
= gem_handle
,
229 tu_bo_init_new(struct tu_device
*dev
, struct tu_bo
*bo
, uint64_t size
)
231 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
232 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
234 uint32_t gem_handle
= tu_gem_new(dev
, size
, MSM_BO_WC
);
236 return vk_error(dev
->instance
, VK_ERROR_OUT_OF_DEVICE_MEMORY
);
238 VkResult result
= tu_bo_init(dev
, bo
, gem_handle
, size
);
239 if (result
!= VK_SUCCESS
) {
240 tu_gem_close(dev
, gem_handle
);
241 return vk_error(dev
->instance
, result
);
248 tu_bo_init_dmabuf(struct tu_device
*dev
,
253 uint32_t gem_handle
= tu_gem_import_dmabuf(dev
, fd
, size
);
255 return vk_error(dev
->instance
, VK_ERROR_INVALID_EXTERNAL_HANDLE
);
257 VkResult result
= tu_bo_init(dev
, bo
, gem_handle
, size
);
258 if (result
!= VK_SUCCESS
) {
259 tu_gem_close(dev
, gem_handle
);
260 return vk_error(dev
->instance
, result
);
267 tu_bo_export_dmabuf(struct tu_device
*dev
, struct tu_bo
*bo
)
269 return tu_gem_export_dmabuf(dev
, bo
->gem_handle
);
273 tu_bo_map(struct tu_device
*dev
, struct tu_bo
*bo
)
278 uint64_t offset
= tu_gem_info_offset(dev
, bo
->gem_handle
);
280 return vk_error(dev
->instance
, VK_ERROR_OUT_OF_DEVICE_MEMORY
);
282 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
283 void *map
= mmap(0, bo
->size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
284 dev
->physical_device
->local_fd
, offset
);
285 if (map
== MAP_FAILED
)
286 return vk_error(dev
->instance
, VK_ERROR_MEMORY_MAP_FAILED
);
293 tu_bo_finish(struct tu_device
*dev
, struct tu_bo
*bo
)
295 assert(bo
->gem_handle
);
298 munmap(bo
->map
, bo
->size
);
300 tu_gem_close(dev
, bo
->gem_handle
);
304 tu_drm_device_init(struct tu_physical_device
*device
,
305 struct tu_instance
*instance
,
306 drmDevicePtr drm_device
)
308 const char *path
= drm_device
->nodes
[DRM_NODE_RENDER
];
309 VkResult result
= VK_SUCCESS
;
310 drmVersionPtr version
;
314 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
316 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
317 "failed to open device %s", path
);
320 /* Version 1.3 added MSM_INFO_IOVA. */
321 const int min_version_major
= 1;
322 const int min_version_minor
= 3;
324 version
= drmGetVersion(fd
);
327 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
328 "failed to query kernel driver version for device %s",
332 if (strcmp(version
->name
, "msm")) {
333 drmFreeVersion(version
);
335 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
336 "device %s does not use the msm kernel driver", path
);
339 if (version
->version_major
!= min_version_major
||
340 version
->version_minor
< min_version_minor
) {
341 result
= vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
342 "kernel driver for device %s has version %d.%d, "
343 "but Vulkan requires version >= %d.%d",
344 path
, version
->version_major
, version
->version_minor
,
345 min_version_major
, min_version_minor
);
346 drmFreeVersion(version
);
351 device
->msm_major_version
= version
->version_major
;
352 device
->msm_minor_version
= version
->version_minor
;
354 drmFreeVersion(version
);
356 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
357 tu_logi("Found compatible device '%s'.", path
);
359 vk_object_base_init(NULL
, &device
->base
, VK_OBJECT_TYPE_PHYSICAL_DEVICE
);
360 device
->instance
= instance
;
361 assert(strlen(path
) < ARRAY_SIZE(device
->path
));
362 strncpy(device
->path
, path
, ARRAY_SIZE(device
->path
));
364 if (instance
->enabled_extensions
.KHR_display
) {
366 open(drm_device
->nodes
[DRM_NODE_PRIMARY
], O_RDWR
| O_CLOEXEC
);
367 if (master_fd
>= 0) {
368 /* TODO: free master_fd is accel is not working? */
372 device
->master_fd
= master_fd
;
373 device
->local_fd
= fd
;
375 if (tu_drm_get_gpu_id(device
, &device
->gpu_id
)) {
376 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
377 tu_logi("Could not query the GPU ID");
378 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
379 "could not get GPU ID");
383 if (tu_drm_get_gmem_size(device
, &device
->gmem_size
)) {
384 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
385 tu_logi("Could not query the GMEM size");
386 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
387 "could not get GMEM size");
391 if (tu_drm_get_gmem_base(device
, &device
->gmem_base
)) {
392 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
393 tu_logi("Could not query the GMEM size");
394 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
395 "could not get GMEM size");
399 return tu_physical_device_init(device
, instance
);
409 tu_enumerate_devices(struct tu_instance
*instance
)
411 /* TODO: Check for more devices ? */
412 drmDevicePtr devices
[8];
413 VkResult result
= VK_ERROR_INCOMPATIBLE_DRIVER
;
416 instance
->physical_device_count
= 0;
418 max_devices
= drmGetDevices2(0, devices
, ARRAY_SIZE(devices
));
420 if (instance
->debug_flags
& TU_DEBUG_STARTUP
) {
422 tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices
));
424 tu_logi("Found %d drm nodes", max_devices
);
428 return vk_error(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
);
430 for (unsigned i
= 0; i
< (unsigned) max_devices
; i
++) {
431 if (devices
[i
]->available_nodes
& 1 << DRM_NODE_RENDER
&&
432 devices
[i
]->bustype
== DRM_BUS_PLATFORM
) {
434 result
= tu_drm_device_init(
435 instance
->physical_devices
+ instance
->physical_device_count
,
436 instance
, devices
[i
]);
437 if (result
== VK_SUCCESS
)
438 ++instance
->physical_device_count
;
439 else if (result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
443 drmFreeDevices(devices
, max_devices
);