/*
 * Copyright © 2018 Google, Inc.
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "tu_private.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "drm-uapi/msm_drm.h"
36 tu_drm_get_param(const struct tu_physical_device
*dev
,
40 /* Technically this requires a pipe, but the kernel only supports one pipe
41 * anyway at the time of writing and most of these are clearly pipe
43 struct drm_msm_param req
= {
48 int ret
= drmCommandWriteRead(dev
->local_fd
, DRM_MSM_GET_PARAM
, &req
,
59 tu_drm_get_gpu_id(const struct tu_physical_device
*dev
, uint32_t *id
)
62 int ret
= tu_drm_get_param(dev
, MSM_PARAM_GPU_ID
, &value
);
71 tu_drm_get_gmem_size(const struct tu_physical_device
*dev
, uint32_t *size
)
74 int ret
= tu_drm_get_param(dev
, MSM_PARAM_GMEM_SIZE
, &value
);
83 tu_drm_get_gmem_base(const struct tu_physical_device
*dev
, uint64_t *base
)
85 return tu_drm_get_param(dev
, MSM_PARAM_GMEM_BASE
, base
);
89 tu_drm_submitqueue_new(const struct tu_device
*dev
,
93 struct drm_msm_submitqueue req
= {
98 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
99 DRM_MSM_SUBMITQUEUE_NEW
, &req
, sizeof(req
));
108 tu_drm_submitqueue_close(const struct tu_device
*dev
, uint32_t queue_id
)
110 drmCommandWrite(dev
->physical_device
->local_fd
, DRM_MSM_SUBMITQUEUE_CLOSE
,
111 &queue_id
, sizeof(uint32_t));
115 * Return gem handle on success. Return 0 on failure.
118 tu_gem_new(const struct tu_device
*dev
, uint64_t size
, uint32_t flags
)
120 struct drm_msm_gem_new req
= {
125 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
126 DRM_MSM_GEM_NEW
, &req
, sizeof(req
));
134 tu_gem_import_dmabuf(const struct tu_device
*dev
, int prime_fd
, uint64_t size
)
136 /* lseek() to get the real size */
137 off_t real_size
= lseek(prime_fd
, 0, SEEK_END
);
138 lseek(prime_fd
, 0, SEEK_SET
);
139 if (real_size
< 0 || (uint64_t) real_size
< size
)
143 int ret
= drmPrimeFDToHandle(dev
->physical_device
->local_fd
, prime_fd
,
152 tu_gem_export_dmabuf(const struct tu_device
*dev
, uint32_t gem_handle
)
155 int ret
= drmPrimeHandleToFD(dev
->physical_device
->local_fd
, gem_handle
,
156 DRM_CLOEXEC
, &prime_fd
);
158 return ret
== 0 ? prime_fd
: -1;
162 tu_gem_close(const struct tu_device
*dev
, uint32_t gem_handle
)
164 struct drm_gem_close req
= {
165 .handle
= gem_handle
,
168 drmIoctl(dev
->physical_device
->local_fd
, DRM_IOCTL_GEM_CLOSE
, &req
);
171 /** Helper for DRM_MSM_GEM_INFO, returns 0 on error. */
173 tu_gem_info(const struct tu_device
*dev
, uint32_t gem_handle
, uint32_t info
)
175 struct drm_msm_gem_info req
= {
176 .handle
= gem_handle
,
180 int ret
= drmCommandWriteRead(dev
->physical_device
->local_fd
,
181 DRM_MSM_GEM_INFO
, &req
, sizeof(req
));
188 /** Returns the offset for CPU-side mmap of the gem handle.
190 * Returns 0 on error (an invalid mmap offset in the DRM UBI).
193 tu_gem_info_offset(const struct tu_device
*dev
, uint32_t gem_handle
)
195 return tu_gem_info(dev
, gem_handle
, MSM_INFO_GET_OFFSET
);
198 /** Returns the the iova of the BO in GPU memory.
200 * Returns 0 on error (an invalid iova in the MSM DRM UABI).
203 tu_gem_info_iova(const struct tu_device
*dev
, uint32_t gem_handle
)
205 return tu_gem_info(dev
, gem_handle
, MSM_INFO_GET_IOVA
);
209 tu_drm_device_init(struct tu_physical_device
*device
,
210 struct tu_instance
*instance
,
211 drmDevicePtr drm_device
)
213 const char *path
= drm_device
->nodes
[DRM_NODE_RENDER
];
214 VkResult result
= VK_SUCCESS
;
215 drmVersionPtr version
;
219 fd
= open(path
, O_RDWR
| O_CLOEXEC
);
221 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
222 "failed to open device %s", path
);
225 /* Version 1.3 added MSM_INFO_IOVA. */
226 const int min_version_major
= 1;
227 const int min_version_minor
= 3;
229 version
= drmGetVersion(fd
);
232 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
233 "failed to query kernel driver version for device %s",
237 if (strcmp(version
->name
, "msm")) {
238 drmFreeVersion(version
);
240 return vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
241 "device %s does not use the msm kernel driver", path
);
244 if (version
->version_major
!= min_version_major
||
245 version
->version_minor
< min_version_minor
) {
246 result
= vk_errorf(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
,
247 "kernel driver for device %s has version %d.%d, "
248 "but Vulkan requires version >= %d.%d",
249 path
, version
->version_major
, version
->version_minor
,
250 min_version_major
, min_version_minor
);
251 drmFreeVersion(version
);
256 device
->msm_major_version
= version
->version_major
;
257 device
->msm_minor_version
= version
->version_minor
;
259 drmFreeVersion(version
);
261 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
262 tu_logi("Found compatible device '%s'.", path
);
264 vk_object_base_init(NULL
, &device
->base
, VK_OBJECT_TYPE_PHYSICAL_DEVICE
);
265 device
->instance
= instance
;
266 assert(strlen(path
) < ARRAY_SIZE(device
->path
));
267 strncpy(device
->path
, path
, ARRAY_SIZE(device
->path
));
269 if (instance
->enabled_extensions
.KHR_display
) {
271 open(drm_device
->nodes
[DRM_NODE_PRIMARY
], O_RDWR
| O_CLOEXEC
);
272 if (master_fd
>= 0) {
273 /* TODO: free master_fd is accel is not working? */
277 device
->master_fd
= master_fd
;
278 device
->local_fd
= fd
;
280 if (tu_drm_get_gpu_id(device
, &device
->gpu_id
)) {
281 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
282 tu_logi("Could not query the GPU ID");
283 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
284 "could not get GPU ID");
288 if (tu_drm_get_gmem_size(device
, &device
->gmem_size
)) {
289 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
290 tu_logi("Could not query the GMEM size");
291 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
292 "could not get GMEM size");
296 if (tu_drm_get_gmem_base(device
, &device
->gmem_base
)) {
297 if (instance
->debug_flags
& TU_DEBUG_STARTUP
)
298 tu_logi("Could not query the GMEM size");
299 result
= vk_errorf(instance
, VK_ERROR_INITIALIZATION_FAILED
,
300 "could not get GMEM size");
304 return tu_physical_device_init(device
, instance
);
314 tu_enumerate_devices(struct tu_instance
*instance
)
316 /* TODO: Check for more devices ? */
317 drmDevicePtr devices
[8];
318 VkResult result
= VK_ERROR_INCOMPATIBLE_DRIVER
;
321 instance
->physical_device_count
= 0;
323 max_devices
= drmGetDevices2(0, devices
, ARRAY_SIZE(devices
));
325 if (instance
->debug_flags
& TU_DEBUG_STARTUP
) {
327 tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices
));
329 tu_logi("Found %d drm nodes", max_devices
);
333 return vk_error(instance
, VK_ERROR_INCOMPATIBLE_DRIVER
);
335 for (unsigned i
= 0; i
< (unsigned) max_devices
; i
++) {
336 if (devices
[i
]->available_nodes
& 1 << DRM_NODE_RENDER
&&
337 devices
[i
]->bustype
== DRM_BUS_PLATFORM
) {
339 result
= tu_drm_device_init(
340 instance
->physical_devices
+ instance
->physical_device_count
,
341 instance
, devices
[i
]);
342 if (result
== VK_SUCCESS
)
343 ++instance
->physical_device_count
;
344 else if (result
!= VK_ERROR_INCOMPATIBLE_DRIVER
)
348 drmFreeDevices(devices
, max_devices
);