turnip: Move tu_bo functions to tu_drm.c
[mesa.git] / src / freedreno / vulkan / tu_drm.c
1 /*
2 * Copyright © 2018 Google, Inc.
3 * Copyright © 2015 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25 #include "tu_private.h"
26
27 #include <errno.h>
28 #include <fcntl.h>
29 #include <stdint.h>
30 #include <sys/ioctl.h>
31 #include <sys/mman.h>
32 #include <xf86drm.h>
33
34 #include "drm-uapi/msm_drm.h"
35
36 static int
37 tu_drm_get_param(const struct tu_physical_device *dev,
38 uint32_t param,
39 uint64_t *value)
40 {
41 /* Technically this requires a pipe, but the kernel only supports one pipe
42 * anyway at the time of writing and most of these are clearly pipe
43 * independent. */
44 struct drm_msm_param req = {
45 .pipe = MSM_PIPE_3D0,
46 .param = param,
47 };
48
49 int ret = drmCommandWriteRead(dev->local_fd, DRM_MSM_GET_PARAM, &req,
50 sizeof(req));
51 if (ret)
52 return ret;
53
54 *value = req.value;
55
56 return 0;
57 }
58
59 static int
60 tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id)
61 {
62 uint64_t value;
63 int ret = tu_drm_get_param(dev, MSM_PARAM_GPU_ID, &value);
64 if (ret)
65 return ret;
66
67 *id = value;
68 return 0;
69 }
70
71 static int
72 tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size)
73 {
74 uint64_t value;
75 int ret = tu_drm_get_param(dev, MSM_PARAM_GMEM_SIZE, &value);
76 if (ret)
77 return ret;
78
79 *size = value;
80 return 0;
81 }
82
83 static int
84 tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base)
85 {
86 return tu_drm_get_param(dev, MSM_PARAM_GMEM_BASE, base);
87 }
88
89 int
90 tu_drm_submitqueue_new(const struct tu_device *dev,
91 int priority,
92 uint32_t *queue_id)
93 {
94 struct drm_msm_submitqueue req = {
95 .flags = 0,
96 .prio = priority,
97 };
98
99 int ret = drmCommandWriteRead(dev->physical_device->local_fd,
100 DRM_MSM_SUBMITQUEUE_NEW, &req, sizeof(req));
101 if (ret)
102 return ret;
103
104 *queue_id = req.id;
105 return 0;
106 }
107
108 void
109 tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id)
110 {
111 drmCommandWrite(dev->physical_device->local_fd, DRM_MSM_SUBMITQUEUE_CLOSE,
112 &queue_id, sizeof(uint32_t));
113 }
114
115 /**
116 * Return gem handle on success. Return 0 on failure.
117 */
118 static uint32_t
119 tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags)
120 {
121 struct drm_msm_gem_new req = {
122 .size = size,
123 .flags = flags,
124 };
125
126 int ret = drmCommandWriteRead(dev->physical_device->local_fd,
127 DRM_MSM_GEM_NEW, &req, sizeof(req));
128 if (ret)
129 return 0;
130
131 return req.handle;
132 }
133
134 static uint32_t
135 tu_gem_import_dmabuf(const struct tu_device *dev, int prime_fd, uint64_t size)
136 {
137 /* lseek() to get the real size */
138 off_t real_size = lseek(prime_fd, 0, SEEK_END);
139 lseek(prime_fd, 0, SEEK_SET);
140 if (real_size < 0 || (uint64_t) real_size < size)
141 return 0;
142
143 uint32_t gem_handle;
144 int ret = drmPrimeFDToHandle(dev->physical_device->local_fd, prime_fd,
145 &gem_handle);
146 if (ret)
147 return 0;
148
149 return gem_handle;
150 }
151
152 static int
153 tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle)
154 {
155 int prime_fd;
156 int ret = drmPrimeHandleToFD(dev->physical_device->local_fd, gem_handle,
157 DRM_CLOEXEC, &prime_fd);
158
159 return ret == 0 ? prime_fd : -1;
160 }
161
162 static void
163 tu_gem_close(const struct tu_device *dev, uint32_t gem_handle)
164 {
165 struct drm_gem_close req = {
166 .handle = gem_handle,
167 };
168
169 drmIoctl(dev->physical_device->local_fd, DRM_IOCTL_GEM_CLOSE, &req);
170 }
171
172 /** Helper for DRM_MSM_GEM_INFO, returns 0 on error. */
173 static uint64_t
174 tu_gem_info(const struct tu_device *dev, uint32_t gem_handle, uint32_t info)
175 {
176 struct drm_msm_gem_info req = {
177 .handle = gem_handle,
178 .info = info,
179 };
180
181 int ret = drmCommandWriteRead(dev->physical_device->local_fd,
182 DRM_MSM_GEM_INFO, &req, sizeof(req));
183 if (ret < 0)
184 return 0;
185
186 return req.value;
187 }
188
/** Returns the offset for CPU-side mmap of the gem handle.
 *
 * Returns 0 on error (an invalid mmap offset in the DRM UABI).
 */
static uint64_t
tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle)
{
   return tu_gem_info(dev, gem_handle, MSM_INFO_GET_OFFSET);
}
198
/** Returns the iova of the BO in GPU memory.
 *
 * Returns 0 on error (an invalid iova in the MSM DRM UABI).
 */
static uint64_t
tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle)
{
   return tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
}
208
209 static VkResult
210 tu_bo_init(struct tu_device *dev,
211 struct tu_bo *bo,
212 uint32_t gem_handle,
213 uint64_t size)
214 {
215 uint64_t iova = tu_gem_info_iova(dev, gem_handle);
216 if (!iova)
217 return VK_ERROR_OUT_OF_DEVICE_MEMORY;
218
219 *bo = (struct tu_bo) {
220 .gem_handle = gem_handle,
221 .size = size,
222 .iova = iova,
223 };
224
225 return VK_SUCCESS;
226 }
227
228 VkResult
229 tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size)
230 {
231 /* TODO: Choose better flags. As of 2018-11-12, freedreno/drm/msm_bo.c
232 * always sets `flags = MSM_BO_WC`, and we copy that behavior here.
233 */
234 uint32_t gem_handle = tu_gem_new(dev, size, MSM_BO_WC);
235 if (!gem_handle)
236 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
237
238 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
239 if (result != VK_SUCCESS) {
240 tu_gem_close(dev, gem_handle);
241 return vk_error(dev->instance, result);
242 }
243
244 return VK_SUCCESS;
245 }
246
247 VkResult
248 tu_bo_init_dmabuf(struct tu_device *dev,
249 struct tu_bo *bo,
250 uint64_t size,
251 int fd)
252 {
253 uint32_t gem_handle = tu_gem_import_dmabuf(dev, fd, size);
254 if (!gem_handle)
255 return vk_error(dev->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE);
256
257 VkResult result = tu_bo_init(dev, bo, gem_handle, size);
258 if (result != VK_SUCCESS) {
259 tu_gem_close(dev, gem_handle);
260 return vk_error(dev->instance, result);
261 }
262
263 return VK_SUCCESS;
264 }
265
/* Export @bo as a dma-buf fd. Returns the fd on success, -1 on failure.
 * The caller owns the returned fd and must close() it. */
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo)
{
   return tu_gem_export_dmabuf(dev, bo->gem_handle);
}
271
272 VkResult
273 tu_bo_map(struct tu_device *dev, struct tu_bo *bo)
274 {
275 if (bo->map)
276 return VK_SUCCESS;
277
278 uint64_t offset = tu_gem_info_offset(dev, bo->gem_handle);
279 if (!offset)
280 return vk_error(dev->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
281
282 /* TODO: Should we use the wrapper os_mmap() like Freedreno does? */
283 void *map = mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
284 dev->physical_device->local_fd, offset);
285 if (map == MAP_FAILED)
286 return vk_error(dev->instance, VK_ERROR_MEMORY_MAP_FAILED);
287
288 bo->map = map;
289 return VK_SUCCESS;
290 }
291
292 void
293 tu_bo_finish(struct tu_device *dev, struct tu_bo *bo)
294 {
295 assert(bo->gem_handle);
296
297 if (bo->map)
298 munmap(bo->map, bo->size);
299
300 tu_gem_close(dev, bo->gem_handle);
301 }
302
303 static VkResult
304 tu_drm_device_init(struct tu_physical_device *device,
305 struct tu_instance *instance,
306 drmDevicePtr drm_device)
307 {
308 const char *path = drm_device->nodes[DRM_NODE_RENDER];
309 VkResult result = VK_SUCCESS;
310 drmVersionPtr version;
311 int fd;
312 int master_fd = -1;
313
314 fd = open(path, O_RDWR | O_CLOEXEC);
315 if (fd < 0) {
316 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
317 "failed to open device %s", path);
318 }
319
320 /* Version 1.3 added MSM_INFO_IOVA. */
321 const int min_version_major = 1;
322 const int min_version_minor = 3;
323
324 version = drmGetVersion(fd);
325 if (!version) {
326 close(fd);
327 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
328 "failed to query kernel driver version for device %s",
329 path);
330 }
331
332 if (strcmp(version->name, "msm")) {
333 drmFreeVersion(version);
334 close(fd);
335 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
336 "device %s does not use the msm kernel driver", path);
337 }
338
339 if (version->version_major != min_version_major ||
340 version->version_minor < min_version_minor) {
341 result = vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
342 "kernel driver for device %s has version %d.%d, "
343 "but Vulkan requires version >= %d.%d",
344 path, version->version_major, version->version_minor,
345 min_version_major, min_version_minor);
346 drmFreeVersion(version);
347 close(fd);
348 return result;
349 }
350
351 device->msm_major_version = version->version_major;
352 device->msm_minor_version = version->version_minor;
353
354 drmFreeVersion(version);
355
356 if (instance->debug_flags & TU_DEBUG_STARTUP)
357 tu_logi("Found compatible device '%s'.", path);
358
359 vk_object_base_init(NULL, &device->base, VK_OBJECT_TYPE_PHYSICAL_DEVICE);
360 device->instance = instance;
361 assert(strlen(path) < ARRAY_SIZE(device->path));
362 strncpy(device->path, path, ARRAY_SIZE(device->path));
363
364 if (instance->enabled_extensions.KHR_display) {
365 master_fd =
366 open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
367 if (master_fd >= 0) {
368 /* TODO: free master_fd is accel is not working? */
369 }
370 }
371
372 device->master_fd = master_fd;
373 device->local_fd = fd;
374
375 if (tu_drm_get_gpu_id(device, &device->gpu_id)) {
376 if (instance->debug_flags & TU_DEBUG_STARTUP)
377 tu_logi("Could not query the GPU ID");
378 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
379 "could not get GPU ID");
380 goto fail;
381 }
382
383 if (tu_drm_get_gmem_size(device, &device->gmem_size)) {
384 if (instance->debug_flags & TU_DEBUG_STARTUP)
385 tu_logi("Could not query the GMEM size");
386 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
387 "could not get GMEM size");
388 goto fail;
389 }
390
391 if (tu_drm_get_gmem_base(device, &device->gmem_base)) {
392 if (instance->debug_flags & TU_DEBUG_STARTUP)
393 tu_logi("Could not query the GMEM size");
394 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
395 "could not get GMEM size");
396 goto fail;
397 }
398
399 return tu_physical_device_init(device, instance);
400
401 fail:
402 close(fd);
403 if (master_fd != -1)
404 close(master_fd);
405 return result;
406 }
407
408 VkResult
409 tu_enumerate_devices(struct tu_instance *instance)
410 {
411 /* TODO: Check for more devices ? */
412 drmDevicePtr devices[8];
413 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
414 int max_devices;
415
416 instance->physical_device_count = 0;
417
418 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
419
420 if (instance->debug_flags & TU_DEBUG_STARTUP) {
421 if (max_devices < 0)
422 tu_logi("drmGetDevices2 returned error: %s\n", strerror(max_devices));
423 else
424 tu_logi("Found %d drm nodes", max_devices);
425 }
426
427 if (max_devices < 1)
428 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
429
430 for (unsigned i = 0; i < (unsigned) max_devices; i++) {
431 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
432 devices[i]->bustype == DRM_BUS_PLATFORM) {
433
434 result = tu_drm_device_init(
435 instance->physical_devices + instance->physical_device_count,
436 instance, devices[i]);
437 if (result == VK_SUCCESS)
438 ++instance->physical_device_count;
439 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
440 break;
441 }
442 }
443 drmFreeDevices(devices, max_devices);
444
445 return result;
446 }
447