/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

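/* Wrapper around ioctl() that retries when the call is interrupted by a
 * signal (EINTR) or when the kernel asks us to try again (EAGAIN).
 */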
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

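/* Wrapper around DRM_IOCTL_GEM_CLOSE: releases the GEM handle (and the
 * underlying buffer, once all other references to it are gone).
 */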
void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

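/* Wrapper around DRM_IOCTL_I915_GEM_USERPTR: creates a GEM object backed by
 * the given range of user memory. Returns the handle, or 0 on failure.
 */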
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

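/* Wrapper around DRM_IOCTL_I915_GEM_SET_CACHING: sets the CPU caching mode
 * of the BO (e.g. I915_CACHING_NONE or I915_CACHING_CACHED).
 */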
int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

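/* Wrapper around DRM_IOCTL_I915_GEM_SET_DOMAIN: moves the BO into the given
 * read/write domains, waiting for any outstanding GPU access to finish.
 */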
int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_BUSY. Returns 1 if the BO is still busy
 * on the GPU, 0 if it is idle, or a negative value on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

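/* Wrapper around DRM_IOCTL_I915_GEM_EXECBUFFER2. When an output fence is
 * requested with I915_EXEC_FENCE_OUT, we must use the _WR (read/write)
 * variant of the ioctl so the kernel can pass the fence fd back to us.
 */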
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   if (anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

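/* Wrapper around DRM_IOCTL_I915_GETPARAM. Returns the queried value, or 0 on
 * failure (which is indistinguishable from a parameter whose value is 0).
 */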
int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

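/* Detects bit-6 swizzling by creating a scratch BO, setting the requested
 * tiling mode on it, and reading back the swizzle mode the kernel chose.
 */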
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

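/* Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_CREATE. Returns the new context
 * id, or -1 on failure.
 */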
int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

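/* Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_DESTROY. */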
int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

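/* Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM. On success, stores the
 * queried value in *value and returns 0; returns -1 on failure.
 */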
int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

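/* Wrapper around DRM_IOCTL_I915_GEM_GET_APERTURE: reports how much GTT
 * aperture space is currently available.
 */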
int
anv_gem_get_aperture(int fd, uint64_t *size)
{
   struct drm_i915_gem_get_aperture aperture = { 0 };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
   if (ret == -1)
      return -1;

   *size = aperture.aper_available_size;

   return 0;
}

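/* Probes for 48-bit address support by submitting an execbuf with an invalid
 * context id (rsvd1). A kernel that does not know the
 * EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag rejects it with EINVAL before the
 * context lookup; a kernel that accepts the flag fails the context lookup
 * instead and returns ENOENT.
 */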
bool
anv_gem_supports_48b_addresses(int fd)
{
   struct drm_i915_gem_exec_object2 obj = {
      .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
   };

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t)&obj,
      .buffer_count = 1,
      .rsvd1 = 0xffffffu,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

   return ret == -1 && errno == ENOENT;
}

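/* Wrapper around DRM_IOCTL_I915_GET_RESET_STATS: reports how many of our
 * context's batches were executing (active) or queued (pending) when GPU
 * resets occurred.
 */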
int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = device->context_id,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

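/* Exports a GEM handle as a dma-buf file descriptor (PRIME). Returns the fd,
 * or -1 on failure.
 */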
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

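/* Imports a dma-buf file descriptor as a GEM handle (PRIME). Returns the
 * handle, or 0 on failure.
 */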
uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

#ifndef SYNC_IOC_MAGIC
/* duplicated from linux/sync_file.h to avoid build-time dependency
 * on new (v4.7) kernel headers. Once distros are mostly using
 * something newer than v4.7, drop this and #include <linux/sync_file.h>
 * instead.
 */
struct sync_merge_data {
   char name[32];
   __s32 fd2;
   __s32 fence;
   __u32 flags;
   __u32 pad;
};

#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif

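/* Merges two sync file fds into a new fence via SYNC_IOC_MERGE. Returns the
 * merged fence fd, or -1 on failure.
 */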
int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   const char name[] = "anv merge fence";
   struct sync_merge_data args = {
      .fd2 = fd2,
      .fence = -1,
   };
   memcpy(args.name, name, sizeof(name));

   int ret = anv_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}

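/* Wrapper around DRM_IOCTL_SYNCOBJ_CREATE. Returns the new syncobj handle,
 * or 0 on failure.
 */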
uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

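/* Wrapper around DRM_IOCTL_SYNCOBJ_DESTROY. */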
void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

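/* Exports a syncobj as a file descriptor for sharing across processes.
 * Returns the fd, or -1 on failure.
 */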
int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

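/* Imports a syncobj file descriptor. Returns the new handle, or 0 on
 * failure.
 */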
uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

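/* Exports the syncobj's current fence as a sync file fd. Returns the fd, or
 * -1 on failure.
 */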
int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

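/* Replaces the syncobj's fence with the fence carried by the given sync file
 * fd.
 */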
int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

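/* Resets the syncobj to the unsignaled state. */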
void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

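/* Probes for DRM_IOCTL_SYNCOBJ_WAIT with WAIT_FOR_SUBMIT support: waits with
 * a zero timeout on a fresh syncobj that never had a fence submitted. On a
 * kernel that supports the flag, the wait times out (ETIME); older kernels
 * fail in some other way.
 */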
bool
anv_gem_supports_syncobj_wait(int fd)
{
   int ret;

   struct drm_syncobj_create create = {
      .flags = 0,
   };
   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret)
      return false;

   uint32_t syncobj = create.handle;

   struct drm_syncobj_wait wait = {
      .handles = (uint64_t)(uintptr_t)&syncobj,
      .count_handles = 1,
      .timeout_nsec = 0,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };
   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);

   struct drm_syncobj_destroy destroy = {
      .handle = syncobj,
   };
   anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);

   /* If it timed out, then we have the ioctl and it supports the
    * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
    */
   return ret == -1 && errno == ETIME;
}

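/* Wrapper around DRM_IOCTL_SYNCOBJ_WAIT. The timeout is an absolute
 * CLOCK_MONOTONIC time in nanoseconds; with wait_all set, waits for all
 * handles to signal rather than any one of them.
 */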
int
anv_gem_syncobj_wait(struct anv_device *device,
                     uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}