anv: add new gem/drm helpers
src/intel/vulkan/anv_gem.c

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/gen_defines.h"
#include "common/gen_gem.h"
#include "drm-uapi/sync_file.h"

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   gen_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}
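
/* Usage sketch (illustrative, not part of this file): pairing
 * anv_gem_create() with anv_gem_close(); `device` is assumed to be a fully
 * initialized anv_device.
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0)
 *       return;   // allocation failed; the caller picks the error path
 *    ...use the BO...
 *    anv_gem_close(device, handle);
 */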

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP_OFFSET. Returns MAP_FAILED on error.
 */
static void*
anv_gem_mmap_offset(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap_offset gem_mmap = {
      .handle = gem_handle,
      .flags = (flags & I915_MMAP_WC) ?
               I915_MMAP_OFFSET_WC : I915_MMAP_OFFSET_WB,
   };
   assert(offset == 0);

   /* Get the fake offset back */
   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   /* And map it */
   void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    device->fd, gem_mmap.offset);
   return map;
}

static void*
anv_gem_mmap_legacy(struct anv_device *device, uint32_t gem_handle,
                    uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP or DRM_IOCTL_I915_GEM_MMAP_OFFSET,
 * depending on kernel support. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   if (device->physical->has_mmap_offset)
      return anv_gem_mmap_offset(device, gem_handle, offset, size, flags);
   else
      return anv_gem_mmap_legacy(device, gem_handle, offset, size, flags);
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(struct anv_device *device, void *p, uint64_t size)
{
   if (!device->physical->has_mmap_offset)
      VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}
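
/* Usage sketch (illustrative): a write-combining CPU mapping of a BO;
 * `device` and `handle` are assumed to come from the caller.
 *
 *    void *map = anv_gem_mmap(device, handle, 0, 4096, I915_MMAP_WC);
 *    if (map == MAP_FAILED)
 *       return;   // mapping failed
 *    memset(map, 0, 4096);
 *    anv_gem_munmap(device, map, 4096);
 */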

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 1 if the BO is busy, 0 if it is idle, or negative on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}
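
/* Usage sketch (illustrative): poll, then block with a 1 ms timeout. On
 * timeout the ioctl fails with errno == ETIME and `timeout` holds the time
 * that was left.
 *
 *    if (anv_gem_busy(device, handle) == 1) {
 *       int64_t timeout = 1000000;  // 1 ms in nanoseconds
 *       if (anv_gem_wait(device, handle, &timeout) == -1 && errno == ETIME)
 *          ...still busy after 1 ms...
 *    }
 */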

int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}
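
/* The _WR ("write") ioctl variant is required whenever I915_EXEC_FENCE_OUT
 * is set, because the kernel writes the out-fence fd back into the upper
 * half of execbuf->rsvd2. Minimal sketch (illustrative; a real submission
 * also fills in exec-object and relocation state):
 *
 *    struct drm_i915_gem_exec_object2 obj = { .handle = batch_handle };
 *    struct drm_i915_gem_execbuffer2 execbuf = {
 *       .buffers_ptr = (uintptr_t)&obj,
 *       .buffer_count = 1,
 *       .batch_len = batch_size,
 *       .flags = I915_EXEC_RENDER | I915_EXEC_FENCE_OUT,
 *    };
 *    if (anv_gem_execbuffer(device, &execbuf) == 0)
 *       sync_fd = execbuf.rsvd2 >> 32;   // sync-file fd from the kernel
 */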

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   /* FIXME: On discrete platforms we don't have DRM_IOCTL_I915_GEM_GET_TILING
    * anymore, so we will need another way to get the tiling. Apparently this
    * is only used in Android code, so we may need some other way to
    * communicate the tiling mode.
    */
   if (gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* On discrete platforms we don't have DRM_IOCTL_I915_GEM_SET_TILING. So
    * nothing needs to be done.
    */
   if (!device->info.has_tiling_uapi)
      return 0;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code gen_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}
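
/* Usage sketch (illustrative): X-tile a BO, with a stride that is assumed to
 * satisfy the hardware's alignment rules for the chosen tiling mode.
 *
 *    if (anv_gem_set_tiling(device, handle, 4096, I915_TILING_X) == -1)
 *       ...fall back to I915_TILING_NONE...
 */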

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}
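
/* Note that a failed GETPARAM returns 0, which is indistinguishable from a
 * parameter whose value is 0; feature probes therefore treat nonzero as
 * "supported". Sketch (illustrative):
 *
 *    bool has_exec_fence =
 *       anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE) != 0;
 */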

uint64_t
anv_gem_get_drm_cap(int fd, uint32_t capability)
{
   struct drm_get_cap cap = {
      .capability = capability,
   };

   gen_ioctl(fd, DRM_IOCTL_GET_CAP, &cap);
   return cap.value;
}
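
/* Probes for bit-6 swizzling: create a throwaway 4 KiB BO, tile it, and read
 * back the swizzle mode the kernel chose; anything other than
 * I915_BIT_6_SWIZZLE_NONE means CPU access must account for swizzling.
 */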
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code gen_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

 close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   gen_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     GEN_CONTEXT_MEDIUM_PRIORITY);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}
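
/* Usage sketch (illustrative): create a context, raise its priority if the
 * kernel allows it, and destroy it on teardown.
 *
 *    int ctx = anv_gem_create_context(device);
 *    if (ctx == -1)
 *       return;   // context creation failed
 *    if (anv_gem_has_context_priority(device->fd))
 *       anv_gem_set_context_param(device->fd, ctx,
 *                                 I915_CONTEXT_PARAM_PRIORITY,
 *                                 GEN_CONTEXT_HIGH_PRIORITY);
 *    ...
 *    anv_gem_destroy_context(device, ctx);
 */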

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}

int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = device->context_id,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}
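
/* Usage sketch (illustrative): export a BO as a dma-buf fd for another
 * process or driver, then import one back; the exported fd must eventually
 * be close()d by its owner.
 *
 *    int dmabuf = anv_gem_handle_to_fd(device, handle);
 *    if (dmabuf == -1)
 *       return;   // export failed
 *    uint32_t imported = anv_gem_fd_to_handle(device, dmabuf);
 *    close(dmabuf);
 */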

int
anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result)
{
   struct drm_i915_reg_read args = {
      .offset = offset
   };

   int ret = gen_ioctl(fd, DRM_IOCTL_I915_REG_READ, &args);

   *result = args.val;
   return ret;
}

int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   struct sync_merge_data args = {
      .name = "anv merge fence",
      .fd2 = fd2,
      .fence = -1,
   };

   int ret = gen_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}
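
/* The returned fd is a new sync file that signals once both inputs have
 * signaled; the caller owns it, and both input fds remain valid. Sketch
 * (illustrative):
 *
 *    int merged = anv_gem_sync_file_merge(device, wait_fd, signal_fd);
 *    if (merged == -1)
 *       return;   // merge failed
 *    ...submit with merged as the in-fence, then close(merged)...
 */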

uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}
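
/* Usage sketch (illustrative): a binary syncobj created pre-signaled, as a
 * Vulkan fence created in the signaled state would be.
 *
 *    uint32_t syncobj =
 *       anv_gem_syncobj_create(device, DRM_SYNCOBJ_CREATE_SIGNALED);
 *    if (syncobj == 0)
 *       return;   // creation failed
 *    ...
 *    anv_gem_syncobj_destroy(device, syncobj);
 */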

int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

bool
anv_gem_supports_syncobj_wait(int fd)
{
   return gen_gem_supports_syncobj_wait(fd);
}

int
anv_gem_syncobj_wait(struct anv_device *device,
                     const uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}
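
/* Note the timeout is absolute, compared by the kernel against
 * CLOCK_MONOTONIC, not a relative duration. Sketch (illustrative;
 * get_abs_deadline_ns() is a hypothetical helper that adds a duration to the
 * current monotonic time):
 *
 *    uint32_t handles[2] = { fence_a, fence_b };
 *    int64_t deadline = get_abs_deadline_ns(1000000000);  // now + 1 s
 *    int ret = anv_gem_syncobj_wait(device, handles, 2, deadline, true);
 *    if (ret == -1 && errno == ETIME)
 *       ...timed out...
 */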

int
anv_gem_syncobj_timeline_wait(struct anv_device *device,
                              const uint32_t *handles, const uint64_t *points,
                              uint32_t num_items, int64_t abs_timeout_ns,
                              bool wait_all, bool wait_materialize)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
   if (wait_materialize)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE;

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &args);
}

int
anv_gem_syncobj_timeline_signal(struct anv_device *device,
                                const uint32_t *handles, const uint64_t *points,
                                uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, &args);
}

int
anv_gem_syncobj_timeline_query(struct anv_device *device,
                               const uint32_t *handles, uint64_t *points,
                               uint32_t num_items)
{
   assert(device->physical->has_syncobj_wait_available);

   struct drm_syncobj_timeline_array args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .points = (uint64_t)(uintptr_t)points,
      .count_handles = num_items,
   };

   return gen_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
}
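
/* Usage sketch (illustrative): signal point 5 on a timeline syncobj from the
 * CPU, then read the current payload back; `timeline` is assumed to be a
 * timeline syncobj handle owned by the caller.
 *
 *    uint64_t point = 5;
 *    anv_gem_syncobj_timeline_signal(device, &timeline, &point, 1);
 *
 *    uint64_t value = 0;
 *    if (anv_gem_syncobj_timeline_query(device, &timeline, &value, 1) == 0)
 *       assert(value >= 5);
 */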