intel: fix check for 48b ppgtt support
[mesa.git] src/intel/vulkan/anv_gem.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "common/gen_defines.h"

/* Wrapper around ioctl() that retries when the call is interrupted (EINTR)
 * or the kernel asks us to try again (EAGAIN).
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

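/* A minimal usage sketch (the surrounding caller code is hypothetical, not
 * from this file): map a BO, use it, then release the mapping with
 * anv_gem_munmap() below so the valgrind annotations stay balanced.
 *
 *    void *map = anv_gem_mmap(device, gem_handle, 0, size, 0);
 *    if (map == MAP_FAILED)
 *       return VK_ERROR_MEMORY_MAP_FAILED;
 *    memcpy(map, data, size);
 *    anv_gem_munmap(map, size);
 */
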
/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

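/* A hedged example (the caller is hypothetical): before reading a BO with
 * the CPU, pull it into the CPU read domain so the kernel waits for and
 * flushes any pending GPU writes. I915_GEM_DOMAIN_CPU is the standard i915
 * domain flag.
 *
 *    anv_gem_set_domain(device, gem_handle,
 *                       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 */
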
/**
 * Returns 1 if the BO is busy, 0 if it is idle, or negative on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

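/* A hedged sketch of the intended calling pattern (the caller code is
 * hypothetical): the kernel writes the unconsumed time back through
 * timeout_ns, and i915 reports an expired wait as ETIME.
 *
 *    int64_t timeout = remaining_ns;
 *    if (anv_gem_wait(device, gem_handle, &timeout) == -1) {
 *       if (errno == ETIME)
 *          return VK_TIMEOUT;
 *       return VK_ERROR_DEVICE_LOST;
 *    }
 */
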
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   if (execbuf->flags & I915_EXEC_FENCE_OUT)
      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, execbuf);
   else
      return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

/** Return -1 on error. */
int
anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_handle,
   };

   if (anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      return -1;
   }

   return get_tiling.tiling_mode;
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

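/* A hedged example (the parameter choice is ours, not from this file):
 * GETPARAM is how feature bits are probed at device-open time, e.g.
 *
 *    bool has_softpin = anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN);
 *
 * Note that returning 0 on failure conflates "error" with a zero-valued
 * parameter, which is fine for boolean feature checks like this one.
 */
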
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

bool
anv_gem_has_context_priority(int fd)
{
   return !anv_gem_set_context_param(fd, 0, I915_CONTEXT_PARAM_PRIORITY,
                                     GEN_CONTEXT_MEDIUM_PRIORITY);
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

int
anv_gem_set_context_param(int fd, int context, uint32_t param, uint64_t value)
{
   struct drm_i915_gem_context_param p = {
      .ctx_id = context,
      .param = param,
      .value = value,
   };
   int err = 0;

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p))
      err = -errno;
   return err;
}

int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_get_aperture(int fd, uint64_t *size)
{
   struct drm_i915_gem_get_aperture aperture = { 0 };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
   if (ret == -1)
      return -1;

   *size = aperture.aper_available_size;

   return 0;
}

int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = device->context_id,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}

#ifndef SYNC_IOC_MAGIC
/* Duplicated from linux/sync_file.h to avoid a build-time dependency on new
 * (v4.7) kernel headers. Once distros are mostly on something newer than
 * v4.7, drop this and #include <linux/sync_file.h> instead.
 */
struct sync_merge_data {
   char name[32];
   __s32 fd2;
   __s32 fence;
   __u32 flags;
   __u32 pad;
};

#define SYNC_IOC_MAGIC '>'
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#endif

int
anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2)
{
   const char name[] = "anv merge fence";
   struct sync_merge_data args = {
      .fd2 = fd2,
      .fence = -1,
   };
   memcpy(args.name, name, sizeof(name));

   int ret = anv_ioctl(fd1, SYNC_IOC_MERGE, &args);
   if (ret == -1)
      return -1;

   return args.fence;
}

uint32_t
anv_gem_syncobj_create(struct anv_device *device, uint32_t flags)
{
   struct drm_syncobj_create args = {
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_CREATE, &args);
   if (ret)
      return 0;

   return args.handle;
}

void
anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_destroy args = {
      .handle = handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_DESTROY, &args);
}

int
anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_syncobj_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
   if (ret)
      return 0;

   return args.handle;
}

int
anv_gem_syncobj_export_sync_file(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .flags = DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &args);
   if (ret)
      return -1;

   return args.fd;
}

int
anv_gem_syncobj_import_sync_file(struct anv_device *device,
                                 uint32_t handle, int fd)
{
   struct drm_syncobj_handle args = {
      .handle = handle,
      .fd = fd,
      .flags = DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, &args);
}

void
anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle)
{
   struct drm_syncobj_array args = {
      .handles = (uint64_t)(uintptr_t)&handle,
      .count_handles = 1,
   };

   anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_RESET, &args);
}

bool
anv_gem_supports_syncobj_wait(int fd)
{
   int ret;

   struct drm_syncobj_create create = {
      .flags = 0,
   };
   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
   if (ret)
      return false;

   uint32_t syncobj = create.handle;

   struct drm_syncobj_wait wait = {
      .handles = (uint64_t)(uintptr_t)&syncobj,
      .count_handles = 1,
      .timeout_nsec = 0,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };
   ret = anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);

   struct drm_syncobj_destroy destroy = {
      .handle = syncobj,
   };
   anv_ioctl(fd, DRM_IOCTL_SYNCOBJ_DESTROY, &destroy);

   /* If it timed out, then we have the ioctl and it supports the
    * DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT flag.
    */
   return ret == -1 && errno == ETIME;
}

int
anv_gem_syncobj_wait(struct anv_device *device,
                     uint32_t *handles, uint32_t num_handles,
                     int64_t abs_timeout_ns, bool wait_all)
{
   struct drm_syncobj_wait args = {
      .handles = (uint64_t)(uintptr_t)handles,
      .count_handles = num_handles,
      .timeout_nsec = abs_timeout_ns,
      .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
   };

   if (wait_all)
      args.flags |= DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;

   return anv_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &args);
}