src/intel/vulkan/anv_gem.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define _DEFAULT_SOURCE

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

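/**
 * Wrapper around ioctl() that retries the call when it is interrupted
 * (EINTR) or the kernel asks us to try again (EAGAIN).
 */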
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, size_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

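/**
 * Wrapper around DRM_IOCTL_GEM_CLOSE.
 *
 * Releases the given gem handle; any failure from the ioctl is ignored.
 */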
void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0) {
      /* FIXME: Is NULL the right error return? Cf MAP_INVALID */
      return NULL;
   }

   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

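/**
 * Wrapper around DRM_IOCTL_I915_GEM_USERPTR.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */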
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

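/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_CACHING.
 *
 * Returns the raw ioctl result: 0 on success, -1 with errno set on failure.
 */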
int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

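/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_DOMAIN.
 */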
int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_WAIT.
 *
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

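/**
 * Wrapper around DRM_IOCTL_I915_GEM_EXECBUFFER2.
 */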
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

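/**
 * Wrapper around DRM_IOCTL_I915_GEM_SET_TILING.
 */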
int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

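/**
 * Wrapper around DRM_IOCTL_I915_GETPARAM.
 *
 * Returns the value of the requested parameter, or 0 on failure.
 */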
int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

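/**
 * Checks whether the kernel applies bit-6 swizzling for the given tiling
 * mode, by creating a temporary BO, setting its tiling, and querying the
 * resulting swizzle mode.
 */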
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to open
    * code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

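/**
 * Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_CREATE.
 *
 * Returns the new context id, or -1 on failure.
 */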
int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

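/**
 * Wrapper around DRM_IOCTL_I915_GEM_CONTEXT_DESTROY.
 */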
int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

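/**
 * Wrapper around DRM_IOCTL_I915_GEM_GET_APERTURE.
 *
 * On success, stores the available aperture size in \a size and returns 0;
 * returns -1 on failure.
 */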
int
anv_gem_get_aperture(int fd, uint64_t *size)
{
   struct drm_i915_gem_get_aperture aperture = { 0 };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
   if (ret == -1)
      return -1;

   *size = aperture.aper_available_size;

   return 0;
}

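/**
 * Wrapper around DRM_IOCTL_PRIME_HANDLE_TO_FD.
 *
 * Returns a dma-buf file descriptor for the gem handle, or -1 on failure.
 */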
int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

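/**
 * Wrapper around DRM_IOCTL_PRIME_FD_TO_HANDLE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */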
uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}