/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

/* Wrapper around ioctl() that retries as long as the call is interrupted
 * (EINTR) or asked to be restarted (EAGAIN).
 */
static int
anv_ioctl(int fd, unsigned long request, void *arg)
{
   int ret;

   do {
      ret = ioctl(fd, request, arg);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

/**
 * Wrapper around DRM_IOCTL_I915_GEM_CREATE.
 *
 * Return gem handle, or 0 on failure. Gem handles are never 0.
 */
uint32_t
anv_gem_create(struct anv_device *device, uint64_t size)
{
   struct drm_i915_gem_create gem_create = {
      .size = size,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create);
   if (ret != 0) {
      /* FIXME: What do we do if this fails? */
      return 0;
   }

   return gem_create.handle;
}

void
anv_gem_close(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_gem_close close = {
      .handle = gem_handle,
   };

   anv_ioctl(device->fd, DRM_IOCTL_GEM_CLOSE, &close);
}

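/* A minimal usage sketch (hypothetical caller, not part of this file):
 * every handle returned by anv_gem_create() must eventually be released
 * with anv_gem_close(), and 0 is the only failure value to check for.
 *
 *    uint32_t handle = anv_gem_create(device, 4096);
 *    if (handle == 0)
 *       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
 *    ...
 *    anv_gem_close(device, handle);
 */
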
/**
 * Wrapper around DRM_IOCTL_I915_GEM_MMAP. Returns MAP_FAILED on error.
 */
void*
anv_gem_mmap(struct anv_device *device, uint32_t gem_handle,
             uint64_t offset, uint64_t size, uint32_t flags)
{
   struct drm_i915_gem_mmap gem_mmap = {
      .handle = gem_handle,
      .offset = offset,
      .size = size,
      .flags = flags,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_MMAP, &gem_mmap);
   if (ret != 0)
      return MAP_FAILED;

   VG(VALGRIND_MALLOCLIKE_BLOCK(gem_mmap.addr_ptr, gem_mmap.size, 0, 1));
   return (void *)(uintptr_t) gem_mmap.addr_ptr;
}

/* This is just a wrapper around munmap, but it also notifies valgrind that
 * this map is no longer valid. Pair this with anv_gem_mmap().
 */
void
anv_gem_munmap(void *p, uint64_t size)
{
   VG(VALGRIND_FREELIKE_BLOCK(p, 0));
   munmap(p, size);
}

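/* Usage sketch (hypothetical caller): check for MAP_FAILED rather than
 * NULL, and pair the mapping with anv_gem_munmap() using the same size.
 *
 *    void *map = anv_gem_mmap(device, handle, 0, size, 0);
 *    if (map == MAP_FAILED)
 *       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
 *    memcpy(map, data, data_size);
 *    anv_gem_munmap(map, size);
 */
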
uint32_t
anv_gem_userptr(struct anv_device *device, void *mem, size_t size)
{
   struct drm_i915_gem_userptr userptr = {
      .user_ptr = (__u64)((unsigned long) mem),
      .user_size = size,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
   if (ret == -1)
      return 0;

   return userptr.handle;
}

int
anv_gem_set_caching(struct anv_device *device,
                    uint32_t gem_handle, uint32_t caching)
{
   struct drm_i915_gem_caching gem_caching = {
      .handle = gem_handle,
      .caching = caching,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_CACHING, &gem_caching);
}

int
anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                   uint32_t read_domains, uint32_t write_domain)
{
   struct drm_i915_gem_set_domain gem_set_domain = {
      .handle = gem_handle,
      .read_domains = read_domains,
      .write_domain = write_domain,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &gem_set_domain);
}

/**
 * Returns 1 if the BO is busy, 0 if it is idle, or negative on error.
 */
int
anv_gem_busy(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_i915_gem_busy busy = {
      .handle = gem_handle,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
   if (ret < 0)
      return ret;

   return busy.busy != 0;
}

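/* Usage sketch (hypothetical caller), e.g. for a non-blocking status query:
 *
 *    int busy = anv_gem_busy(device, handle);
 *    if (busy < 0)
 *       return vk_error(VK_ERROR_DEVICE_LOST);
 *    return busy ? VK_NOT_READY : VK_SUCCESS;
 */
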
/**
 * On error, \a timeout_ns holds the remaining time.
 */
int
anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns)
{
   struct drm_i915_gem_wait wait = {
      .bo_handle = gem_handle,
      .timeout_ns = *timeout_ns,
      .flags = 0,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   *timeout_ns = wait.timeout_ns;

   return ret;
}

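/* Usage sketch (hypothetical caller, assuming i915 wait semantics): a
 * return of -1 with errno == ETIME means the timeout expired, and
 * timeout_ns has been updated with the remaining time, so the call can
 * simply be reissued later.
 *
 *    int64_t timeout = INT64_MAX;
 *    if (anv_gem_wait(device, handle, &timeout) == -1 && errno == ETIME)
 *       return VK_TIMEOUT;
 */
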
int
anv_gem_execbuffer(struct anv_device *device,
                   struct drm_i915_gem_execbuffer2 *execbuf)
{
   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, execbuf);
}

int
anv_gem_set_tiling(struct anv_device *device,
                   uint32_t gem_handle, uint32_t stride, uint32_t tiling)
{
   int ret;

   /* set_tiling overwrites the input on the error path, so we have to
    * open-code anv_ioctl and rebuild the argument struct on every retry.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_handle,
         .tiling_mode = tiling,
         .stride = stride,
      };

      ret = ioctl(device->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   return ret;
}

/* Returns the value of \a param, or 0 on failure. Note that a parameter
 * whose value is legitimately 0 is indistinguishable from failure here.
 */
int
anv_gem_get_param(int fd, uint32_t param)
{
   int tmp;

   drm_i915_getparam_t gp = {
      .param = param,
      .value = &tmp,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
   if (ret == 0)
      return tmp;

   return 0;
}

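/* Usage sketch (hypothetical caller): probe kernel features at device-open
 * time, treating 0 as "absent or unknown".
 *
 *    if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT))
 *       goto fail;
 */
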
/* Detect bit-6 swizzling by creating a temporary 4k BO, tiling it, and
 * asking the kernel which swizzle mode it picked.
 */
bool
anv_gem_get_bit6_swizzle(int fd, uint32_t tiling)
{
   struct drm_gem_close close;
   int ret;

   struct drm_i915_gem_create gem_create = {
      .size = 4096,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &gem_create)) {
      assert(!"Failed to create GEM BO");
      return false;
   }

   bool swizzled = false;

   /* set_tiling overwrites the input on the error path, so we have to
    * open-code anv_ioctl.
    */
   do {
      struct drm_i915_gem_set_tiling set_tiling = {
         .handle = gem_create.handle,
         .tiling_mode = tiling,
         .stride = tiling == I915_TILING_X ? 512 : 128,
      };

      ret = ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
   } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

   if (ret != 0) {
      assert(!"Failed to set BO tiling");
      goto close_and_return;
   }

   struct drm_i915_gem_get_tiling get_tiling = {
      .handle = gem_create.handle,
   };

   if (anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling)) {
      assert(!"Failed to get BO tiling");
      goto close_and_return;
   }

   swizzled = get_tiling.swizzle_mode != I915_BIT_6_SWIZZLE_NONE;

close_and_return:

   memset(&close, 0, sizeof(close));
   close.handle = gem_create.handle;
   anv_ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close);

   return swizzled;
}

int
anv_gem_create_context(struct anv_device *device)
{
   struct drm_i915_gem_context_create create = { 0 };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
   if (ret == -1)
      return -1;

   return create.ctx_id;
}

int
anv_gem_destroy_context(struct anv_device *device, int context)
{
   struct drm_i915_gem_context_destroy destroy = {
      .ctx_id = context,
   };

   return anv_ioctl(device->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
}

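/* Usage sketch (hypothetical caller): each context carries its own GPU
 * state and is destroyed with the id that create returned.
 *
 *    int context_id = anv_gem_create_context(device);
 *    if (context_id == -1)
 *       return vk_error(VK_ERROR_INITIALIZATION_FAILED);
 *    ...
 *    anv_gem_destroy_context(device, context_id);
 */
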
int
anv_gem_get_context_param(int fd, int context, uint32_t param, uint64_t *value)
{
   struct drm_i915_gem_context_param gp = {
      .ctx_id = context,
      .param = param,
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &gp);
   if (ret == -1)
      return -1;

   *value = gp.value;
   return 0;
}

int
anv_gem_get_aperture(int fd, uint64_t *size)
{
   struct drm_i915_gem_get_aperture aperture = { 0 };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
   if (ret == -1)
      return -1;

   *size = aperture.aper_available_size;

   return 0;
}

/* Probe for 48-bit address support by submitting an execbuf with the
 * EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag set and a deliberately invalid
 * context id. A kernel that understands the flag gets far enough to fail
 * the context lookup (ENOENT); a kernel that doesn't rejects the flag
 * itself first with a different error (typically EINVAL).
 */
bool
anv_gem_supports_48b_addresses(int fd)
{
   struct drm_i915_gem_exec_object2 obj = {
      .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS,
   };

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t)&obj,
      .buffer_count = 1,
      .rsvd1 = 0xffffffu, /* rsvd1 is the context id; this one cannot exist */
   };

   int ret = anv_ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

   return ret == -1 && errno == ENOENT;
}

int
anv_gem_gpu_get_reset_stats(struct anv_device *device,
                            uint32_t *active, uint32_t *pending)
{
   struct drm_i915_reset_stats stats = {
      .ctx_id = device->context_id,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
   if (ret == 0) {
      *active = stats.batch_active;
      *pending = stats.batch_pending;
   }

   return ret;
}

int
anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle)
{
   struct drm_prime_handle args = {
      .handle = gem_handle,
      .flags = DRM_CLOEXEC,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
   if (ret == -1)
      return -1;

   return args.fd;
}

uint32_t
anv_gem_fd_to_handle(struct anv_device *device, int fd)
{
   struct drm_prime_handle args = {
      .fd = fd,
   };

   int ret = anv_ioctl(device->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
   if (ret == -1)
      return 0;

   return args.handle;
}
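
/* Usage sketch (hypothetical caller): export a BO as a dma-buf fd for
 * sharing with another process or API, then import on the receiving side.
 * The caller owns the returned fd and must close() it when done with it.
 *
 *    int dmabuf_fd = anv_gem_handle_to_fd(device, handle);
 *    if (dmabuf_fd == -1)
 *       return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
 *    ...
 *    uint32_t imported = anv_gem_fd_to_handle(device, dmabuf_fd);
 *    close(dmabuf_fd);
 */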