intel/tools: new intel_sanitize_gpu tool
[mesa.git] / src / intel / tools / intel_sanitize_gpu.c
1 /*
2 * Copyright © 2015-2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #undef _FILE_OFFSET_BITS /* prevent #define open open64 */
25
26 #include <string.h>
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdint.h>
30 #include <stdarg.h>
31 #include <fcntl.h>
32 #include <unistd.h>
33 #include <sys/ioctl.h>
34 #include <sys/stat.h>
35 #include <sys/mman.h>
36 #include <sys/sysmacros.h>
37 #include <dlfcn.h>
38 #include <pthread.h>
39 #include <i915_drm.h>
40
41 #include "util/hash_table.h"
42
43 #define INTEL_LOG_TAG "INTEL-SANITIZE-GPU"
44 #include "common/intel_log.h"
45 #include "common/gen_clflush.h"
46
/* Real libc entry points, resolved via dlsym(RTLD_NEXT) in init(); the
 * interposed wrappers below forward to these after doing their own
 * bookkeeping.
 */
static int (*libc_open)(const char *pathname, int flags, mode_t mode);
static int (*libc_close)(int fd);
static int (*libc_ioctl)(int fd, unsigned long request, void *argp);
static int (*libc_fcntl)(int fd, int cmd, int param);

/* Major number of DRM character devices; used to recognize DRM fds. */
#define DRM_MAJOR 226

/* TODO: we want to make sure that the padding forces
 * the BO to take another page on the (PP)GTT; 4KB
 * may or may not be the page size for the BO. Indeed,
 * depending on GPU, kernel version and GEM size, the
 * page size can be one of 4KB, 64KB or 2M.
 */
#define PADDING_SIZE 4096
61
/* A bo-size hash table shared by every fd referring to the same DRM file
 * description (e.g. after F_DUPFD_CLOEXEC); refcnt counts those fds.
 */
struct refcnt_hash_table {
   struct hash_table *t;
   int refcnt;
};

/* Single global lock guarding fds_to_bo_sizes and every table reachable
 * from it. Lock failures are unrecoverable here, so both macros abort.
 */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#define MUTEX_LOCK() do { \
   if (unlikely(pthread_mutex_lock(&mutex))) { \
      intel_loge("mutex_lock failed"); \
      abort(); \
   } \
} while (0)
#define MUTEX_UNLOCK() do { \
   if (unlikely(pthread_mutex_unlock(&mutex))) { \
      intel_loge("mutex_unlock failed"); \
      abort(); \
   } \
} while (0)

/* Maps DRM fd -> struct refcnt_hash_table*, whose inner table maps
 * GEM handle -> unpadded bo size.
 */
static struct hash_table *fds_to_bo_sizes = NULL;
82
83 static inline struct hash_table*
84 bo_size_table(int fd)
85 {
86 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
87 (void*)(uintptr_t)fd);
88 return e ? ((struct refcnt_hash_table*)e->data)->t : NULL;
89 }
90
91 static inline uint64_t
92 bo_size(int fd, uint32_t handle)
93 {
94 struct hash_table *t = bo_size_table(fd);
95 if (!t)
96 return UINT64_MAX;
97 struct hash_entry *e = _mesa_hash_table_search(t, (void*)(uintptr_t)handle);
98 return e ? (uint64_t)e->data : UINT64_MAX;
99 }
100
101 static inline bool
102 is_drm_fd(int fd)
103 {
104 return !!bo_size_table(fd);
105 }
106
107 static inline void
108 add_drm_fd(int fd)
109 {
110 struct refcnt_hash_table *r = malloc(sizeof(*r));
111 r->refcnt = 1;
112 r->t = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
113 _mesa_key_pointer_equal);
114 _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)fd,
115 (void*)(uintptr_t)r);
116 }
117
118 static inline void
119 dup_drm_fd(int old_fd, int new_fd)
120 {
121 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
122 (void*)(uintptr_t)old_fd);
123 struct refcnt_hash_table *r = e->data;
124 r->refcnt++;
125 _mesa_hash_table_insert(fds_to_bo_sizes, (void*)(uintptr_t)new_fd,
126 (void*)(uintptr_t)r);
127 }
128
129 static inline void
130 del_drm_fd(int fd)
131 {
132 struct hash_entry *e = _mesa_hash_table_search(fds_to_bo_sizes,
133 (void*)(uintptr_t)fd);
134 struct refcnt_hash_table *r = e->data;
135 if (!--r->refcnt) {
136 _mesa_hash_table_remove(fds_to_bo_sizes, e);
137 _mesa_hash_table_destroy(r->t, NULL);
138 free(r);
139 }
140 }
141
/* Our goal is not to have noise good enough for crypto,
 * but instead values that are unique-ish enough that
 * it is incredibly unlikely that a buffer overwrite
 * will produce the exact same values.
 */
/* Advance the deterministic noise sequence by one step: a simple linear
 * congruential recurrence truncated to a byte.
 */
static uint8_t
next_noise_value(uint8_t prev_noise)
{
   return (uint8_t)(prev_noise * 103u + 227u);
}
153
/* Fill dst[0..length) with the noise sequence seeded by 'start'; dst[0]
 * receives the seed itself.
 */
static void
fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
{
   uint8_t value = start;
   uint32_t remaining = length;
   uint8_t *out = dst;

   while (remaining--) {
      *out++ = value;
      value = next_noise_value(value);
   }
}
162
/* Map the PADDING_SIZE tail of the bo and verify the noise pattern written
 * at creation time is intact. Returns true when the padding is untouched
 * (or the bo is unknown to us), false on a detected overwrite or if the
 * padding cannot be mapped.
 */
static bool
padding_is_good(int fd, uint32_t handle)
{
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = handle,
      /* The padding starts right after the unpadded size we recorded. */
      .offset = bo_size(fd, handle),
      .size = PADDING_SIZE,
      .flags = 0,
   };

   /* Unknown bo, maybe prime or userptr. Ignore */
   if (mmap_arg.offset == UINT64_MAX)
      return true;

   uint8_t *mapped;
   int ret;
   uint8_t expected_value;

   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0) {
      intel_logd("Unable to map buffer %d for pad checking.", handle);
      return false;
   }

   mapped = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   /* bah-humbug, we need to see the latest contents and
    * if the bo is not cache coherent we likely need to
    * invalidate the cache lines to get it.
    */
   gen_invalidate_range(mapped, PADDING_SIZE);

   /* Replay the same sequence fill_noise_buffer() wrote, seeded with the
    * low byte of the handle.
    */
   expected_value = handle & 0xFF;
   for (uint32_t i = 0; i < PADDING_SIZE; ++i) {
      if (expected_value != mapped[i]) {
         munmap(mapped, PADDING_SIZE);
         return false;
      }
      expected_value = next_noise_value(expected_value);
   }
   munmap(mapped, PADDING_SIZE);

   return true;
}
206
/* Handle DRM_IOCTL_I915_GEM_CREATE: ask the kernel for PADDING_SIZE extra
 * bytes, fill that tail with a deterministic noise pattern, and record the
 * unpadded size so later execbuffers can verify the pattern. Returns the
 * ioctl result; the caller's view of create->size is left unchanged.
 */
static int
create_with_padding(int fd, struct drm_i915_gem_create *create)
{
   /* Transparently grow the request, then restore the caller's size so the
    * interposition stays invisible to the application.
    */
   create->size += PADDING_SIZE;
   int ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, create);
   create->size -= PADDING_SIZE;

   if (ret != 0)
      return ret;

   uint8_t *noise_values;
   struct drm_i915_gem_mmap mmap_arg = {
      .handle = create->handle,
      /* The padding lives right after the caller-visible size. */
      .offset = create->size,
      .size = PADDING_SIZE,
      .flags = 0,
   };

   /* NOTE(review): if this mmap fails we still report success to the
    * caller, but the bo is never added to the size table, so padding
    * checks are silently skipped for it.
    */
   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
   if (ret != 0)
      return 0;

   noise_values = (uint8_t*) (uintptr_t) mmap_arg.addr_ptr;
   /* Seed the pattern with the handle so distinct bos get distinct noise. */
   fill_noise_buffer(noise_values, create->handle & 0xFF,
                     PADDING_SIZE);
   munmap(noise_values, PADDING_SIZE);

   _mesa_hash_table_insert(bo_size_table(fd), (void*)(uintptr_t)create->handle,
                           (void*)(uintptr_t)create->size);

   return 0;
}
239
/* Handle the EXECBUFFER2 ioctls: submit the batch, wait for it to finish,
 * then verify the noise padding of every bo in the exec list. Aborts the
 * process when an out-of-bounds write is detected; otherwise returns the
 * first failing ioctl result, or 0.
 */
static int
exec_and_check_padding(int fd, unsigned long request,
                       struct drm_i915_gem_execbuffer2 *exec)
{
   int ret = libc_ioctl(fd, request, exec);
   if (ret != 0)
      return ret;

   struct drm_i915_gem_exec_object2 *objects =
      (void*)(uintptr_t)exec->buffers_ptr;
   /* The batch bo is either first or last depending on I915_EXEC_BATCH_FIRST. */
   uint32_t batch_bo = exec->flags & I915_EXEC_BATCH_FIRST ? objects[0].handle :
                       objects[exec->buffer_count - 1].handle;

   /* Block until the GPU is done with the batch so the padding reads below
    * observe any stray writes from this submission.
    */
   struct drm_i915_gem_wait wait = {
      .bo_handle = batch_bo,
      .timeout_ns = -1,
   };
   ret = libc_ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
   if (ret != 0)
      return ret;

   bool detected_out_of_bounds_write = false;

   /* Check every bo rather than stopping at the first bad one, so all
    * overflows in this submission get logged before we abort.
    */
   for (int i = 0; i < exec->buffer_count; i++) {
      uint32_t handle = objects[i].handle;

      if (!padding_is_good(fd, handle)) {
         detected_out_of_bounds_write = true;
         intel_loge("Detected buffer out-of-bounds write in bo %d", handle);
      }
   }

   if (unlikely(detected_out_of_bounds_write)) {
      abort();
   }

   return 0;
}
278
279 static int
280 gem_close(int fd, struct drm_gem_close *close)
281 {
282 int ret = libc_ioctl(fd, DRM_IOCTL_GEM_CLOSE, close);
283 if (ret != 0)
284 return ret;
285
286 struct hash_table *t = bo_size_table(fd);
287 struct hash_entry *e =
288 _mesa_hash_table_search(t, (void*)(uintptr_t)close->handle);
289
290 if (e)
291 _mesa_hash_table_remove(t, e);
292
293 return 0;
294 }
295
296 static bool
297 is_i915(int fd) {
298 struct stat stat;
299 if (fstat(fd, &stat))
300 return false;
301
302 if (!S_ISCHR(stat.st_mode) || major(stat.st_rdev) != DRM_MAJOR)
303 return false;
304
305 char name[5] = "";
306 drm_version_t version = {
307 .name = name,
308 .name_len = sizeof(name) - 1,
309 };
310 if (libc_ioctl(fd, DRM_IOCTL_VERSION, &version))
311 return false;
312
313 return strcmp("i915", name) == 0;
314 }
315
/* Interposed open(2): forward to libc, then start tracking the fd if it is
 * an i915 DRM device. The variadic mode argument is only actually supplied
 * by callers when O_CREAT (or O_TMPFILE) is set — reading va_arg when no
 * argument was passed is undefined behavior, so only consume it then.
 */
__attribute__ ((visibility ("default"))) int
open(const char *path, int flags, ...)
{
   mode_t mode = 0;
   int creating = O_CREAT;
#ifdef O_TMPFILE
   creating |= O_TMPFILE;
#endif

   if (flags & creating) {
      va_list args;
      va_start(args, flags);
      mode = va_arg(args, int);
      va_end(args);
   }

   int fd = libc_open(path, flags, mode);

   MUTEX_LOCK();

   if (fd >= 0 && is_i915(fd))
      add_drm_fd(fd);

   MUTEX_UNLOCK();

   return fd;
}
337
/* open64 is an alias of open: _FILE_OFFSET_BITS is #undef'd at the top of
 * this file precisely so both entry points can be interposed by one body.
 */
__attribute__ ((visibility ("default"), alias ("open"))) int
open64(const char *path, int flags, ...);
340
/* Interposed close(2): drop any DRM bookkeeping for the fd, then forward
 * to libc.
 */
__attribute__ ((visibility ("default"))) int
close(int fd)
{
   MUTEX_LOCK();

   bool tracked = is_drm_fd(fd);
   if (tracked)
      del_drm_fd(fd);

   MUTEX_UNLOCK();

   return libc_close(fd);
}
353
354 __attribute__ ((visibility ("default"))) int
355 fcntl(int fd, int cmd, ...)
356 {
357 va_list args;
358 int param;
359
360 va_start(args, cmd);
361 param = va_arg(args, int);
362 va_end(args);
363
364 int res = libc_fcntl(fd, cmd, param);
365
366 MUTEX_LOCK();
367
368 if (is_drm_fd(fd) && cmd == F_DUPFD_CLOEXEC)
369 dup_drm_fd(fd, res);
370
371 MUTEX_UNLOCK();
372
373 return res;
374 }
375
/* Interposed ioctl(2): route the GEM ioctls we instrument to our handlers
 * and forward everything else to libc.
 */
__attribute__ ((visibility ("default"))) int
ioctl(int fd, unsigned long request, ...)
{
   int res;
   va_list args;
   void *argp;

   MUTEX_LOCK();

   va_start(args, request);
   argp = va_arg(args, void *);
   va_end(args);

   /* Catch DRM fds we did not see being opened (inherited across exec, or
    * opened before this library was loaded) and start tracking them.
    */
   if (_IOC_TYPE(request) == DRM_IOCTL_BASE && !is_drm_fd(fd) && is_i915(fd)) {
      intel_loge("missed drm fd %d", fd);
      add_drm_fd(fd);
   }

   if (is_drm_fd(fd)) {
      switch (request) {
      case DRM_IOCTL_GEM_CLOSE:
         res = gem_close(fd, (struct drm_gem_close*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_CREATE:
         res = create_with_padding(fd, (struct drm_i915_gem_create*)argp);
         goto out;

      case DRM_IOCTL_I915_GEM_EXECBUFFER2:
      case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR:
         res = exec_and_check_padding(fd, request,
                                      (struct drm_i915_gem_execbuffer2*)argp);
         goto out;

      default:
         break;
      }
   }
   /* NOTE(review): the global mutex is held across this forwarded ioctl
    * too, serializing all ioctls in the process — presumably acceptable
    * for a debugging tool, but worth knowing.
    */
   res = libc_ioctl(fd, request, argp);

out:
   MUTEX_UNLOCK();
   return res;
}
420
421 static void __attribute__ ((constructor))
422 init(void)
423 {
424 fds_to_bo_sizes = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
425 _mesa_key_pointer_equal);
426 libc_open = dlsym(RTLD_NEXT, "open");
427 libc_close = dlsym(RTLD_NEXT, "close");
428 libc_fcntl = dlsym(RTLD_NEXT, "fcntl");
429 libc_ioctl = dlsym(RTLD_NEXT, "ioctl");
430 }