/*
 * Copyright © 2015-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#undef _FILE_OFFSET_BITS /* prevent #define open open64 */

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

#include <i915_drm.h>

#include "util/hash_table.h"
#include "util/macros.h"

/* Must be defined before intel_log.h is pulled in so the log tag applies. */
#define INTEL_LOG_TAG "INTEL-SANITIZE-GPU"
#include "common/intel_log.h"
#include "common/gen_clflush.h"
47 static int (*libc_open
)(const char *pathname
, int flags
, mode_t mode
);
48 static int (*libc_close
)(int fd
);
49 static int (*libc_ioctl
)(int fd
, unsigned long request
, void *argp
);
50 static int (*libc_fcntl
)(int fd
, int cmd
, int param
);
54 /* TODO: we want to make sure that the padding forces
55 * the BO to take another page on the (PP)GTT; 4KB
56 * may or may not be the page size for the BO. Indeed,
57 * depending on GPU, kernel version and GEM size, the
58 * page size can be one of 4KB, 64KB or 2M.
60 #define PADDING_SIZE 4096
62 struct refcnt_hash_table
{
67 pthread_mutex_t mutex
= PTHREAD_MUTEX_INITIALIZER
;
68 #define MUTEX_LOCK() do { \
69 if (unlikely(pthread_mutex_lock(&mutex))) { \
70 intel_loge("mutex_lock failed"); \
74 #define MUTEX_UNLOCK() do { \
75 if (unlikely(pthread_mutex_unlock(&mutex))) { \
76 intel_loge("mutex_unlock failed"); \
81 static struct hash_table
*fds_to_bo_sizes
= NULL
;
83 static inline struct hash_table
*
86 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
87 (void*)(uintptr_t)fd
);
88 return e
? ((struct refcnt_hash_table
*)e
->data
)->t
: NULL
;
91 static inline uint64_t
92 bo_size(int fd
, uint32_t handle
)
94 struct hash_table
*t
= bo_size_table(fd
);
97 struct hash_entry
*e
= _mesa_hash_table_search(t
, (void*)(uintptr_t)handle
);
98 return e
? (uint64_t)e
->data
: UINT64_MAX
;
104 return !!bo_size_table(fd
);
110 struct refcnt_hash_table
*r
= malloc(sizeof(*r
));
112 r
->t
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
113 _mesa_key_pointer_equal
);
114 _mesa_hash_table_insert(fds_to_bo_sizes
, (void*)(uintptr_t)fd
,
115 (void*)(uintptr_t)r
);
119 dup_drm_fd(int old_fd
, int new_fd
)
121 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
122 (void*)(uintptr_t)old_fd
);
123 struct refcnt_hash_table
*r
= e
->data
;
125 _mesa_hash_table_insert(fds_to_bo_sizes
, (void*)(uintptr_t)new_fd
,
126 (void*)(uintptr_t)r
);
132 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
133 (void*)(uintptr_t)fd
);
134 struct refcnt_hash_table
*r
= e
->data
;
136 _mesa_hash_table_remove(fds_to_bo_sizes
, e
);
137 _mesa_hash_table_destroy(r
->t
, NULL
);
142 /* Our goal is not to have noise good enough for cryto,
143 * but instead values that are unique-ish enough that
144 * it is incredibly unlikely that a buffer overwrite
145 * will produce the exact same values.
148 next_noise_value(uint8_t prev_noise
)
150 uint32_t v
= prev_noise
;
151 return (v
* 103u + 227u) & 0xFF;
155 fill_noise_buffer(uint8_t *dst
, uint8_t start
, uint32_t length
)
157 for(uint32_t i
= 0; i
< length
; ++i
) {
159 start
= next_noise_value(start
);
164 padding_is_good(int fd
, uint32_t handle
)
166 struct drm_i915_gem_mmap mmap_arg
= {
168 .offset
= bo_size(fd
, handle
),
169 .size
= PADDING_SIZE
,
173 /* Unknown bo, maybe prime or userptr. Ignore */
174 if (mmap_arg
.offset
== UINT64_MAX
)
179 uint8_t expected_value
;
181 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap_arg
);
183 intel_logd("Unable to map buffer %d for pad checking.", handle
);
187 mapped
= (uint8_t*) (uintptr_t) mmap_arg
.addr_ptr
;
188 /* bah-humbug, we need to see the latest contents and
189 * if the bo is not cache coherent we likely need to
190 * invalidate the cache lines to get it.
192 gen_invalidate_range(mapped
, PADDING_SIZE
);
194 expected_value
= handle
& 0xFF;
195 for (uint32_t i
= 0; i
< PADDING_SIZE
; ++i
) {
196 if (expected_value
!= mapped
[i
]) {
197 munmap(mapped
, PADDING_SIZE
);
200 expected_value
= next_noise_value(expected_value
);
202 munmap(mapped
, PADDING_SIZE
);
208 create_with_padding(int fd
, struct drm_i915_gem_create
*create
)
210 create
->size
+= PADDING_SIZE
;
211 int ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_CREATE
, create
);
212 create
->size
-= PADDING_SIZE
;
217 uint8_t *noise_values
;
218 struct drm_i915_gem_mmap mmap_arg
= {
219 .handle
= create
->handle
,
220 .offset
= create
->size
,
221 .size
= PADDING_SIZE
,
225 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap_arg
);
229 noise_values
= (uint8_t*) (uintptr_t) mmap_arg
.addr_ptr
;
230 fill_noise_buffer(noise_values
, create
->handle
& 0xFF,
232 munmap(noise_values
, PADDING_SIZE
);
234 _mesa_hash_table_insert(bo_size_table(fd
), (void*)(uintptr_t)create
->handle
,
235 (void*)(uintptr_t)create
->size
);
241 exec_and_check_padding(int fd
, unsigned long request
,
242 struct drm_i915_gem_execbuffer2
*exec
)
244 int ret
= libc_ioctl(fd
, request
, exec
);
248 struct drm_i915_gem_exec_object2
*objects
=
249 (void*)(uintptr_t)exec
->buffers_ptr
;
250 uint32_t batch_bo
= exec
->flags
& I915_EXEC_BATCH_FIRST
? objects
[0].handle
:
251 objects
[exec
->buffer_count
- 1].handle
;
253 struct drm_i915_gem_wait wait
= {
254 .bo_handle
= batch_bo
,
257 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_WAIT
, &wait
);
261 bool detected_out_of_bounds_write
= false;
263 for (int i
= 0; i
< exec
->buffer_count
; i
++) {
264 uint32_t handle
= objects
[i
].handle
;
266 if (!padding_is_good(fd
, handle
)) {
267 detected_out_of_bounds_write
= true;
268 intel_loge("Detected buffer out-of-bounds write in bo %d", handle
);
272 if (unlikely(detected_out_of_bounds_write
)) {
280 gem_close(int fd
, struct drm_gem_close
*close
)
282 int ret
= libc_ioctl(fd
, DRM_IOCTL_GEM_CLOSE
, close
);
286 struct hash_table
*t
= bo_size_table(fd
);
287 struct hash_entry
*e
=
288 _mesa_hash_table_search(t
, (void*)(uintptr_t)close
->handle
);
291 _mesa_hash_table_remove(t
, e
);
299 if (fstat(fd
, &stat
))
302 if (!S_ISCHR(stat
.st_mode
) || major(stat
.st_rdev
) != DRM_MAJOR
)
306 drm_version_t version
= {
308 .name_len
= sizeof(name
) - 1,
310 if (libc_ioctl(fd
, DRM_IOCTL_VERSION
, &version
))
313 return strcmp("i915", name
) == 0;
316 __attribute__ ((visibility ("default"))) int
317 open(const char *path
, int flags
, ...)
322 va_start(args
, flags
);
323 mode
= va_arg(args
, int);
326 int fd
= libc_open(path
, flags
, mode
);
330 if (fd
>= 0 && is_i915(fd
))
338 __attribute__ ((visibility ("default"), alias ("open"))) int
339 open64(const char *path
, int flags
, ...);
341 __attribute__ ((visibility ("default"))) int
351 return libc_close(fd
);
354 __attribute__ ((visibility ("default"))) int
355 fcntl(int fd
, int cmd
, ...)
361 param
= va_arg(args
, int);
364 int res
= libc_fcntl(fd
, cmd
, param
);
368 if (is_drm_fd(fd
) && cmd
== F_DUPFD_CLOEXEC
)
376 __attribute__ ((visibility ("default"))) int
377 ioctl(int fd
, unsigned long request
, ...)
385 va_start(args
, request
);
386 argp
= va_arg(args
, void *);
389 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&& !is_drm_fd(fd
) && is_i915(fd
)) {
390 intel_loge("missed drm fd %d", fd
);
396 case DRM_IOCTL_GEM_CLOSE
:
397 res
= gem_close(fd
, (struct drm_gem_close
*)argp
);
400 case DRM_IOCTL_I915_GEM_CREATE
:
401 res
= create_with_padding(fd
, (struct drm_i915_gem_create
*)argp
);
404 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
405 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
:
406 res
= exec_and_check_padding(fd
, request
,
407 (struct drm_i915_gem_execbuffer2
*)argp
);
414 res
= libc_ioctl(fd
, request
, argp
);
421 static void __attribute__ ((constructor
))
424 fds_to_bo_sizes
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
425 _mesa_key_pointer_equal
);
426 libc_open
= dlsym(RTLD_NEXT
, "open");
427 libc_close
= dlsym(RTLD_NEXT
, "close");
428 libc_fcntl
= dlsym(RTLD_NEXT
, "fcntl");
429 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");