/*
 * Copyright © 2015-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#undef _FILE_OFFSET_BITS /* prevent #define open open64 */

#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <dlfcn.h>
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

#include "util/hash_table.h"
#include "util/u_math.h"

#define INTEL_LOG_TAG "INTEL-SANITIZE-GPU"
#include "common/intel_log.h"
#include "common/gen_clflush.h"
/* Real libc entry points, resolved in the constructor via dlsym(RTLD_NEXT).
 * Every interposed wrapper below forwards through these pointers. */
static int (*libc_open)(const char *pathname, int flags, mode_t mode);
static int (*libc_close)(int fd);
static int (*libc_ioctl)(int fd, unsigned long request, void *argp);
static int (*libc_fcntl)(int fd, int cmd, int param);
/* TODO: we want to make sure that the padding forces
 * the BO to take another page on the (PP)GTT; 4KB
 * may or may not be the page size for the BO. Indeed,
 * depending on GPU, kernel version and GEM size, the
 * page size can be one of 4KB, 64KB or 2M.
 */
61 #define PADDING_SIZE 4096
/* Per-drm-fd table of GEM handle -> bo size.  The refcnt tracks how many
 * fds (created via F_DUPFD_CLOEXEC) share the same table.
 * NOTE(review): the field list and the macro tails were not fully visible
 * in this chunk; refcnt/abort() are inferred from the dup/del paths and
 * the struct's name — confirm against the original file. */
struct refcnt_hash_table {
   uint32_t refcnt;
   struct hash_table *t;
};

pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Serialize every access to fds_to_bo_sizes and the per-fd tables. */
#define MUTEX_LOCK() do {                        \
   if (unlikely(pthread_mutex_lock(&mutex))) {   \
      intel_loge("mutex_lock failed");           \
      abort();                                   \
   }                                             \
} while (0)

#define MUTEX_UNLOCK() do {                      \
   if (unlikely(pthread_mutex_unlock(&mutex))) { \
      intel_loge("mutex_unlock failed");         \
      abort();                                   \
   }                                             \
} while (0)

/* fd -> struct refcnt_hash_table*, one entry per known i915 drm fd. */
static struct hash_table *fds_to_bo_sizes = NULL;
84 static inline struct hash_table
*
87 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
88 (void*)(uintptr_t)fd
);
89 return e
? ((struct refcnt_hash_table
*)e
->data
)->t
: NULL
;
92 static inline uint64_t
93 bo_size(int fd
, uint32_t handle
)
95 struct hash_table
*t
= bo_size_table(fd
);
98 struct hash_entry
*e
= _mesa_hash_table_search(t
, (void*)(uintptr_t)handle
);
99 return e
? (uint64_t)e
->data
: UINT64_MAX
;
105 return !!bo_size_table(fd
);
111 struct refcnt_hash_table
*r
= malloc(sizeof(*r
));
113 r
->t
= _mesa_pointer_hash_table_create(NULL
);
114 _mesa_hash_table_insert(fds_to_bo_sizes
, (void*)(uintptr_t)fd
,
115 (void*)(uintptr_t)r
);
119 dup_drm_fd(int old_fd
, int new_fd
)
121 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
122 (void*)(uintptr_t)old_fd
);
123 struct refcnt_hash_table
*r
= e
->data
;
125 _mesa_hash_table_insert(fds_to_bo_sizes
, (void*)(uintptr_t)new_fd
,
126 (void*)(uintptr_t)r
);
132 struct hash_entry
*e
= _mesa_hash_table_search(fds_to_bo_sizes
,
133 (void*)(uintptr_t)fd
);
134 struct refcnt_hash_table
*r
= e
->data
;
136 _mesa_hash_table_remove(fds_to_bo_sizes
, e
);
137 _mesa_hash_table_destroy(r
->t
, NULL
);
/* Our goal is not to have noise good enough for crypto, but instead
 * values that are unique-ish enough that it is incredibly unlikely that
 * a buffer overwrite will produce the exact same values. */
static uint8_t
next_noise_value(uint8_t prev_noise)
{
   uint32_t seed = prev_noise;
   return (seed * 103u + 227u) & 0xFF;
}
/* Fill dst with `length` bytes of deterministic noise seeded by `start`.
 * Byte 0 is `start` itself; this matches padding_is_good(), which compares
 * mapped[0] against (handle & 0xFF) before advancing the sequence. */
static void
fill_noise_buffer(uint8_t *dst, uint8_t start, uint32_t length)
{
   for (uint32_t i = 0; i < length; ++i) {
      dst[i] = start;   /* fix: the store into dst was missing — the loop
                         * only advanced the noise state and wrote nothing */
      start = next_noise_value(start);
   }
}
164 padding_is_good(int fd
, uint32_t handle
)
166 struct drm_i915_gem_mmap mmap_arg
= {
168 .offset
= align64(bo_size(fd
, handle
), 4096),
169 .size
= PADDING_SIZE
,
173 /* Unknown bo, maybe prime or userptr. Ignore */
174 if (mmap_arg
.offset
== UINT64_MAX
)
179 uint8_t expected_value
;
181 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap_arg
);
183 intel_logd("Unable to map buffer %d for pad checking.", handle
);
187 mapped
= (uint8_t*) (uintptr_t) mmap_arg
.addr_ptr
;
188 /* bah-humbug, we need to see the latest contents and
189 * if the bo is not cache coherent we likely need to
190 * invalidate the cache lines to get it.
192 gen_invalidate_range(mapped
, PADDING_SIZE
);
194 expected_value
= handle
& 0xFF;
195 for (uint32_t i
= 0; i
< PADDING_SIZE
; ++i
) {
196 if (expected_value
!= mapped
[i
]) {
197 munmap(mapped
, PADDING_SIZE
);
200 expected_value
= next_noise_value(expected_value
);
202 munmap(mapped
, PADDING_SIZE
);
208 create_with_padding(int fd
, struct drm_i915_gem_create
*create
)
210 uint64_t original_size
= create
->size
;
212 create
->size
= align64(original_size
, 4096) + PADDING_SIZE
;
213 int ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_CREATE
, create
);
214 create
->size
= original_size
;
219 uint8_t *noise_values
;
220 struct drm_i915_gem_mmap mmap_arg
= {
221 .handle
= create
->handle
,
222 .offset
= align64(create
->size
, 4096),
223 .size
= PADDING_SIZE
,
227 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_MMAP
, &mmap_arg
);
229 intel_logd("Unable to map buffer %d for pad creation.\n", create
->handle
);
233 noise_values
= (uint8_t*) (uintptr_t) mmap_arg
.addr_ptr
;
234 fill_noise_buffer(noise_values
, create
->handle
& 0xFF,
236 munmap(noise_values
, PADDING_SIZE
);
238 _mesa_hash_table_insert(bo_size_table(fd
), (void*)(uintptr_t)create
->handle
,
239 (void*)(uintptr_t)create
->size
);
245 exec_and_check_padding(int fd
, unsigned long request
,
246 struct drm_i915_gem_execbuffer2
*exec
)
248 int ret
= libc_ioctl(fd
, request
, exec
);
252 struct drm_i915_gem_exec_object2
*objects
=
253 (void*)(uintptr_t)exec
->buffers_ptr
;
254 uint32_t batch_bo
= exec
->flags
& I915_EXEC_BATCH_FIRST
? objects
[0].handle
:
255 objects
[exec
->buffer_count
- 1].handle
;
257 struct drm_i915_gem_wait wait
= {
258 .bo_handle
= batch_bo
,
261 ret
= libc_ioctl(fd
, DRM_IOCTL_I915_GEM_WAIT
, &wait
);
265 bool detected_out_of_bounds_write
= false;
267 for (int i
= 0; i
< exec
->buffer_count
; i
++) {
268 uint32_t handle
= objects
[i
].handle
;
270 if (!padding_is_good(fd
, handle
)) {
271 detected_out_of_bounds_write
= true;
272 intel_loge("Detected buffer out-of-bounds write in bo %d", handle
);
276 if (unlikely(detected_out_of_bounds_write
)) {
284 gem_close(int fd
, struct drm_gem_close
*close
)
286 int ret
= libc_ioctl(fd
, DRM_IOCTL_GEM_CLOSE
, close
);
290 struct hash_table
*t
= bo_size_table(fd
);
291 struct hash_entry
*e
=
292 _mesa_hash_table_search(t
, (void*)(uintptr_t)close
->handle
);
295 _mesa_hash_table_remove(t
, e
);
303 if (fstat(fd
, &stat
))
306 if (!S_ISCHR(stat
.st_mode
) || major(stat
.st_rdev
) != DRM_MAJOR
)
310 drm_version_t version
= {
312 .name_len
= sizeof(name
) - 1,
314 if (libc_ioctl(fd
, DRM_IOCTL_VERSION
, &version
))
317 return strcmp("i915", name
) == 0;
/* Interposed open(2): forwards to libc, then registers the fd when it
 * turns out to be an i915 device node. */
__attribute__ ((visibility ("default"))) int
open(const char *path, int flags, ...)
{
   va_list args;
   mode_t mode;

   /* open(2) is variadic; the visible code fetches mode unconditionally,
    * matching the original's behavior. */
   va_start(args, flags);
   mode = va_arg(args, int);
   va_end(args);

   int fd = libc_open(path, flags, mode);

   MUTEX_LOCK();
   if (fd >= 0 && is_i915(fd))
      add_drm_fd(fd);
   MUTEX_UNLOCK();

   return fd;
}
/* open64 aliases open; _FILE_OFFSET_BITS was #undef'ed at the top of the
 * file so the two symbols stay distinct and both get interposed. */
__attribute__ ((visibility ("default"), alias ("open"))) int
open64(const char *path, int flags, ...);
/* Interposed close(2): drops the fd's tracking state, then forwards.
 * NOTE(review): only the attribute line and the final return were visible
 * in this chunk; the is_drm_fd/del_drm_fd body is reconstructed from the
 * registration/unregistration pattern used by the other wrappers — confirm. */
__attribute__ ((visibility ("default"))) int
close(int fd)
{
   MUTEX_LOCK();
   if (is_drm_fd(fd))
      del_drm_fd(fd);
   MUTEX_UNLOCK();

   return libc_close(fd);
}
358 __attribute__ ((visibility ("default"))) int
359 fcntl(int fd
, int cmd
, ...)
365 param
= va_arg(args
, int);
368 int res
= libc_fcntl(fd
, cmd
, param
);
372 if (is_drm_fd(fd
) && cmd
== F_DUPFD_CLOEXEC
)
380 __attribute__ ((visibility ("default"))) int
381 ioctl(int fd
, unsigned long request
, ...)
389 va_start(args
, request
);
390 argp
= va_arg(args
, void *);
393 if (_IOC_TYPE(request
) == DRM_IOCTL_BASE
&& !is_drm_fd(fd
) && is_i915(fd
)) {
394 intel_loge("missed drm fd %d", fd
);
400 case DRM_IOCTL_GEM_CLOSE
:
401 res
= gem_close(fd
, (struct drm_gem_close
*)argp
);
404 case DRM_IOCTL_I915_GEM_CREATE
:
405 res
= create_with_padding(fd
, (struct drm_i915_gem_create
*)argp
);
408 case DRM_IOCTL_I915_GEM_EXECBUFFER2
:
409 case DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
:
410 res
= exec_and_check_padding(fd
, request
,
411 (struct drm_i915_gem_execbuffer2
*)argp
);
418 res
= libc_ioctl(fd
, request
, argp
);
425 static void __attribute__ ((constructor
))
428 fds_to_bo_sizes
= _mesa_pointer_hash_table_create(NULL
);
429 libc_open
= dlsym(RTLD_NEXT
, "open");
430 libc_close
= dlsym(RTLD_NEXT
, "close");
431 libc_fcntl
= dlsym(RTLD_NEXT
, "fcntl");
432 libc_ioctl
= dlsym(RTLD_NEXT
, "ioctl");