+2017-10-19  Jakub Jelinek  <jakub@redhat.com>
+
+	* ubsan.c (ubsan_expand_null_ifn): Use _v1 suffixed type mismatch
+	builtins; store max (log2 (align), 0) into the uchar field instead
+	of align into the uptr field (see the layout sketch below).
+	(ubsan_expand_objsize_ifn): Use _v1 suffixed type mismatch builtins;
+	store 0 into the uchar field instead of into the uptr field.
+	(instrument_nonnull_return): Use _v1 suffixed nonnull return builtin;
+	instead of passing one address of a struct with 2 locations, pass
+	two addresses of structs with 1 location each.
+ * sanitizer.def (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
+ BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT): Removed.
+ (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1,
+ BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1,
+ BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT): New builtins.
+
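For reference, a compilable C sketch of the data-layout change behind the
_v1 type mismatch handlers.  The struct and field names here are
illustrative (libubsan spells them differently); only the uptr-to-uchar
switch is taken from the entry above.

#include <stdint.h>

typedef uintptr_t uptr;

struct source_location { const char *file; uint32_t line, column; };

/* Old layout, consumed by __ubsan_handle_type_mismatch: the alignment
   travels as a full pointer-sized integer.  */
struct type_mismatch_data
{
  struct source_location loc;
  const void *type;		/* TypeDescriptor in libubsan.  */
  uptr alignment;
  unsigned char type_check_kind;
};

/* New layout, consumed by __ubsan_handle_type_mismatch_v1: the compiler
   now stores max (log2 (align), 0), so a single byte suffices.  */
struct type_mismatch_data_v1
{
  struct source_location loc;
  const void *type;
  unsigned char log_alignment;
  unsigned char type_check_kind;
};
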
2017-10-19 Martin Liska <mliska@suse.cz>
PR driver/81829
"__ubsan_handle_vla_bound_not_positive",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
-DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
- "__ubsan_handle_type_mismatch",
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1,
+ "__ubsan_handle_type_mismatch_v1",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW,
"__ubsan_handle_vla_bound_not_positive_abort",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
-DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
- "__ubsan_handle_type_mismatch_abort",
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT,
+ "__ubsan_handle_type_mismatch_v1_abort",
BT_FN_VOID_PTR_PTR,
ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_ADD_OVERFLOW_ABORT,
"__ubsan_handle_nonnull_arg_abort",
BT_FN_VOID_PTR,
ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
-DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
- "__ubsan_handle_nonnull_return",
- BT_FN_VOID_PTR,
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1,
+ "__ubsan_handle_nonnull_return_v1",
+ BT_FN_VOID_PTR_PTR,
ATTR_COLD_NOTHROW_LEAF_LIST)
-DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT,
- "__ubsan_handle_nonnull_return_abort",
- BT_FN_VOID_PTR,
+DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT,
+ "__ubsan_handle_nonnull_return_v1_abort",
+ BT_FN_VOID_PTR_PTR,
ATTR_COLD_NORETURN_NOTHROW_LEAF_LIST)
DEF_SANITIZER_BUILTIN(BUILT_IN_UBSAN_HANDLE_DYNAMIC_TYPE_CACHE_MISS,
"__ubsan_handle_dynamic_type_cache_miss",
2017-10-19  Jakub Jelinek  <jakub@redhat.com>
+ * c-c++-common/ubsan/float-cast-overflow-1.c: Drop value keyword
+ from expected output regexps.
+ * c-c++-common/ubsan/float-cast-overflow-2.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-3.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-4.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-5.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-6.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
+ * c-c++-common/ubsan/float-cast-overflow-10.c: Likewise.
+ * g++.dg/ubsan/float-cast-overflow-bf.C: Likewise.
+ * gcc.dg/ubsan/float-cast-overflow-bf.c: Likewise.
+ * g++.dg/asan/default-options-1.C (__asan_default_options): Add
+ used attribute.
+ * g++.dg/asan/asan_test.C: Run with ASAN_OPTIONS=handle_segv=2
+ in the environment.
+
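A minimal sketch of the __asan_default_options pattern touched above:
nothing in the test references the hook, so without the used attribute the
definition can be discarded before the ASan runtime resolves it at
startup.  The option string below is an arbitrary example, not the one in
the test.

/* In the C++ test this also needs extern "C" linkage.  */
__attribute__((used))
const char *
__asan_default_options (void)
{
  return "verbosity=1";
}
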
PR target/82580
* gcc.target/i386/pr82580.c: Use {\msbb} instead of "sbb" in
scan-assembler-times. Check that there are no movzb* instructions
return 0;
}
-/* { dg-output "value -133 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32773 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 32768 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 32768.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 32772 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 65536 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 65536.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 65540 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type 'long long unsigned int'" } */
+/* { dg-output " -133 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128.5 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 132 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 260 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32773 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 32768 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 32768.5 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 32772 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 65536 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 65536.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 65540 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 9.22337e\\\+18 is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.84467e\\\+19 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type 'long long unsigned int'" } */
#include "float-cast-overflow-8.c"
/* _Decimal32 */
-/* { dg-output "value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output " <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
/* _Decimal64 */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
/* _Decimal128 */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
return 0;
}
-/* { dg-output "runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -?nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -1.5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -?nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value inf is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*runtime error: value -inf is outside the range of representable values of type '__int128 unsigned'" } */
+/* { dg-output "runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 1.70141e\\\+38 is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -?nan is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -inf is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: 3.40282e\\\+38 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -1.5 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -?nan is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: inf is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]*runtime error: -inf is outside the range of representable values of type '__int128 unsigned'" } */
return 0;
}
-/* { dg-output "value -133* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
+/* { dg-output " -133* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type" } */
return 0;
}
-/* { dg-output "value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -inf is outside the range of representable values of type" } */
+/* { dg-output " -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -?nan is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* inf is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -inf is outside the range of representable values of type" } */
return 0;
}
-/* { dg-output "value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[^\n\r]* is outside the range of representable values of type" } */
+/* { dg-output " \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[^\n\r]* is outside the range of representable values of type" } */
return 0;
}
-/* { dg-output "value -133 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
+/* { dg-output " -133 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -129 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 128.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 132 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 256.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 260 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type" } */
}
/* float */
-/* { dg-output "value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output " -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
/* No error for float and __int128 unsigned max value, as ui128_MAX is +Inf in float. */
/* double */
-/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]* -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
/* long double */
-/* { dg-output "\[^\n\r]*value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]* -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" { target { ilp32 || lp64 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target { int128 } } } */
#include "float-cast-overflow-8.c"
/* __float80 */
-/* { dg-output "value -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output " -129 is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* (-129|-1) is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -32769 is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* \[0-9.e+-]* is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
/* __float128 */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
-/* { dg-output "\[^\n\r]*value <unknown> is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'signed char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned char'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'short unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type 'long long unsigned int'\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type '__int128'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
+/* { dg-output "\[^\n\r]* <unknown> is outside the range of representable values of type '__int128 unsigned'\[^\n\r]*(\n|\r\n|\r)" { target int128 } } */
// { dg-additional-options "-DASAN_AVOID_EXPENSIVE_TESTS=1" { target { ! run_expensive_tests } } }
// { dg-additional-options "-msse2" { target { i?86-*-linux* x86_64-*-linux* } } }
// { dg-additional-options "-D__NO_INLINE__" { target { *-*-linux-gnu } } }
+// { dg-set-target-env-var ASAN_OPTIONS "handle_segv=2" }
// { dg-final { asan-gtest } }
#include "asan_test.cc"
const char *kAsanDefaultOptions="verbosity=1 foo=bar";
extern "C"
-__attribute__((no_sanitize_address))
+__attribute__((no_sanitize_address, used))
const char *__asan_default_options() {
return kAsanDefaultOptions;
}
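
(The added `used` attribute matters because __asan_default_options is never referenced from the test itself: the runtime resolves the symbol through its weak-hook machinery, so without `used` the compiler is free to drop the definition as dead code. A minimal sketch of the hook as this test shapes it:

    // 'used' forces the definition to be emitted even though nothing in
    // this translation unit calls it; the asan runtime resolves the weak
    // symbol at startup and applies the returned option string.
    extern "C" __attribute__((no_sanitize_address, used))
    const char *__asan_default_options () {
      return "verbosity=1 foo=bar";
    }
)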
return 0;
}
-/* { dg-output "value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type" } */
+/* { dg-output " -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.14748e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 4.29497e\\\+09 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type" } */
return 0;
}
-/* { dg-output "value -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
-/* { dg-output "\[^\n\r]*value 2.5 is outside the range of representable values of type" } */
+/* { dg-output " -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1.5 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* -1 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2 is outside the range of representable values of type\[^\n\r]*(\n|\r\n|\r)" } */
+/* { dg-output "\[^\n\r]* 2.5 is outside the range of representable values of type" } */
enum built_in_function bcode
= (flag_sanitize_recover & ((check_align ? SANITIZE_ALIGNMENT : 0)
| (check_null ? SANITIZE_NULL : 0)))
- ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH
- : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT;
+ ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1
+ : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT;
tree fn = builtin_decl_implicit (bcode);
+ int align_log = tree_log2 (align);
tree data
= ubsan_create_data ("__ubsan_null_data", 1, &loc,
ubsan_type_descriptor (TREE_TYPE (ckind),
UBSAN_PRINT_POINTER),
NULL_TREE,
- align,
+ build_int_cst (unsigned_char_type_node,
+ MAX (align_log, 0)),
fold_convert (unsigned_char_type_node, ckind),
NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
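
(For context: the _v1 type-mismatch ABI shrinks the pointer-sized alignment field to a single byte holding log2 of the alignment, which is why the code above stores MAX (tree_log2 (align), 0) into an unsigned_char_type_node constant; tree_log2 yields a negative value when align is not a power of two, and the MAX clamps that to 0. A C++ sketch of the layout change, with source_location and type_descriptor as stand-ins for the runtime's real types:

    struct source_location { const char *file; unsigned line, col; };
    struct type_descriptor;   // opaque here

    // Pre-v1 data layout: full alignment in a pointer-sized field.
    struct type_mismatch_data {
      source_location loc;
      const type_descriptor *type;
      unsigned long alignment;       // e.g. 8
      unsigned char type_check_kind;
    };

    // v1 data layout: one byte holding log2 of the alignment.
    struct type_mismatch_data_v1 {
      source_location loc;
      const type_descriptor *type;
      unsigned char log_alignment;   // e.g. 3, since 1 << 3 == 8
      unsigned char type_check_kind;
    };
)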
ubsan_type_descriptor (TREE_TYPE (ptr),
UBSAN_PRINT_POINTER),
NULL_TREE,
- build_zero_cst (pointer_sized_int_node),
+ build_zero_cst (unsigned_char_type_node),
ckind,
NULL_TREE);
data = build_fold_addr_expr_loc (loc, data);
enum built_in_function bcode
= (flag_sanitize_recover & SANITIZE_OBJECT_SIZE)
- ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH
- : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT;
+ ? BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1
+ : BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT;
tree p = make_ssa_name (pointer_sized_int_node);
g = gimple_build_assign (p, NOP_EXPR, ptr);
gimple_set_location (g, loc);
else
{
tree data = ubsan_create_data ("__ubsan_nonnull_return_data",
- 2, loc, NULL_TREE, NULL_TREE);
+ 1, &loc[1], NULL_TREE, NULL_TREE);
data = build_fold_addr_expr_loc (loc[0], data);
+ tree data2 = ubsan_create_data ("__ubsan_nonnull_return_data",
+ 1, &loc[0], NULL_TREE, NULL_TREE);
+ data2 = build_fold_addr_expr_loc (loc[0], data2);
enum built_in_function bcode
= (flag_sanitize_recover & SANITIZE_RETURNS_NONNULL_ATTRIBUTE)
- ? BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN
- : BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT;
+ ? BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1
+ : BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT;
tree fn = builtin_decl_explicit (bcode);
- g = gimple_build_call (fn, 1, data);
+ g = gimple_build_call (fn, 2, data, data2);
}
gimple_set_location (g, loc[0]);
gsi_insert_before (gsi, g, GSI_SAME_STMT);
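
(The nonnull-return rewrite above follows the upstream _v1 handler, which, as best as can be told from compiler-rt, takes two pointers instead of one blob with two locations: a data record holding the attribute's source location, plus a second pointer whose pointee begins with the return statement's location. GCC therefore builds two ubsan_create_data objects, one location each, and passes both. A hedged C++ sketch of the interface change (names approximate):

    struct source_location;      // runtime's location record
    struct nonnull_return_data;  // begins with the attribute's location

    // Pre-v1: a single data blob carrying both locations.
    extern "C" void __ubsan_handle_nonnull_return (void *data);

    // v1: two arguments; in the code above, data wraps loc[1] (the
    // attribute) and data2 wraps loc[0] (the return statement).
    extern "C" void
    __ubsan_handle_nonnull_return_v1 (nonnull_return_data *data,
                                      source_location *loc_ptr);
)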
+2017-10-19 Jakub Jelinek <jakub@redhat.com>
+
+ * All source files: Merge from upstream 315899.
+ * asan/Makefile.am (nodist_saninclude_HEADERS): Add
+ include/sanitizer/tsan_interface.h.
+ * asan/libtool-version: Bump the libasan SONAME.
+ * lsan/Makefile.am (sanitizer_lsan_files): Add lsan_common_mac.cc.
+ (lsan_files): Add lsan_linux.cc, lsan_mac.cc and lsan_malloc_mac.cc.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Add
+ sancov_flags.cc, sanitizer_allocator_checks.cc,
+ sanitizer_coverage_libcdep_new.cc, sanitizer_errno.cc,
+ sanitizer_file.cc, sanitizer_mac_libcdep.cc and
+ sanitizer_stoptheworld_mac.cc. Remove sanitizer_coverage_libcdep.cc
+ and sanitizer_coverage_mapping_libcdep.cc.
+ * tsan/Makefile.am (tsan_files): Add tsan_external.cc.
+ * ubsan/Makefile.am (DEFS): Add -DUBSAN_CAN_USE_CXXABI=1.
+ (ubsan_files): Add ubsan_init_standalone.cc and
+ ubsan_signals_standalone.cc.
+ * ubsan/libtool-version: Bump the libubsan SONAME.
+ * asan/Makefile.in: Regenerate.
+ * lsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * tsan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Regenerate.
+
2017-10-05 H.J. Lu <hongjiu.lu@intel.com>
PR sanitizer/82379
-285547
+315899
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
SUBDIRS += lsan asan ubsan
nodist_saninclude_HEADERS += \
include/sanitizer/lsan_interface.h \
- include/sanitizer/asan_interface.h
+ include/sanitizer/asan_interface.h \
+ include/sanitizer/tsan_interface.h
if TSAN_SUPPORTED
SUBDIRS += tsan
endif
target_triplet = @target@
@SANITIZER_SUPPORTED_TRUE@am__append_1 = include/sanitizer/common_interface_defs.h \
@SANITIZER_SUPPORTED_TRUE@ include/sanitizer/lsan_interface.h \
-@SANITIZER_SUPPORTED_TRUE@ include/sanitizer/asan_interface.h
+@SANITIZER_SUPPORTED_TRUE@ include/sanitizer/asan_interface.h \
+@SANITIZER_SUPPORTED_TRUE@ include/sanitizer/tsan_interface.h
@SANITIZER_SUPPORTED_TRUE@@USING_MAC_INTERPOSE_FALSE@am__append_2 = interception
@LIBBACKTRACE_SUPPORTED_TRUE@@SANITIZER_SUPPORTED_TRUE@am__append_3 = libbacktrace
@SANITIZER_SUPPORTED_TRUE@@TSAN_SUPPORTED_TRUE@am__append_4 = tsan
asan_flags.cc \
asan_globals.cc \
asan_interceptors.cc \
+ asan_interceptors_memintrinsics.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
+ asan_shadow_setup.cc \
asan_stack.cc \
asan_stats.cc \
asan_suppressions.cc \
am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
asan_descriptions.lo asan_errors.lo asan_fake_stack.lo \
asan_flags.lo asan_globals.lo asan_interceptors.lo \
- asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
- asan_malloc_mac.lo asan_malloc_win.lo asan_memory_profile.lo \
- asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
- asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
- asan_suppressions.lo asan_thread.lo asan_win.lo \
- asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo
+ asan_interceptors_memintrinsics.lo asan_linux.lo asan_mac.lo \
+ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+ asan_memory_profile.lo asan_new_delete.lo asan_poisoning.lo \
+ asan_posix.lo asan_report.lo asan_rtl.lo asan_shadow_setup.lo \
+ asan_stack.lo asan_stats.lo asan_suppressions.lo \
+ asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \
+ asan_win_dynamic_runtime_thunk.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
asan_flags.cc \
asan_globals.cc \
asan_interceptors.cc \
+ asan_interceptors_memintrinsics.cc \
asan_linux.cc \
asan_mac.cc \
asan_malloc_linux.cc \
asan_posix.cc \
asan_report.cc \
asan_rtl.cc \
+ asan_shadow_setup.cc \
asan_stack.cc \
asan_stats.cc \
asan_suppressions.cc \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors_memintrinsics.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_shadow_setup.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@
void Print() {
Report(
- "quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
- "malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
- allocator_options.quarantine_size_mb, allocator_options.max_redzone,
- poison_heap, malloc_context_size,
+ "quarantine_size_mb %d, thread_local_quarantine_size_kb %d, "
+ "max_redzone %d, poison_heap %d, malloc_context_size %d, "
+ "alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, "
+ "coverage_dir %s, allocator_release_to_os_interval_ms %d\n",
+ allocator_options.quarantine_size_mb,
+ allocator_options.thread_local_quarantine_size_kb,
+ allocator_options.max_redzone, poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
- allocator_options.may_return_null, coverage, coverage_dir);
+ allocator_options.may_return_null, coverage, coverage_dir,
+ allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
// Deactivate the runtime.
SetCanPoisonMemory(false);
SetMallocContextSize(1);
- ReInitializeCoverage(false, nullptr);
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
+ disabled.thread_local_quarantine_size_kb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;
SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
- ReInitializeCoverage(asan_deactivated_flags.coverage,
- asan_deactivated_flags.coverage_dir);
ReInitializeAllocator(asan_deactivated_flags.allocator_options);
asan_is_deactivated = false;
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
}
void *Allocate(uptr size) {
- return get_allocator().Allocate(cache_, size, 1, false);
+ void *res = get_allocator().Allocate(cache_, size, 1);
+ // TODO(alekseys): Consider making quarantine OOM-friendly.
+ if (UNLIKELY(!res))
+ return DieOnFailure::OnOOM();
+ return res;
}
void Deallocate(void *p) {
void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
quarantine_size_mb = f->quarantine_size_mb;
+ thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
min_redzone = f->redzone;
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+ release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->quarantine_size_mb = quarantine_size_mb;
+ f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
f->redzone = min_redzone;
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+ cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
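
(The two new options round-tripped above are plain unit conversions away from byte counts; SharedInitCode in a later hunk shifts them into place. Worked arithmetic with hypothetical flag values:

    // Hypothetical values; the flags are in MB and KB respectively.
    int quarantine_size_mb = 256;
    int thread_local_quarantine_size_kb = 64;

    unsigned long global_bytes =
      (unsigned long) quarantine_size_mb << 20;               // 268435456 (256 MiB)
    unsigned long per_thread_bytes =
      (unsigned long) thread_local_quarantine_size_kb << 10;  // 65536 (64 KiB)

the second of which replaces the hard-coded kMaxThreadLocalQuarantine constant removed further down.)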
struct Allocator {
static const uptr kMaxAllowedMallocSize =
FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
- static const uptr kMaxThreadLocalQuarantine =
- FIRST_32_SECOND_64(1 << 18, 1 << 20);
AsanAllocator allocator;
AsanQuarantine quarantine;
AllocatorCache fallback_allocator_cache;
QuarantineCache fallback_quarantine_cache;
+ atomic_uint8_t rss_limit_exceeded;
+
// ------------------- Options --------------------------
atomic_uint16_t min_redzone;
atomic_uint16_t max_redzone;
void SharedInitCode(const AllocatorOptions &options) {
CheckOptions(options);
quarantine.Init((uptr)options.quarantine_size_mb << 20,
- kMaxThreadLocalQuarantine);
+ (uptr)options.thread_local_quarantine_size_kb << 10);
atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
memory_order_release);
atomic_store(&min_redzone, options.min_redzone, memory_order_release);
}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null);
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.Init(options.release_to_os_interval_ms);
SharedInitCode(options);
}
+ bool RssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+ }
+
+ void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+ }
+
void RePoisonChunk(uptr chunk) {
- // This could a user-facing chunk (with redzones), or some internal
+ // This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
- if (chunk < beg && beg < end && end <= chunk_end) {
- // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
- // poison the redzones.
+ if (chunk < beg && beg < end && end <= chunk_end &&
+ ac->chunk_state == CHUNK_ALLOCATED) {
+ // Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
- // This can not be an AsanChunk. Poison everything. It may be reused as
- // AsanChunk later.
+ // This is either not an AsanChunk or freed or quarantined AsanChunk.
+ // In either case, poison everything.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
}
void ReInitialize(const AllocatorOptions &options) {
- allocator.SetMayReturnNull(options.may_return_null);
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
// Poison all existing allocation's redzones.
void GetOptions(AllocatorOptions *options) const {
options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
- options->may_return_null = allocator.MayReturnNull();
+ options->may_return_null = AllocatorMayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
+ if (RssLimitExceeded())
+ return AsanAllocator::FailureHandler::OnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDieOnBadRequest();
+ return AsanAllocator::FailureHandler::OnBadRequest();
}
AsanThread *t = GetCurrentThread();
void *allocated;
- bool check_rss_limit = true;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
- allocated =
- allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ allocated = allocator.Allocate(cache, needed_size, 8);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
- allocated =
- allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+ allocated = allocator.Allocate(cache, needed_size, 8);
}
-
- if (!allocated) return allocator.ReturnNullOrDieOnOOM();
+ if (!allocated)
+ return nullptr;
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlagIfAllocated.
- void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
- AllocType alloc_type) {
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
AsanThread *t = GetCurrentThread();
m->free_tid = t ? t->tid() : 0;
m->free_context_id = StackDepotPut(*stack);
+
+ Flags &fl = *flags();
+ if (fl.max_free_fill_size > 0) {
+ // We have to skip the chunk header, it contains free_context_id.
+ uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
+ if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
+ uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
+ size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
+ REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
+ }
+ }
+
// Poison the region.
PoisonShadow(m->Beg(),
RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
+ // malloc. Don't report an invalid free in this case.
+ if (SANITIZER_WINDOWS &&
+ !get_allocator().PointerIsMine(ptr)) {
+ if (!IsSystemHeapAddress(p))
+ ReportFreeNotMalloced(p, stack);
+ return;
+ }
+
ASAN_FREE_HOOK(ptr);
+
// Must mark the chunk as quarantined before any changes to its metadata.
// Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
- QuarantineChunk(m, ptr, stack, alloc_type);
+ QuarantineChunk(m, ptr, stack);
}
void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
}
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDieOnBadRequest();
+ if (CheckForCallocOverflow(size, nmemb))
+ return AsanAllocator::FailureHandler::OnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
void PrintStats() {
allocator.PrintStats();
+ quarantine.PrintStats();
}
void ForceLock() {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
-
- void ReleaseToOS() { allocator.ReleaseToOS(); }
};
static Allocator instance(LINKER_INITIALIZED);
return instance.allocator;
}
-bool AsanChunkView::IsValid() {
+bool AsanChunkView::IsValid() const {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
-bool AsanChunkView::IsAllocated() {
+bool AsanChunkView::IsAllocated() const {
return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
}
-uptr AsanChunkView::Beg() { return chunk_->Beg(); }
-uptr AsanChunkView::End() { return Beg() + UsedSize(); }
-uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
-uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
-AllocType AsanChunkView::GetAllocType() {
+bool AsanChunkView::IsQuarantined() const {
+ return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+}
+uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
+uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
+uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
return res;
}
-u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
+u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
-StackTrace AsanChunkView::GetAllocStack() {
+StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
}
-StackTrace AsanChunkView::GetFreeStack() {
+StackTrace AsanChunkView::GetFreeStack() const {
return GetStackTraceFromId(GetFreeStackId());
}
-void ReleaseToOS() { instance.ReleaseToOS(); }
-
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
- SetAllocatorReleaseToOSCallback(ReleaseToOS);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
instance.PrintStats();
}
-void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
- AllocType alloc_type) {
- return instance.Allocate(size, alignment, stack, alloc_type, true);
-}
-
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, 0, stack, alloc_type);
}
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
- return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
- return instance.Calloc(nmemb, size, stack);
+ return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (!p)
- return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
if (size == 0) {
- instance.Deallocate(p, 0, stack, FROM_MALLOC);
- return nullptr;
+ if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
+ instance.Deallocate(p, 0, stack, FROM_MALLOC);
+ return nullptr;
+ }
+ // Allocate a size of 1 if we shouldn't free() on Realloc to 0
+ size = 1;
}
- return instance.Reallocate(p, size, stack);
+ return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
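The observable effect of the new allocator_frees_and_returns_null_on_realloc_zero flag handled above, sketched as a small test program (illustrative only; run under ASan and toggle the flag via ASAN_OPTIONS):

#include <stdio.h>
#include <stdlib.h>

int main() {
  void *p = malloc(16);
  // Default (flag=1): frees p and returns NULL, the traditional behavior.
  // With ASAN_OPTIONS=allocator_frees_and_returns_null_on_realloc_zero=0,
  // p is instead resized to a minimal live chunk that must be freed.
  void *q = realloc(p, 0);
  printf("realloc(p, 0) -> %p\n", q);
  free(q);  // no-op when q is NULL; required in the non-null case
  return 0;
}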
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
- return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
+ return SetErrnoOnNull(
+ instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ return AsanAllocator::FailureHandler::OnBadRequest();
}
- return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(
+ instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
+}
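For reference, the CheckForPvallocOverflow condition guarded above amounts to detecting wraparound in the page rounding. A sketch, assuming page_size is a nonzero power of two:

#include <stddef.h>

// True iff rounding size up to a multiple of page_size would wrap, which is
// exactly when pvalloc must fail with ENOMEM instead of allocating.
static bool pvalloc_would_overflow(size_t size, size_t page_size) {
  return size > (size_t)0 - page_size;  // i.e. size + page_size - 1 overflows
}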
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ return AsanAllocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(
+ instance.Allocate(size, alignment, stack, alloc_type, true));
}
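The IsPowerOfTwo test applied to the alignment above is the standard bit trick, shown for completeness:

#include <stddef.h>

// Nonzero with a single bit set: 8 -> true, 12 -> false, 0 -> false.
static bool is_power_of_two(size_t x) { return x != 0 && (x & (x - 1)) == 0; }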
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ AsanAllocator::FailureHandler::OnBadRequest();
+ return errno_EINVAL;
+ }
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+ if (UNLIKELY(!ptr))
+ return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
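CheckPosixMemalignAlignment itself is not part of this hunk; per POSIX, the constraint it presumably enforces is that the alignment be a power-of-two multiple of sizeof(void *). A hedged sketch:

#include <stddef.h>

// posix_memalign(3): alignment must be a power of two and a multiple of
// sizeof(void *); anything else yields EINVAL, as returned above.
static bool posix_memalign_alignment_ok(size_t alignment) {
  return alignment != 0 && (alignment & (alignment - 1)) == 0 &&
         alignment % sizeof(void *) == 0;
}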
instance.ForceUnlock();
}
-void AsanSoftRssLimitExceededCallback(bool exceeded) {
- instance.allocator.SetRssLimitIsExceeded(exceeded);
+void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
+ instance.SetRssLimitExceeded(limit_exceeded);
}
} // namespace __asan
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_malloc_hook(void *ptr, uptr size) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_free_hook(void *ptr) {
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
(void)ptr;
}
-} // extern "C"
#endif
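The SANITIZER_INTERFACE_WEAK_DEF macro introduced above is shorthand for the boilerplate this hunk deletes; its approximate expansion, as implied by the removed lines:

// SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) { ... }
// expands, per the deleted lines, to roughly:
//   extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
//   void __sanitizer_free_hook(void *ptr) { ... }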
struct AllocatorOptions {
u32 quarantine_size_mb;
+ u32 thread_local_quarantine_size_kb;
u16 min_redzone;
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
+ s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
- bool IsValid(); // Checks if AsanChunkView points to a valid allocated
- // or quarantined chunk.
- bool IsAllocated(); // Checks if the memory is currently allocated.
- uptr Beg(); // First byte of user memory.
- uptr End(); // Last byte of user memory.
- uptr UsedSize(); // Size requested by the user.
- uptr AllocTid();
- uptr FreeTid();
+ bool IsValid() const; // Checks if AsanChunkView points to a valid
+ // allocated or quarantined chunk.
+ bool IsAllocated() const; // Checks if the memory is currently allocated.
+ bool IsQuarantined() const; // Checks if the memory is currently quarantined.
+ uptr Beg() const; // First byte of user memory.
+ uptr End() const; // Last byte of user memory.
+ uptr UsedSize() const; // Size requested by the user.
+ uptr AllocTid() const;
+ uptr FreeTid() const;
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
- u32 GetAllocStackId();
- u32 GetFreeStackId();
- StackTrace GetAllocStack();
- StackTrace GetFreeStack();
- AllocType GetAllocType();
- bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
+ u32 GetAllocStackId() const;
+ u32 GetFreeStackId() const;
+ StackTrace GetAllocStack() const;
+ StackTrace GetFreeStack() const;
+ AllocType GetAllocType() const;
+ bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
return true;
}
return false;
}
- bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) {
+ bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
(void)access_size;
if (addr < Beg()) {
*offset = Beg() - addr;
}
return false;
}
- bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) {
+ bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
if (addr + access_size > End()) {
*offset = addr - End();
return true;
};
#if SANITIZER_CAN_USE_ALLOCATOR64
-# if defined(__powerpc64__)
+# if SANITIZER_FUCHSIA
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
# endif
typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16,
- SizeClassMap, kRegionSizeLog,
- ByteMap,
- AsanMapUnmapCallback> PrimaryAllocator;
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 16;
+ typedef __asan::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __asan::kRegionSizeLog;
+ typedef __asan::ByteMap ByteMap;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#endif // SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
(void *)descr.chunk_begin,
(void *)(descr.chunk_begin + descr.chunk_size));
- str.append("%s", d.EndLocation());
+ str.append("%s", d.Default());
Printf("%s", str.data());
}
str.append("%c", var.name_pos[i]);
}
str.append("'");
+ if (var.line > 0) {
+ str.append(" (line %d)", var.line);
+ }
if (pos_descr) {
Decorator d;
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
+ d.Location(), addr, pos_descr, d.Default());
} else {
str.append("\n");
}
MaybeDemangleGlobalName(g.name));
PrintGlobalLocation(&str, g);
str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.EndLocation());
+ str.append("%s", d.Default());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
if (!frame_descr) {
- Printf("%s\n", d.EndLocation());
+ Printf("%s\n", d.Default());
return;
}
- Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
+ Printf(" at offset %zu in frame%s\n", offset, d.Default());
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from entirely different place
// (e.g. use-after-scope, or different thread's stack).
- Printf("%s", d.EndLocation());
+ Printf("%s", d.Default());
StackTrace alloca_stack(&frame_pc, 1);
alloca_stack.Print();
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
- d.EndAllocation());
+ d.Default());
StackTrace free_stack = GetStackTraceFromId(free_stack_id);
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
+ d.Default());
} else {
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
+ d.Default());
}
alloc_stack.Print();
DescribeThread(GetCurrentThread());
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
const char *Allocation() { return Magenta(); }
- const char *EndAllocation() { return Default(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
return Default();
}
}
- const char *EndShadowByte() { return Default(); }
- const char *MemoryByte() { return Magenta(); }
- const char *EndMemoryByte() { return Default(); }
};
enum ShadowKind : u8 {
namespace __asan {
-void ErrorStackOverflow::Print() {
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: stack-overflow on address %p"
- " (pc %p bp %p sp %p T%d)\n",
- (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
- Printf("%s", d.EndWarning());
- scariness.Print();
- BufferedStackTrace stack;
- GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
- common_flags()->fast_unwind_on_fatal);
- stack.Print();
- ReportErrorSummary("stack-overflow", &stack);
-}
-
-static void MaybeDumpInstructionBytes(uptr pc) {
- if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
- InternalScopedString str(1024);
- str.append("First 16 instruction bytes at pc: ");
- if (IsAccessibleMemoryRange(pc, 16)) {
- for (int i = 0; i < 16; ++i) {
- PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
- }
- str.append("\n");
- } else {
- str.append("unaccessible\n");
- }
- Report("%s", str.data());
+static void OnStackUnwind(const SignalContext &sig,
+ const void *callback_context,
+ BufferedStackTrace *stack) {
+ bool fast = common_flags()->fast_unwind_on_fatal;
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+ // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+ // yields the call stack of the signal's handler and not of the code
+ // that raised the signal (as it does on Linux).
+ fast = true;
+#endif
+  // Tests and maybe some users expect the scariness score to be printed just
+  // before the stack. Since only ASan has a scariness score, there is no
+  // corresponding code in sanitizer_common, so we use this callback to
+  // print it.
+ static_cast<const ScarinessScoreBase *>(callback_context)->Print();
+ GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
+ sig.context, fast);
}
void ErrorDeadlySignal::Print() {
- Decorator d;
- Printf("%s", d.Warning());
- const char *description = DescribeSignalOrException(signo);
- Report(
- "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
- "T%d)\n",
- description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
- Printf("%s", d.EndWarning());
- if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
- if (is_memory_access) {
- const char *access_type =
- write_flag == SignalContext::WRITE
- ? "WRITE"
- : (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
- Report("The signal is caused by a %s memory access.\n", access_type);
- if (addr < GetPageSizeCached())
- Report("Hint: address points to the zero page.\n");
- }
- scariness.Print();
- BufferedStackTrace stack;
- GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
- common_flags()->fast_unwind_on_fatal);
- stack.Print();
- MaybeDumpInstructionBytes(pc);
- Printf("AddressSanitizer can not provide additional info.\n");
- ReportErrorSummary(description, &stack);
+ ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness);
}
void ErrorDoubleFree::Print() {
Printf("%s", d.Warning());
char tname[128];
Report(
- "ERROR: AddressSanitizer: attempting double-free on %p in "
+ "ERROR: AddressSanitizer: attempting %s on %p in "
"thread T%d%s:\n",
- addr_description.addr, tid,
+ scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
second_free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
- ReportErrorSummary("double-free", &stack);
+ ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorNewDeleteSizeMismatch::Print() {
Printf("%s", d.Warning());
char tname[128];
Report(
- "ERROR: AddressSanitizer: new-delete-type-mismatch on %p in thread "
+ "ERROR: AddressSanitizer: %s on %p in thread "
"T%d%s:\n",
- addr_description.addr, tid,
+ scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
- Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf("%s object passed to delete has wrong type:\n", d.Default());
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
- ReportErrorSummary("new-delete-type-mismatch", &stack);
+ ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=new_delete_type_mismatch=0\n");
"which was not malloc()-ed: %p in thread T%d%s\n",
addr_description.Address(), tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
stack.Print();
addr_description.Print();
- ReportErrorSummary("bad-free", &stack);
+ ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorAllocTypeMismatch::Print() {
CHECK_NE(alloc_type, dealloc_type);
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
+ Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
+ scariness.GetDescription(),
alloc_names[alloc_type], dealloc_names[dealloc_type],
addr_description.addr);
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
stack.Print();
addr_description.Print();
- ReportErrorSummary("alloc-dealloc-mismatch", &stack);
+ ReportErrorSummary(scariness.GetDescription(), &stack);
Report(
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
addr_description.Address());
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
stack->Print();
addr_description.Print();
- ReportErrorSummary("bad-malloc_usable_size", stack);
+ ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
addr_description.Address());
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
stack->Print();
addr_description.Print();
- ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
+ ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorStringFunctionMemoryRangesOverlap::Print() {
bug_type, addr1_description.Address(),
addr1_description.Address() + length1, addr2_description.Address(),
addr2_description.Address() + length2);
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr1_description.Print();
void ErrorStringFunctionSizeOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
- const char *bug_type = "negative-size-param";
- Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
- Printf("%s", d.EndWarning());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
+ scariness.GetDescription(), size);
+ Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr_description.Print();
- ReportErrorSummary(bug_type, stack);
+ ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorBadParamsToAnnotateContiguousContainer::Print() {
if (!IsAligned(beg, granularity))
Report("ERROR: beg is not aligned by %d\n", granularity);
stack->Print();
- ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+ ReportErrorSummary(scariness.GetDescription(), stack);
}
void ErrorODRViolation::Print() {
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: odr-violation (%p):\n", global1.beg);
- Printf("%s", d.EndWarning());
+ Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
+ global1.beg);
+ Printf("%s", d.Default());
InternalScopedString g1_loc(256), g2_loc(256);
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
"HINT: if you don't care about these errors you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
InternalScopedString error_msg(256);
- error_msg.append("odr-violation: global '%s' at %s",
+ error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
MaybeDemangleGlobalName(global1.name), g1_loc.data());
ReportErrorSummary(error_msg.data());
}
void ErrorInvalidPointerPair::Print() {
- const char *bug_type = "invalid-pointer-pair";
Decorator d;
Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n",
+ Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
addr1_description.Address(), addr2_description.Address());
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
addr1_description.Print();
addr2_description.Print();
- ReportErrorSummary(bug_type, &stack);
+ ReportErrorSummary(scariness.GetDescription(), &stack);
}
static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
bug_descr, (void *)addr, pc, bp, sp);
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
(void *)addr, tid,
- ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(pc, bp);
u32 tid;
};
-struct ErrorStackOverflow : ErrorBase {
- uptr addr, pc, bp, sp;
- // ErrorStackOverflow never owns the context.
- void *context;
- // VS2013 doesn't implement unrestricted unions, so we need a trivial default
- // constructor
- ErrorStackOverflow() = default;
- ErrorStackOverflow(u32 tid, const SignalContext &sig)
- : ErrorBase(tid),
- addr(sig.addr),
- pc(sig.pc),
- bp(sig.bp),
- sp(sig.sp),
- context(sig.context) {
- scariness.Clear();
- scariness.Scare(10, "stack-overflow");
- }
- void Print();
-};
-
struct ErrorDeadlySignal : ErrorBase {
- uptr addr, pc, bp, sp;
- // ErrorDeadlySignal never owns the context.
- void *context;
- int signo;
- SignalContext::WriteFlag write_flag;
- bool is_memory_access;
+ SignalContext signal;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDeadlySignal() = default;
- ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
- : ErrorBase(tid),
- addr(sig.addr),
- pc(sig.pc),
- bp(sig.bp),
- sp(sig.sp),
- context(sig.context),
- signo(signo_),
- write_flag(sig.write_flag),
- is_memory_access(sig.is_memory_access) {
+ ErrorDeadlySignal(u32 tid, const SignalContext &sig)
+ : ErrorBase(tid), signal(sig) {
scariness.Clear();
- if (is_memory_access) {
- if (addr < GetPageSizeCached()) {
- scariness.Scare(10, "null-deref");
- } else if (addr == pc) {
- scariness.Scare(60, "wild-jump");
- } else if (write_flag == SignalContext::WRITE) {
- scariness.Scare(30, "wild-addr-write");
- } else if (write_flag == SignalContext::READ) {
- scariness.Scare(20, "wild-addr-read");
- } else {
- scariness.Scare(25, "wild-addr");
- }
- } else {
+ if (signal.IsStackOverflow()) {
+ scariness.Scare(10, "stack-overflow");
+ } else if (!signal.is_memory_access) {
scariness.Scare(10, "signal");
+ } else if (signal.addr < GetPageSizeCached()) {
+ scariness.Scare(10, "null-deref");
+ } else if (signal.addr == signal.pc) {
+ scariness.Scare(60, "wild-jump");
+ } else if (signal.write_flag == SignalContext::WRITE) {
+ scariness.Scare(30, "wild-addr-write");
+ } else if (signal.write_flag == SignalContext::READ) {
+ scariness.Scare(20, "wild-addr-read");
+ } else {
+ scariness.Scare(25, "wild-addr");
}
}
void Print();
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
+ scariness.Scare(10, "bad-malloc_usable_size");
}
void Print();
};
stack(stack_),
addr_description(addr, /*shouldLockThreadRegistry=*/false) {
scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_get_allocated_size");
}
void Print();
};
beg(beg_),
end(end_),
old_mid(old_mid_),
- new_mid(new_mid_) {}
+ new_mid(new_mid_) {
+ scariness.Clear();
+ scariness.Scare(10, "bad-__sanitizer_annotate_contiguous_container");
+ }
void Print();
};
global1(*g1),
global2(*g2),
stack_id1(stack_id1_),
- stack_id2(stack_id2_) {}
+ stack_id2(stack_id2_) {
+ scariness.Clear();
+ scariness.Scare(10, "odr-violation");
+ }
void Print();
};
bp(bp_),
sp(sp_),
addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
- addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {}
+ addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(10, "invalid-pointer-pair");
+ }
void Print();
};
// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
- macro(StackOverflow) \
macro(DeadlySignal) \
macro(DoubleFree) \
macro(NewDeleteSizeMismatch) \
// We can add a wrapper around it to make it "more c++-like", but that would
// add a lot of code and the benefit wouldn't be that big.
union {
+ ErrorBase Base;
ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
};
}
}
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;
FakeStack *GetTLSFakeStack() {
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
-#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
- cf.detect_leaks = CAN_SANITIZE_LEAKS;
+ cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS;
cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
cf.malloc_context_size = kDefaultMallocContextSize;
cf.intercept_tls_get_addr = true;
RegisterCommonFlags(&ubsan_parser);
#endif
+ if (SANITIZER_MAC) {
+ // Support macOS MallocScribble and MallocPreScribble:
+ // <https://developer.apple.com/library/content/documentation/Performance/
+ // Conceptual/ManagingMemory/Articles/MallocDebug.html>
+ if (GetEnv("MallocScribble")) {
+ f->max_free_fill_size = 0x1000;
+ }
+ if (GetEnv("MallocPreScribble")) {
+ f->malloc_fill_byte = 0xaa;
+ }
+ }
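In effect, the block above emulates Apple's malloc debugging variables with ASan's own fill flags; the mapping, spelled out (values taken from the code above and the flag defaults in the flags list below):

// MallocScribble    -> max_free_fill_size = 0x1000: the first 4K of every
//                      freed chunk is filled with free_fill_byte (0x55).
// MallocPreScribble -> malloc_fill_byte = 0xaa: new allocations are
//                      pre-filled with 0xAA, matching Apple's convention.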
+
// Override from ASan compile definition.
const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
asan_parser.ParseString(asan_compile_def);
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
ubsan_parser.ParseString(ubsan_default_options);
#endif
+#if CAN_SANITIZE_LEAKS
+ const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
+ lsan_parser.ParseString(lsan_default_options);
+#endif
// Override from command line.
asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
f->quarantine_size_mb = f->quarantine_size >> 20;
if (f->quarantine_size_mb < 0) {
const int kDefaultQuarantineSizeMb =
- (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
+ (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
+ if (f->thread_local_quarantine_size_kb < 0) {
+ const u32 kDefaultThreadLocalQuarantineSizeKb =
+      // It is not advised to go lower than 64Kb, otherwise quarantine batches
+      // pushed from the thread-local quarantine to the global one will create
+      // too much overhead. One quarantine batch is 8Kb and holds up to
+      // 1021 chunks, which amounts to 1/8 memory overhead per batch when
+      // the thread-local quarantine is set to 64Kb.
+ (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
+ f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
+ }
+ if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) {
+ Report("%s: thread_local_quarantine_size_kb can be set to 0 only when "
+ "quarantine_size_mb is set to 0\n", SanitizerToolName);
+ Die();
+ }
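The 1/8 figure in the comment above follows directly from the stated numbers:

// One batch = 8Kb of bookkeeping holding up to 1021 chunks. At the advised
// 64Kb thread-local floor, a single in-flight batch is 8Kb / 64Kb = 1/8 of
// the thread-local budget -- hence "1/8 memory overhead per batch" and the
// advice not to go below 64Kb.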
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");
Report("WARNING: strchr* interceptors are enabled even though "
"replace_str=0. Use intercept_strchr=0 to disable them.");
}
+ if (!f->replace_str && common_flags()->intercept_strndup) {
+ Report("WARNING: strndup* interceptors are enabled even though "
+ "replace_str=0. Use intercept_strndup=0 to disable them.");
+ }
}
} // namespace __asan
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char* __asan_default_options() { return ""; }
-} // extern "C"
-#endif
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
+ return "";
+}
"Size (in Mb) of quarantine used to detect use-after-free "
"errors. Lower value may reduce memory usage but increase the "
"chance of false negatives.")
+ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
+ "Size (in Kb) of thread local quarantine used to detect "
+ "use-after-free errors. Lower value may reduce memory usage but "
+ "increase the chance of false negatives. It is not advised to go "
+ "lower than 64Kb, otherwise frequent transfers to global quarantine "
+ "might affect performance.")
ASAN_FLAG(int, redzone, 16,
"Minimal size (in bytes) of redzones around heap objects. "
"Requirement: redzone >= 16, is a power of two.")
int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
"ASan allocator flag. max_malloc_fill_size is the maximal amount of "
"bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "ASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
ASAN_FLAG(int, malloc_fill_byte, 0xbe,
"Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
ASAN_FLAG(bool, allow_user_poisoning, true,
"If set, user may manually mark memory regions as poisoned or "
"unpoisoned.")
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).")
+ASAN_FLAG(
+ int, sleep_after_init, 0,
+ "Number of seconds to sleep after AddressSanitizer is initialized. "
+ "Useful for debugging purposes (e.g. when one needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.")
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes")
-ASAN_FLAG(bool, dump_instruction_bytes, false,
- "If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
"(WARNING: USE AT YOUR OWN RISK!)")
ASAN_FLAG(bool, use_odr_indicator, false,
"Use special ODR indicator symbol for ODR violation detection")
+ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
+ "realloc(p, 0) is equivalent to free(p) by default (Same as the "
+ "POSIX standard). If set to false, realloc(p, 0) will return a "
+ "pointer to an allocated space which can not be used.")
+ASAN_FLAG(bool, verify_asan_link_order, true,
+ "Check position of ASan runtime in library list (needs to be disabled"
+ " when other library has to be preloaded system-wide)")
--- /dev/null
+//===-- asan_fuchsia.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Fuchsia-specific details.
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+
+#include <limits.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+#include <zircon/threads.h>
+
+namespace __asan {
+
+// The system already set up the shadow memory for us.
+// __sanitizer::GetMaxVirtualAddress has already been called by
+// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc).
+// Just do some additional sanity checks here.
+void InitializeShadowMemory() {
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+ DCHECK(kLowShadowBeg != kDefaultShadowSentinel);
+ __asan_shadow_memory_dynamic_address = kLowShadowBeg;
+
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1);
+ CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit);
+ CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base);
+ CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1);
+ CHECK_EQ(kLowShadowEnd, 0);
+ CHECK_EQ(kLowShadowBeg, 0);
+}
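Taken together, the checks above pin down the Fuchsia address-space layout (derived from the CHECK_EQ relations, low to high):

//   [0, 0]                        no low shadow (kLowShadowBeg == kLowShadowEnd == 0)
//   (0, shadow_base)              shadow gap
//   [shadow_base, shadow_limit)   shadow region (kHighShadowBeg .. kHighShadowEnd)
//   [shadow_limit, memory_limit)  application memory (kHighMemBeg .. kHighMemEnd)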
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+void InitializeAsanInterceptors() {}
+
+void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
+
+void InitializePlatformExceptionHandlers() {}
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ UNIMPLEMENTED();
+}
+
+// We can use a plain thread_local variable for TSD.
+static thread_local void *per_thread;
+
+void *AsanTSDGet() { return per_thread; }
+
+void AsanTSDSet(void *tsd) { per_thread = tsd; }
+
+// There's no initialization needed, and the passed-in destructor
+// will never be called. Instead, our own thread destruction hook
+// (below) will call AsanThread::TSDDtor directly.
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ DCHECK(destructor == &PlatformTSDDtor);
+}
+
+void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
+
+static inline size_t AsanThreadMmapSize() {
+ return RoundUpTo(sizeof(AsanThread), PAGE_SIZE);
+}
+
+struct AsanThread::InitOptions {
+ uptr stack_bottom, stack_size;
+};
+
+// Shared setup between thread creation and startup for the initial thread.
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
+ uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ // In lieu of AsanThread::Create.
+ AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
+
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+ u32 tid =
+ asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+ asanThreadRegistry().SetThreadName(tid, name);
+
+ // On other systems, AsanThread::Init() is called from the new
+ // thread itself. But on Fuchsia we already know the stack address
+ // range beforehand, so we can do most of the setup right now.
+ const AsanThread::InitOptions options = {stack_bottom, stack_size};
+ thread->Init(&options);
+
+ return thread;
+}
+
+// This gets the same arguments passed to Init by CreateAsanThread, above.
+// We're in the creator thread before the new thread is actually started,
+// but its stack address range is already known. We don't bother tracking
+// the static TLS address range because the system itself already uses an
+// ASan-aware allocator for that.
+void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
+ DCHECK_NE(GetCurrentThread(), this);
+ DCHECK_NE(GetCurrentThread(), nullptr);
+ CHECK_NE(options->stack_bottom, 0);
+ CHECK_NE(options->stack_size, 0);
+ stack_bottom_ = options->stack_bottom;
+ stack_top_ = options->stack_bottom + options->stack_size;
+}
+
+// Called by __asan::AsanInitInternal (asan_rtl.c).
+AsanThread *CreateMainThread() {
+ thrd_t self = thrd_current();
+ char name[ZX_MAX_NAME_LEN];
+ CHECK_NE(__sanitizer::MainThreadStackBase, 0);
+ CHECK_GT(__sanitizer::MainThreadStackSize, 0);
+ AsanThread *t = CreateAsanThread(
+ nullptr, 0, reinterpret_cast<uptr>(self), true,
+ _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
+ sizeof(name)) == ZX_OK
+ ? name
+ : nullptr,
+ __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize);
+ SetCurrentThread(t);
+ return t;
+}
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ EnsureMainThreadIDIsCorrect();
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order) StopInitOrderChecking();
+
+ GET_STACK_TRACE_THREAD;
+ u32 parent_tid = GetCurrentTidOrInvalid();
+
+ return CreateAsanThread(&stack, parent_tid, user_id, detached, name,
+ stack_bottom, stack_size);
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by BeforeThreadCreateHook (above).
+static void ThreadCreateHook(void *hook, bool aborted) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook is already running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ asanThreadRegistry().FinishThread(thread->tid());
+ UnmapOrDie(thread, AsanThreadMmapSize());
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// cf. asan_interceptors.cc:asan_thread_start
+static void ThreadStartHook(void *hook, uptr os_id) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ SetCurrentThread(thread);
+
+ // In lieu of AsanThread::ThreadStart.
+ asanThreadRegistry().StartThread(thread->tid(), os_id, /*workerthread*/ false,
+ nullptr);
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// All per-thread destructors have already been called.
+static void ThreadExitHook(void *hook, uptr os_id) {
+ AsanThread::TSDDtor(per_thread);
+}
+
+} // namespace __asan
+
+// These are declared (in extern "C") by <zircon/sanitizer.h>.
+// The system runtime will call our definitions directly.
+
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ return __asan::BeforeThreadCreateHook(
+ reinterpret_cast<uptr>(thread), detached, name,
+ reinterpret_cast<uptr>(stack_base), stack_size);
+}
+
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ __asan::ThreadCreateHook(hook, error != thrd_success);
+}
+
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ __asan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
+}
+
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
+ __asan::ThreadExitHook(hook, reinterpret_cast<uptr>(self));
+}
+
+#endif // SANITIZER_FUCHSIA
*flag = 0;
}
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+ if (*flag) return;
+ if (!start) return;
+ CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+ __asan_global *globals_start = (__asan_global*)start;
+ __asan_global *globals_stop = (__asan_global*)stop;
+ __asan_register_globals(globals_start, globals_stop - globals_start);
+ *flag = 1;
+}
+
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+ if (!*flag) return;
+ if (!start) return;
+ CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+ __asan_global *globals_start = (__asan_global*)start;
+ __asan_global *globals_stop = (__asan_global*)stop;
+ __asan_unregister_globals(globals_start, globals_stop - globals_start);
+ *flag = 0;
+}
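A sketch of how compiler-emitted instrumentation would be expected to drive these entry points (all names below are illustrative; the patch only defines the callees):

// Per-module guard plus linker-provided bounds of the section that holds the
// module's __asan_global records. ELF linkers synthesize __start_/__stop_
// symbols for such sections.
extern __asan_global __start_asan_globals[], __stop_asan_globals[];
static uptr module_guard;

static void module_ctor() {
  __asan_register_elf_globals(&module_guard, __start_asan_globals,
                              __stop_asan_globals);
}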
+
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
}
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // The MSVC incremental linker may pad globals out to 256 bytes. As long
+ // as __asan_global is less than 256 bytes large and its size is a power
+ // of two, we can skip over the padding.
+ static_assert(
+ sizeof(__asan_global) < 256 &&
+ (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
+ "sizeof(__asan_global) incompatible with incremental linker padding");
+ // If these are padding bytes, the rest of the global should be zero.
+ CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
+ globals[i].name == nullptr && globals[i].module_name == nullptr &&
+ globals[i].odr_indicator == 0);
+ continue;
+ }
RegisterGlobal(&globals[i]);
}
+
+ // Poison the metadata. It should not be accessible to user code.
+ PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global),
+ kAsanGlobalRedzoneMagic);
}
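Why the static_assert above demands a power-of-two size, as a comment-form note:

// 256 % sizeof(__asan_global) == 0 iff sizeof(__asan_global) is a power of
// two <= 256, so the linker's zeroed padding always spans whole array slots
// and the beg == 0 test above lands exactly on entry boundaries.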
// Unregister an array of globals.
if (!flags()->report_globals) return;
BlockingMutexLock lock(&mu_for_globals);
for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // Skip globals that look like padding from the MSVC incremental linker.
+ // See comment in __asan_register_globals.
+ continue;
+ }
UnregisterGlobal(&globals[i]);
}
+
+ // Unpoison the metadata.
+ PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global), 0);
}
// This method runs immediately prior to dynamic initialization in each TU,
--- /dev/null
+//===-- asan_globals_win.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Global registration code that is linked into every Windows DLL and EXE.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_interface_internal.h"
+#if SANITIZER_WINDOWS
+
+namespace __asan {
+
+#pragma section(".ASAN$GA", read, write) // NOLINT
+#pragma section(".ASAN$GZ", read, write) // NOLINT
+extern "C" __declspec(allocate(".ASAN$GA"))
+__asan_global __asan_globals_start = {};
+extern "C" __declspec(allocate(".ASAN$GZ"))
+__asan_global __asan_globals_end = {};
+#pragma comment(linker, "/merge:.ASAN=.data")
+
+static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
+ __asan_global *start = &__asan_globals_start + 1;
+ __asan_global *end = &__asan_globals_end;
+ uptr bytediff = (uptr)end - (uptr)start;
+ if (bytediff % sizeof(__asan_global) != 0) {
+#ifdef SANITIZER_DLL_THUNK
+ __debugbreak();
+#else
+ CHECK("corrupt asan global array");
+#endif
+ }
+ // We know end >= start because the linker sorts the portion after the dollar
+ // sign alphabetically.
+ uptr n = end - start;
+ hook(start, n);
+}
+
+static void register_dso_globals() {
+ call_on_globals(&__asan_register_globals);
+}
+
+static void unregister_dso_globals() {
+ call_on_globals(&__asan_unregister_globals);
+}
+
+// Register globals
+#pragma section(".CRT$XCU", long, read) // NOLINT
+#pragma section(".CRT$XTX", long, read) // NOLINT
+extern "C" __declspec(allocate(".CRT$XCU"))
+void (*const __asan_dso_reg_hook)() = &register_dso_globals;
+extern "C" __declspec(allocate(".CRT$XTX"))
+void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
+
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+// There is no general interception at all on Fuchsia.
+// Only the functions in asan_interceptors_memintrinsics.cc are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA
+
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif
namespace __asan {
-// Return true if we can quickly decide that the region is unpoisoned.
-static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
- if (size == 0) return true;
- if (size <= 32)
- return !AddressIsPoisoned(beg) &&
- !AddressIsPoisoned(beg + size - 1) &&
- !AddressIsPoisoned(beg + size / 2);
- return false;
-}
-
-struct AsanInterceptorContext {
- const char *interceptor_name;
-};
-
-// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
-// and ASAN_WRITE_RANGE as macro instead of function so
-// that no extra frames are created, and stack trace contains
-// relevant information only.
-// We check all shadow bytes.
-#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
- uptr __offset = (uptr)(offset); \
- uptr __size = (uptr)(size); \
- uptr __bad = 0; \
- if (__offset > __offset + __size) { \
- GET_STACK_TRACE_FATAL_HERE; \
- ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
- } \
- if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
- (__bad = __asan_region_is_poisoned(__offset, __size))) { \
- AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
- bool suppressed = false; \
- if (_ctx) { \
- suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
- if (!suppressed && HaveStackTraceBasedSuppressions()) { \
- GET_STACK_TRACE_FATAL_HERE; \
- suppressed = IsStackTraceSuppressed(&stack); \
- } \
- } \
- if (!suppressed) { \
- GET_CURRENT_PC_BP_SP; \
- ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
- } \
- } \
- } while (0)
-
-#define ASAN_READ_RANGE(ctx, offset, size) \
- ACCESS_MEMORY_RANGE(ctx, offset, size, false)
-#define ASAN_WRITE_RANGE(ctx, offset, size) \
- ACCESS_MEMORY_RANGE(ctx, offset, size, true)
-
#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
ASAN_READ_RANGE((ctx), (s), \
common_flags()->strict_string_checks ? (len) + 1 : (n))
#define ASAN_READ_STRING(ctx, s, n) \
ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
-// Behavior of functions like "memcpy" or "strcpy" is undefined
-// if memory intervals overlap. We report error in this case.
-// Macro is used to avoid creation of new frames.
-static inline bool RangesOverlap(const char *offset1, uptr length1,
- const char *offset2, uptr length2) {
- return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
-}
-#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
- const char *offset1 = (const char*)_offset1; \
- const char *offset2 = (const char*)_offset2; \
- if (RangesOverlap(offset1, length1, offset2, length2)) { \
- GET_STACK_TRACE_FATAL_HERE; \
- ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
- offset2, length2, &stack); \
- } \
-} while (0)
-
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if SANITIZER_INTERCEPT_STRNLEN
if (REAL(strnlen)) {
}
int OnExit() {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+ __lsan::HasReportedLeaks()) {
+ return common_flags()->exitcode;
+ }
// FIXME: ask frontend whether we need to return failure.
return 0;
}
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
- if (flags()->strict_init_order) { \
- StopInitOrderChecking(); \
- }
+ do { \
+ if (flags()->strict_init_order) \
+ StopInitOrderChecking(); \
+ CheckNoDeepBind(filename, flag); \
+ } while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
-#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
- CoverageUpdateMapping()
-#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
if (AsanThread *t = GetCurrentThread()) { \
} else { \
*begin = *end = 0; \
}
-// Asan needs custom handling of these:
-#undef SANITIZER_INTERCEPT_MEMSET
-#undef SANITIZER_INTERCEPT_MEMMOVE
-#undef SANITIZER_INTERCEPT_MEMCPY
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
// for them.
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
-#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
-
-#if SANITIZER_ANDROID
-INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
- if (!IsHandledDeadlySignal(signum) ||
- common_flags()->allow_user_segv_handler) {
- return REAL(bsd_signal)(signum, handler);
- }
- return 0;
-}
-#endif
-
-INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!IsHandledDeadlySignal(signum) ||
- common_flags()->allow_user_segv_handler) {
- return REAL(signal)(signum, handler);
- }
- return nullptr;
-}
-
-INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
- struct sigaction *oldact) {
- if (!IsHandledDeadlySignal(signum) ||
- common_flags()->allow_user_segv_handler) {
- return REAL(sigaction)(signum, act, oldact);
- }
- return 0;
-}
-
-namespace __sanitizer {
-int real_sigaction(int signum, const void *act, void *oldact) {
- return REAL(sigaction)(signum, (const struct sigaction *)act,
- (struct sigaction *)oldact);
-}
-} // namespace __sanitizer
-
-#elif SANITIZER_POSIX
-// We need to have defined REAL(sigaction) on posix systems.
-DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
- struct sigaction *oldact)
-#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
-
#if ASAN_INTERCEPT_SWAPCONTEXT
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
// Align to page size.
}
#endif // ASAN_INTERCEPT_SWAPCONTEXT
+#if SANITIZER_NETBSD
+#define longjmp __longjmp14
+#define siglongjmp __siglongjmp14
+#endif
+
INTERCEPTOR(void, longjmp, void *env, int val) {
__asan_handle_no_return();
REAL(longjmp)(env, val);
}
#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+INTERCEPTOR(void, __longjmp_chk, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(__longjmp_chk)(env, val);
+}
+#endif
+
#if ASAN_INTERCEPT_SIGLONGJMP
INTERCEPTOR(void, siglongjmp, void *env, int val) {
__asan_handle_no_return();
}
#endif
-// memcpy is called during __asan_init() from the internals of printf(...).
-// We do not treat memcpy with to==from as a bug.
-// See http://llvm.org/bugs/show_bug.cgi?id=11763.
-#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
- if (asan_init_is_running) { \
- return REAL(memcpy)(to, from, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- if (to != from) { \
- CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
- } \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return REAL(memcpy)(to, from, size); \
- } while (0)
-
-
-void *__asan_memcpy(void *to, const void *from, uptr size) {
- ASAN_MEMCPY_IMPL(nullptr, to, from, size);
-}
-
-// memset is called inside Printf.
-#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \
- if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
- if (asan_init_is_running) { \
- return REAL(memset)(block, c, size); \
- } \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_WRITE_RANGE(ctx, block, size); \
- } \
- return REAL(memset)(block, c, size); \
- } while (0)
-
-void *__asan_memset(void *block, int c, uptr size) {
- ASAN_MEMSET_IMPL(nullptr, block, c, size);
-}
-
-#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \
- if (UNLIKELY(!asan_inited)) \
- return internal_memmove(to, from, size); \
- ENSURE_ASAN_INITED(); \
- if (flags()->replace_intrin) { \
- ASAN_READ_RANGE(ctx, from, size); \
- ASAN_WRITE_RANGE(ctx, to, size); \
- } \
- return internal_memmove(to, from, size); \
- } while (0)
-
-void *__asan_memmove(void *to, const void *from, uptr size) {
- ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
-}
-
-INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memmove);
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-}
-
-INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memcpy);
-#if !SANITIZER_MAC
- ASAN_MEMCPY_IMPL(ctx, to, from, size);
-#else
- // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced
- // with WRAP(memcpy). As a result, false positives are reported for memmove()
- // calls. If we just disable error reporting with
- // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with
- // internal_memcpy(), which may lead to crashes, see
- // http://llvm.org/bugs/show_bug.cgi?id=16362.
- ASAN_MEMMOVE_IMPL(ctx, to, from, size);
-#endif // !SANITIZER_MAC
-}
-
-INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, memset);
- ASAN_MEMSET_IMPL(ctx, block, c, size);
-}
-
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
}
#endif // ASAN_INTERCEPT___STRDUP
-INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
- SIZE_T length = internal_wcslen(s);
- if (!asan_init_is_running) {
- ENSURE_ASAN_INITED();
- ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
- }
- return length;
-}
-
INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
#if ASAN_INTERCEPT_FORK
INTERCEPTOR(int, fork, void) {
ENSURE_ASAN_INITED();
- if (common_flags()->coverage) CovBeforeFork();
int pid = REAL(fork)();
- if (common_flags()->coverage) CovAfterFork(pid);
return pid;
}
#endif // ASAN_INTERCEPT_FORK
CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
-
- // Intercept mem* functions.
- ASAN_INTERCEPT_FUNC(memmove);
- ASAN_INTERCEPT_FUNC(memset);
- if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
- // In asan, REAL(memmove) is not used, but it is used in msan.
- ASAN_INTERCEPT_FUNC(memcpy);
- } else {
- ASSIGN_REAL(memcpy, memmove);
- }
- CHECK(REAL(memcpy));
+ InitializeSignalInterceptors();
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
- ASAN_INTERCEPT_FUNC(wcslen);
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
ASAN_INTERCEPT_FUNC(strdup);
ASAN_INTERCEPT_FUNC(strtoll);
#endif
- // Intecept signal- and jump-related functions.
+  // Intercept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
-#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
- ASAN_INTERCEPT_FUNC(sigaction);
-#if SANITIZER_ANDROID
- ASAN_INTERCEPT_FUNC(bsd_signal);
-#endif
- ASAN_INTERCEPT_FUNC(signal);
-#endif
+
#if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
#endif
#if ASAN_INTERCEPT__LONGJMP
ASAN_INTERCEPT_FUNC(_longjmp);
#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+ ASAN_INTERCEPT_FUNC(__longjmp_chk);
+#endif
#if ASAN_INTERCEPT_SIGLONGJMP
ASAN_INTERCEPT_FUNC(siglongjmp);
#endif
}
} // namespace __asan
+
+#endif // !SANITIZER_FUCHSIA
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
+#include "asan_interceptors_memintrinsics.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
+namespace __asan {
+
+void InitializeAsanInterceptors();
+void InitializePlatformInterceptors();
+
+#define ENSURE_ASAN_INITED() \
+ do { \
+ CHECK(!asan_init_is_running); \
+ if (UNLIKELY(!asan_inited)) { \
+ AsanInitFromRtl(); \
+ } \
+ } while (0)
+
+} // namespace __asan
+
+// There is no general interception at all on Fuchsia.
+// Only the functions in asan_interceptors_memintrinsics.h are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA
+
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_FORK 0
#endif
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
+# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
-# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
+# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
-#if !SANITIZER_WINDOWS
-# define ASAN_INTERCEPT_SIGLONGJMP 1
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT___LONGJMP_CHK 1
#else
-# define ASAN_INTERCEPT_SIGLONGJMP 0
+# define ASAN_INTERCEPT___LONGJMP_CHK 0
#endif
// Android bug: https://code.google.com/p/android/issues/detail?id=61799
#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
-DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
-DECLARE_REAL(void*, memset, void *block, int c, uptr size)
DECLARE_REAL(char*, strchr, const char *str, int c)
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
-namespace __asan {
-
-void InitializeAsanInterceptors();
-void InitializePlatformInterceptors();
-
-#define ENSURE_ASAN_INITED() do { \
- CHECK(!asan_init_is_running); \
- if (UNLIKELY(!asan_inited)) { \
- AsanInitFromRtl(); \
- } \
-} while (0)
-
-} // namespace __asan
+#endif // !SANITIZER_FUCHSIA
#endif // ASAN_INTERCEPTORS_H
--- /dev/null
+//===-- asan_interceptors_memintrinsics.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan versions of memcpy, memmove, and memset.
+//===---------------------------------------------------------------------===//
+
+#include "asan_interceptors_memintrinsics.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_suppressions.h"
+
+using namespace __asan; // NOLINT
+
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+ ASAN_MEMCPY_IMPL(nullptr, to, from, size);
+}
+
+void *__asan_memset(void *block, int c, uptr size) {
+ ASAN_MEMSET_IMPL(nullptr, block, c, size);
+}
+
+void *__asan_memmove(void *to, const void *from, uptr size) {
+ ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
+}
+
+#if SANITIZER_FUCHSIA
+
+// Fuchsia doesn't use sanitizer_common_interceptors.inc, but the only
+// things it needs from there are these three.  Just define them as
+// aliases here rather than repeating the contents.
+
+decltype(memcpy) memcpy [[gnu::alias("__asan_memcpy")]];
+decltype(memmove) memmove [[gnu::alias("__asan_memmove")]];
+decltype(memset) memset [[gnu::alias("__asan_memset")]];
+
+#endif // SANITIZER_FUCHSIA
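As a self-contained reminder of what the attribute form above does (a standalone example, unrelated to the runtime; impl and alias_fn are invented names):

    extern "C" int impl(int x) { return x + 1; }
    // alias_fn shares impl's definition; no second body is emitted,
    // and calls to alias_fn bind to the symbol "impl".
    extern "C" decltype(impl) alias_fn [[gnu::alias("impl")]];
    // alias_fn(1) == 2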
--- /dev/null
+//===-- asan_interceptors_memintrinsics.h -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_interceptors_memintrinsics.cc
+//===---------------------------------------------------------------------===//
+#ifndef ASAN_MEMINTRIN_H
+#define ASAN_MEMINTRIN_H
+
+#include "asan_interface_internal.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "interception/interception.h"
+
+DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void*, memset, void *block, int c, uptr size)
+
+namespace __asan {
+
+// Return true if we can quickly decide that the region is unpoisoned.
+// We assume that a redzone is at least 16 bytes.
+static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
+ if (size == 0) return true;
+ if (size <= 32)
+ return !AddressIsPoisoned(beg) &&
+ !AddressIsPoisoned(beg + size - 1) &&
+ !AddressIsPoisoned(beg + size / 2);
+ if (size <= 64)
+ return !AddressIsPoisoned(beg) &&
+ !AddressIsPoisoned(beg + size / 4) &&
+ !AddressIsPoisoned(beg + size - 1) &&
+ !AddressIsPoisoned(beg + 3 * size / 4) &&
+ !AddressIsPoisoned(beg + size / 2);
+ return false;
+}
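A quick argument for why these probes suffice (an editorial sketch relying only on the 16-byte redzone assumption stated above):

    // For size <= 32 the probes sit at beg, beg + size/2 and beg + size - 1,
    // so consecutive probes are at most 16 bytes apart. Any poisoned run that
    // intersects the region is at least 16 bytes long (redzone >= 16), hence
    // a run that avoids every probe would have to fit in a gap of at most 15
    // bytes, a contradiction. The size <= 64 case adds the quarter points so
    // the maximum probe spacing again stays at 16 bytes.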
+
+struct AsanInterceptorContext {
+ const char *interceptor_name;
+};
+
+// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
+// and ASAN_WRITE_RANGE as macros instead of functions so
+// that no extra frames are created and the stack trace
+// contains only relevant information.
+// We check all shadow bytes.
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
+ uptr __offset = (uptr)(offset); \
+ uptr __size = (uptr)(size); \
+ uptr __bad = 0; \
+ if (__offset > __offset + __size) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
+ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
+ (__bad = __asan_region_is_poisoned(__offset, __size))) { \
+ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
+ bool suppressed = false; \
+ if (_ctx) { \
+ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ } \
+ if (!suppressed) { \
+ GET_CURRENT_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
+ } \
+ } \
+ } while (0)
+
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
+#define ASAN_READ_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, true)
+
+// The behavior of functions like "memcpy" or "strcpy" is undefined
+// if the memory intervals overlap. We report an error in this case.
+// A macro is used to avoid creating new frames.
+static inline bool RangesOverlap(const char *offset1, uptr length1,
+ const char *offset2, uptr length2) {
+ return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
+}
+#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
+ const char *offset1 = (const char*)_offset1; \
+ const char *offset2 = (const char*)_offset2; \
+ if (RangesOverlap(offset1, length1, offset2, length2)) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
+ offset2, length2, &stack); \
+ } \
+} while (0)
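The predicate is easy to sanity-check in isolation (a standalone restatement for illustration; Overlap is a local copy, not a runtime symbol):

    #include <cassert>
    static bool Overlap(const char *p1, unsigned long n1,
                        const char *p2, unsigned long n2) {
      return !((p1 + n1 <= p2) || (p2 + n2 <= p1));
    }
    int main() {
      char buf[16];
      assert(!Overlap(buf, 8, buf + 8, 8));  // adjacent halves: no overlap
      assert(Overlap(buf, 9, buf + 8, 8));   // both cover buf[8]: overlap
      return 0;
    }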
+
+} // namespace __asan
+
+#endif // ASAN_MEMINTRIN_H
--- /dev/null
+//===-- asan_interface.inc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Asan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+INTERFACE_FUNCTION(__asan_alloca_poison)
+INTERFACE_FUNCTION(__asan_allocas_unpoison)
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_describe_address)
+INTERFACE_FUNCTION(__asan_exp_load1)
+INTERFACE_FUNCTION(__asan_exp_load2)
+INTERFACE_FUNCTION(__asan_exp_load4)
+INTERFACE_FUNCTION(__asan_exp_load8)
+INTERFACE_FUNCTION(__asan_exp_load16)
+INTERFACE_FUNCTION(__asan_exp_loadN)
+INTERFACE_FUNCTION(__asan_exp_store1)
+INTERFACE_FUNCTION(__asan_exp_store2)
+INTERFACE_FUNCTION(__asan_exp_store4)
+INTERFACE_FUNCTION(__asan_exp_store8)
+INTERFACE_FUNCTION(__asan_exp_store16)
+INTERFACE_FUNCTION(__asan_exp_storeN)
+INTERFACE_FUNCTION(__asan_get_alloc_stack)
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_get_free_stack)
+INTERFACE_FUNCTION(__asan_get_report_access_size)
+INTERFACE_FUNCTION(__asan_get_report_access_type)
+INTERFACE_FUNCTION(__asan_get_report_address)
+INTERFACE_FUNCTION(__asan_get_report_bp)
+INTERFACE_FUNCTION(__asan_get_report_description)
+INTERFACE_FUNCTION(__asan_get_report_pc)
+INTERFACE_FUNCTION(__asan_get_report_sp)
+INTERFACE_FUNCTION(__asan_get_shadow_mapping)
+INTERFACE_FUNCTION(__asan_handle_no_return)
+INTERFACE_FUNCTION(__asan_init)
+INTERFACE_FUNCTION(__asan_load_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+INTERFACE_FUNCTION(__asan_load1_noabort)
+INTERFACE_FUNCTION(__asan_load2_noabort)
+INTERFACE_FUNCTION(__asan_load4_noabort)
+INTERFACE_FUNCTION(__asan_load8_noabort)
+INTERFACE_FUNCTION(__asan_load16_noabort)
+INTERFACE_FUNCTION(__asan_loadN_noabort)
+INTERFACE_FUNCTION(__asan_locate_address)
+INTERFACE_FUNCTION(__asan_memcpy)
+INTERFACE_FUNCTION(__asan_memmove)
+INTERFACE_FUNCTION(__asan_memset)
+INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_poison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_print_accumulated_stats)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_register_elf_globals)
+INTERFACE_FUNCTION(__asan_register_image_globals)
+INTERFACE_FUNCTION(__asan_report_error)
+INTERFACE_FUNCTION(__asan_report_exp_load1)
+INTERFACE_FUNCTION(__asan_report_exp_load2)
+INTERFACE_FUNCTION(__asan_report_exp_load4)
+INTERFACE_FUNCTION(__asan_report_exp_load8)
+INTERFACE_FUNCTION(__asan_report_exp_load16)
+INTERFACE_FUNCTION(__asan_report_exp_load_n)
+INTERFACE_FUNCTION(__asan_report_exp_store1)
+INTERFACE_FUNCTION(__asan_report_exp_store2)
+INTERFACE_FUNCTION(__asan_report_exp_store4)
+INTERFACE_FUNCTION(__asan_report_exp_store8)
+INTERFACE_FUNCTION(__asan_report_exp_store16)
+INTERFACE_FUNCTION(__asan_report_exp_store_n)
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+INTERFACE_FUNCTION(__asan_report_load1_noabort)
+INTERFACE_FUNCTION(__asan_report_load2_noabort)
+INTERFACE_FUNCTION(__asan_report_load4_noabort)
+INTERFACE_FUNCTION(__asan_report_load8_noabort)
+INTERFACE_FUNCTION(__asan_report_load16_noabort)
+INTERFACE_FUNCTION(__asan_report_load_n_noabort)
+INTERFACE_FUNCTION(__asan_report_present)
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+INTERFACE_FUNCTION(__asan_report_store1_noabort)
+INTERFACE_FUNCTION(__asan_report_store2_noabort)
+INTERFACE_FUNCTION(__asan_report_store4_noabort)
+INTERFACE_FUNCTION(__asan_report_store8_noabort)
+INTERFACE_FUNCTION(__asan_report_store16_noabort)
+INTERFACE_FUNCTION(__asan_report_store_n_noabort)
+INTERFACE_FUNCTION(__asan_set_death_callback)
+INTERFACE_FUNCTION(__asan_set_error_report_callback)
+INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_f1)
+INTERFACE_FUNCTION(__asan_set_shadow_f2)
+INTERFACE_FUNCTION(__asan_set_shadow_f3)
+INTERFACE_FUNCTION(__asan_set_shadow_f5)
+INTERFACE_FUNCTION(__asan_set_shadow_f8)
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_3)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+INTERFACE_FUNCTION(__asan_store1_noabort)
+INTERFACE_FUNCTION(__asan_store2_noabort)
+INTERFACE_FUNCTION(__asan_store4_noabort)
+INTERFACE_FUNCTION(__asan_store8_noabort)
+INTERFACE_FUNCTION(__asan_store16_noabort)
+INTERFACE_FUNCTION(__asan_storeN_noabort)
+INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+INTERFACE_FUNCTION(__asan_unregister_elf_globals)
+INTERFACE_FUNCTION(__asan_unregister_image_globals)
+INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
+INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_WEAK_FUNCTION(__asan_default_options)
+INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
+INTERFACE_WEAK_FUNCTION(__asan_on_error)
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unregister_image_globals(uptr *flag);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
+
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_set_error_report_callback(void (*callback)(const char*));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_on_error();
+ void __asan_on_error();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ const char* __asan_default_options();
+ const char* __asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
void __asan_alloca_poison(uptr addr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ const char* __asan_default_suppressions();
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
// If set, values like the allocator chunk size, as well as defaults for some
// flags, will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
+# if SANITIZER_IOS || SANITIZER_ANDROID
# define ASAN_LOW_MEMORY 1
# else
# define ASAN_LOW_MEMORY 0
// asan_win.cc
void InitializePlatformExceptionHandlers();
-
-// asan_win.cc / asan_posix.cc
-const char *DescribeSignalOrException(int signo);
+// Returns whether an address is a valid allocated system heap block.
+// 'addr' must point to the beginning of the block.
+bool IsSystemHeapAddress(uptr addr);
// asan_rtl.cc
+void PrintAddressSpaceLayout();
void NORETURN ShowStatsAndAbort();
+// asan_shadow_setup.cc
+void InitializeShadowMemory();
+
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
// asan_linux.cc / asan_mac.cc / asan_win.cc
+uptr FindDynamicShadowStart();
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
+// asan_thread.cc
+AsanThread *CreateMainThread();
+
// Support function for __asan_(un)register_image_globals. Searches for the
// loaded image containing `needle' and then enumerates all global metadata
// structures declared in that image, applying `op' (e.g.,
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-// Platform-specific options.
-#if SANITIZER_MAC
-bool PlatformHasDifferentMemcpyAndMemmove();
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
- (PlatformHasDifferentMemcpyAndMemmove())
-#elif SANITIZER_WINDOWS64
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
-#else
-# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
-#endif // SANITIZER_MAC
-
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "asan_interceptors.h"
#include "asan_internal.h"
#if SANITIZER_ANDROID || SANITIZER_FREEBSD
#include <ucontext.h>
extern "C" void* _DYNAMIC;
+#elif SANITIZER_NETBSD
+#include <link_elf.h>
+#include <ucontext.h>
+extern Elf_Dyn _DYNAMIC;
#else
#include <sys/ucontext.h>
#include <link.h>
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress(uptr addr) { return false; }
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC; // defined in link.h
}
+uptr FindDynamicShadowStart() {
+ UNREACHABLE("FindDynamicShadowStart is not available");
+ return 0;
+}
+
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
return 0;
+#if SANITIZER_NETBSD
+ // Ignore first entry (the main program)
+ char **p = (char **)data;
+ if (!(*p)) {
+ *p = (char *)-1;
+ return 0;
+ }
+#endif
+
*(const char **)data = info->dlpi_name;
return 1;
}
}
void AsanCheckDynamicRTPrereqs() {
- if (!ASAN_DYNAMIC)
+ if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order)
return;
// Ensure that dynamic RT is the first DSO in the list
// system libraries, causing crashes later in ASan initialization.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
char filename[128];
- while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
- sizeof(filename), nullptr)) {
- if (IsDynamicRTName(filename)) {
+ MemoryMappedSegment segment(filename, sizeof(filename));
+ while (proc_maps.Next(&segment)) {
+ if (IsDynamicRTName(segment.filename)) {
Report("Your application is linked against "
"incompatible ASan runtimes.\n");
Die();
} // namespace __asan
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
void InitializePlatformInterceptors() {}
void InitializePlatformExceptionHandlers() {}
-
-bool PlatformHasDifferentMemcpyAndMemmove() {
- // On OS X 10.7 memcpy() and memmove() are both resolved
- // into memmove$VARIANT$sse42.
- // See also https://github.com/google/sanitizers/issues/34.
- // TODO(glider): need to check dynamically that memcpy() and memmove() are
- // actually the same function.
- return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
-}
+bool IsSystemHeapAddress(uptr addr) { return false; }
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
}
+uptr FindDynamicShadowStart() {
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ uptr largest_gap_found = 0;
+ uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+ granularity, &largest_gap_found);
+ // If the shadow doesn't fit, restrict the address space to make it fit.
+ if (shadow_start == 0) {
+ uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
+ RestrictMemoryToMaxAddress(new_max_vm);
+ kHighMemEnd = new_max_vm - 1;
+ space_size = kHighShadowEnd + left_padding;
+ shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
+ }
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
// No-op. Mac does not support static linkage anyway.
void AsanCheckDynamicRTPrereqs() {}
t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
parent_tid, stack, /* detached */ true);
t->Init();
- asanThreadRegistry().StartThread(t->tid(), 0, 0);
+ asanThreadRegistry().StartThread(t->tid(), GetTid(),
+ /* workerthread */ true, 0);
SetCurrentThread(t);
}
}
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
+ SANITIZER_NETBSD
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-static bool IsInDlsymAllocPool(const void *ptr) {
+static INLINE bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- return off < sizeof(alloc_memory_for_dlsym);
+ return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
static void *AllocateFromLocalPool(uptr size_in_bytes) {
return mem;
}
+static INLINE bool MaybeInDlsym() {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA && asan_init_is_running;
+}
+
+static void *ReallocFromLocalPool(void *ptr, uptr size) {
+ const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(MaybeInDlsym())) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ new_ptr = asan_malloc(size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+}
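To orient the reader, the early-startup path these helpers implement looks roughly like this (an editorial sketch; AllocateFromLocalPool's body is elided by the hunk above):

    // dlsym("malloc") triggers malloc() while asan_init_is_running:
    //   MaybeInDlsym() == true  -> AllocateFromLocalPool() bump-allocates
    //   from the static alloc_memory_for_dlsym[] buffer.
    // A later realloc(p, n) where p still points into that buffer:
    //   IsInDlsymAllocPool(p)   -> ReallocFromLocalPool() copies
    //   Min(n, space left after p) into a real heap chunk once the
    //   allocator is usable, abandoning the pool storage.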
+
INTERCEPTOR(void, free, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
asan_free(ptr, &stack, FROM_MALLOC);
}
+#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
asan_free(ptr, &stack, FROM_MALLOC);
}
+#endif // SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void*, malloc, uptr size) {
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(MaybeInDlsym()))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
- if (UNLIKELY(!asan_inited))
+ if (UNLIKELY(MaybeInDlsym()))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
return AllocateFromLocalPool(nmemb * size);
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
+ if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return ReallocFromLocalPool(ptr, size);
+ if (UNLIKELY(MaybeInDlsym()))
+ return AllocateFromLocalPool(size);
+ ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
- if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
- uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
- uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr;
- if (UNLIKELY(!asan_inited)) {
- new_ptr = AllocateFromLocalPool(size);
- } else {
- copy_size = size;
- new_ptr = asan_malloc(copy_size, &stack);
- }
- internal_memcpy(new_ptr, ptr, copy_size);
- return new_ptr;
- }
return asan_realloc(ptr, size, &stack);
}
+#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
-INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
- GET_STACK_TRACE_MALLOC;
- return asan_memalign(boundary, size, &stack, FROM_MALLOC);
-}
-
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
DTLS_on_libc_memalign(res, size);
return res;
}
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+}
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
GET_CURRENT_PC_BP_SP;
return asan_malloc_usable_size(ptr, pc, bp);
}
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
// We avoid including malloc.h for portability reasons.
// man mallinfo says the fields are "long", but the implementation uses int.
// It doesn't matter much -- we just need to make sure that the libc's mallinfo
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
GET_STACK_TRACE_MALLOC;
return asan_valloc(size, &stack);
}
+#if SANITIZER_INTERCEPT_PVALLOC
INTERCEPTOR(void*, pvalloc, uptr size) {
GET_STACK_TRACE_MALLOC;
return asan_pvalloc(size, &stack);
}
+#endif // SANITIZER_INTERCEPT_PVALLOC
INTERCEPTOR(void, malloc_stats, void) {
__asan_print_accumulated_stats();
} // namespace __asan
#endif // SANITIZER_ANDROID
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
+ // SANITIZER_NETBSD
free(ptr);
}
-ALLOCATION_FUNCTION_ATTRIBUTE
-void cfree(void *ptr) {
- CHECK(!"cfree() should not be used on Windows");
-}
-
ALLOCATION_FUNCTION_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
- CHECK(!"_realloc_dbg should not exist!");
+ UNREACHABLE("_realloc_dbg should not exist!");
return 0;
}
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
//
+// Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem ||
+// || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow ||
+// || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
// Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc).
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale
-
-#if SANITIZER_WORDSIZE == 32
+#if SANITIZER_FUCHSIA
+# define SHADOW_OFFSET (0)
+#elif SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# elif defined(__mips__)
# define SHADOW_OFFSET kSystemZ_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# elif SANITIZER_NETBSD
+# define SHADOW_OFFSET kNetBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
-#define SHADOW_TO_MEM(shadow) (((shadow) - SHADOW_OFFSET) << SHADOW_SCALE)
#define kLowMemBeg 0
#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
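A worked instance of the mapping, plugging in the NetBSD/x86-64 values shown earlier (SHADOW_SCALE == 3, SHADOW_OFFSET == 0x400000000000; the application address is just an example):

    // MEM_TO_SHADOW(0x7f0000001234)
    //   = (0x7f0000001234 >> 3) + 0x400000000000
    //   = 0x0fe000000246 + 0x400000000000
    //   = 0x4fe000000246          // lands in HighShadow, as expected
    // Each shadow byte covers SHADOW_GRANULARITY == 8 application bytes,
    // which is why every shadow region is 1/8 the size of what it maps.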
class HeapProfile {
public:
HeapProfile() : allocations_(1024) {}
- void Insert(u32 id, uptr size) {
- total_allocated_ += size;
- total_count_++;
- // Linear lookup will be good enough for most cases (although not all).
- for (uptr i = 0; i < allocations_.size(); i++) {
- if (allocations_[i].id == id) {
- allocations_[i].total_size += size;
- allocations_[i].count++;
- return;
- }
+
+ void ProcessChunk(const AsanChunkView& cv) {
+ if (cv.IsAllocated()) {
+ total_allocated_user_size_ += cv.UsedSize();
+ total_allocated_count_++;
+ u32 id = cv.GetAllocStackId();
+ if (id)
+ Insert(id, cv.UsedSize());
+ } else if (cv.IsQuarantined()) {
+ total_quarantined_user_size_ += cv.UsedSize();
+ total_quarantined_count_++;
+ } else {
+ total_other_count_++;
}
- allocations_.push_back({id, size, 1});
}
- void Print(uptr top_percent) {
+ void Print(uptr top_percent, uptr max_number_of_contexts) {
InternalSort(&allocations_, allocations_.size(),
[](const AllocationSite &a, const AllocationSite &b) {
return a.total_size > b.total_size;
});
- CHECK(total_allocated_);
+ CHECK(total_allocated_user_size_);
uptr total_shown = 0;
- Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
- "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
- for (uptr i = 0; i < allocations_.size(); i++) {
+ Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
+ "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
+ "showing top %zd%% (at most %zd unique contexts)\n",
+ total_allocated_user_size_, total_allocated_count_,
+ total_quarantined_user_size_, total_quarantined_count_,
+ total_other_count_, total_allocated_count_ +
+ total_quarantined_count_ + total_other_count_, top_percent,
+ max_number_of_contexts);
+ for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts);
+ i++) {
auto &a = allocations_[i];
Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
- a.total_size * 100 / total_allocated_, a.count);
+ a.total_size * 100 / total_allocated_user_size_, a.count);
StackDepotGet(a.id).Print();
total_shown += a.total_size;
- if (total_shown * 100 / total_allocated_ > top_percent)
+ if (total_shown * 100 / total_allocated_user_size_ > top_percent)
break;
}
}
private:
- uptr total_allocated_ = 0;
- uptr total_count_ = 0;
+ uptr total_allocated_user_size_ = 0;
+ uptr total_allocated_count_ = 0;
+ uptr total_quarantined_user_size_ = 0;
+ uptr total_quarantined_count_ = 0;
+ uptr total_other_count_ = 0;
InternalMmapVector<AllocationSite> allocations_;
+
+ void Insert(u32 id, uptr size) {
+ // Linear lookup will be good enough for most cases (although not all).
+ for (uptr i = 0; i < allocations_.size(); i++) {
+ if (allocations_[i].id == id) {
+ allocations_[i].total_size += size;
+ allocations_[i].count++;
+ return;
+ }
+ }
+ allocations_.push_back({id, size, 1});
+ }
};
static void ChunkCallback(uptr chunk, void *arg) {
- HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
- AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
- if (!cv.IsAllocated()) return;
- u32 id = cv.GetAllocStackId();
- if (!id) return;
- hp->Insert(id, cv.UsedSize());
+ reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
+ FindHeapChunkByAllocBeg(chunk));
}
static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
void *argument) {
HeapProfile hp;
__lsan::ForEachChunk(ChunkCallback, &hp);
- hp.Print(reinterpret_cast<uptr>(argument));
+ uptr *Arg = reinterpret_cast<uptr*>(argument);
+ hp.Print(Arg[0], Arg[1]);
+
+ if (Verbosity())
+ __asan_print_accumulated_stats();
}
} // namespace __asan
+#endif // CAN_SANITIZE_LEAKS
+
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_print_memory_profile(uptr top_percent) {
- __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
+void __sanitizer_print_memory_profile(uptr top_percent,
+ uptr max_number_of_contexts) {
+#if CAN_SANITIZE_LEAKS
+ uptr Arg[2];
+ Arg[0] = top_percent;
+ Arg[1] = max_number_of_contexts;
+ __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+#endif // CAN_SANITIZE_LEAKS
}
} // extern "C"
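A minimal sketch of calling the extended entry point from user code (the prototype mirrors the definition above; spelling uptr as unsigned long assumes an LP64 target):

    extern "C" void __sanitizer_print_memory_profile(unsigned long top_percent,
                                                     unsigned long max_contexts);
    int main() {
      // ... run the workload under ASan ...
      // Print the stacks covering up to 90% of live bytes, capped at
      // 8 unique allocation contexts.
      __sanitizer_print_memory_profile(90, 8);
      return 0;
    }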
-
-#endif // CAN_SANITIZE_LEAKS
// dllexport would normally do. We need to export them in order to make the
// VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS
-# define CXX_OPERATOR_ATTRIBUTE
-# ifdef _WIN64
-# pragma comment(linker, "/export:??2@YAPEAX_K@Z") // operator new
-# pragma comment(linker, "/export:??3@YAXPEAX@Z") // operator delete
-# pragma comment(linker, "/export:??3@YAXPEAX_K@Z") // sized operator delete
-# pragma comment(linker, "/export:??_U@YAPEAX_K@Z") // operator new[]
-# pragma comment(linker, "/export:??_V@YAXPEAX@Z") // operator delete[]
-# else
-# pragma comment(linker, "/export:??2@YAPAXI@Z") // operator new
-# pragma comment(linker, "/export:??3@YAXPAX@Z") // operator delete
-# pragma comment(linker, "/export:??3@YAXPAXI@Z") // sized operator delete
-# pragma comment(linker, "/export:??_U@YAPAXI@Z") // operator new[]
-# pragma comment(linker, "/export:??_V@YAXPAX@Z") // operator delete[]
-# endif
+#define CXX_OPERATOR_ATTRIBUTE
+#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym))
+#ifdef _WIN64
+COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new
+COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow
+COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete
+COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete
+COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[]
+COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[]
#else
-# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+COMMENT_EXPORT("??2@YAPAXI@Z") // operator new
+COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow
+COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete
+COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete
+COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[]
+COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
+#endif
+#undef COMMENT_EXPORT
+#else
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
#endif
using namespace __asan; // NOLINT
enum class align_val_t: size_t {};
} // namespace std
-#define OPERATOR_NEW_BODY(type) \
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(type, nothrow) \
GET_STACK_TRACE_MALLOC;\
- return asan_memalign(0, size, &stack, type);
-#define OPERATOR_NEW_BODY_ALIGN(type) \
+ void *res = asan_memalign(0, size, &stack, type);\
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
GET_STACK_TRACE_MALLOC;\
- return asan_memalign((uptr)align, size, &stack, type);
+ void *res = asan_memalign((uptr)align, size, &stack, type);\
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
+ return res;
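For reference, the nothrow array form below reduces after macro expansion to roughly this (a sketch of the expansion, not additional code):

    // void *operator new[](size_t size, std::nothrow_t const&) {
    //   GET_STACK_TRACE_MALLOC;
    //   void *res = asan_memalign(0, size, &stack, FROM_NEW_BR);
    //   // nothrow == true, so the OOM check folds away:
    //   //   if (!true && UNLIKELY(!res)) DieOnFailure::OnOOM();
    //   return res;  // may legitimately be nullptr
    // }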
// On OS X it's not enough to just provide our own 'operator new' and
// 'operator delete' implementations, because they're going to be in the
// OS X we need to intercept them using their mangled names.
#if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE
-void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
-void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FROM_NEW); }
+{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
+{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW); }
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); }
CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
-{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR); }
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); }
#else // SANITIZER_MAC
INTERCEPTOR(void *, _Znwm, size_t size) {
- OPERATOR_NEW_BODY(FROM_NEW);
+ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/);
}
INTERCEPTOR(void *, _Znam, size_t size) {
- OPERATOR_NEW_BODY(FROM_NEW_BR);
+ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/);
}
INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(FROM_NEW);
+ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/);
}
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(FROM_NEW_BR);
+ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
}
#endif
};
void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- uptr page_size = GetPageSizeCached();
- uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
- uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
+  // Since asan's mapping is compacting, the shadow chunk may not be
+  // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
// ending with end.
uptr kMaxRangeToCheck = 32;
uptr r1_beg = beg;
- uptr r1_end = Min(end + kMaxRangeToCheck, mid);
+ uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
uptr r2_end = Min(end, mid + kMaxRangeToCheck);
uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
// for mapping shadow and zeroing out pages doesn't "just work", so we should
// probably provide higher-level interface for these operations.
// For now, just memset on Windows.
- if (value ||
- SANITIZER_WINDOWS == 1 ||
+ if (value || SANITIZER_WINDOWS == 1 ||
+ // TODO(mcgrathr): Fuchsia doesn't allow the shadow mapping to be
+ // changed at all. It doesn't currently have an efficient means
+ // to zero a bunch of pages, but maybe we should add one.
+ SANITIZER_FUCHSIA == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
}
}
-// Calls __sanitizer::ReleaseMemoryToOS() on
-// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
+// Calls __sanitizer::ReleaseMemoryPagesToOS() on
+// [MemToShadow(p), MemToShadow(p+size)].
void FlushUnneededASanShadowMemory(uptr p, uptr size);
} // namespace __asan
namespace __asan {
-const char *DescribeSignalOrException(int signo) {
- switch (signo) {
- case SIGFPE:
- return "FPE";
- case SIGILL:
- return "ILL";
- case SIGABRT:
- return "ABRT";
- default:
- return "SEGV";
- }
-}
-
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
- ScopedDeadlySignal signal_scope(GetCurrentThread());
- int code = (int)((siginfo_t*)siginfo)->si_code;
- // Write the first message using fd=2, just in case.
- // It may actually fail to write in case stderr is closed.
- internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
- SignalContext sig = SignalContext::Create(siginfo, context);
-
- // Access at a reasonable offset above SP, or slightly below it (to account
- // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
- // probably a stack overflow.
-#ifdef __s390__
- // On s390, the fault address in siginfo points to start of the page, not
- // to the precise word that was accessed. Mask off the low bits of sp to
- // take it into account.
- bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
- sig.addr < sig.sp + 0xFFFF;
-#else
- bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
-#endif
-
-#if __powerpc__
- // Large stack frames can be allocated with e.g.
- // lis r0,-10000
- // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
- // If the store faults then sp will not have been updated, so test above
- // will not work, becase the fault address will be more than just "slightly"
- // below sp.
- if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
- u32 inst = *(unsigned *)sig.pc;
- u32 ra = (inst >> 16) & 0x1F;
- u32 opcd = inst >> 26;
- u32 xo = (inst >> 1) & 0x3FF;
- // Check for store-with-update to sp. The instructions we accept are:
- // stbu rs,d(ra) stbux rs,ra,rb
- // sthu rs,d(ra) sthux rs,ra,rb
- // stwu rs,d(ra) stwux rs,ra,rb
- // stdu rs,ds(ra) stdux rs,ra,rb
- // where ra is r1 (the stack pointer).
- if (ra == 1 &&
- (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
- (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
- IsStackAccess = true;
- }
-#endif // __powerpc__
-
- // We also check si_code to filter out SEGV caused by something else other
- // then hitting the guard page or unmapped memory, like, for example,
- // unaligned memory access.
- if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
- ReportStackOverflow(sig);
- else
- ReportDeadlySignal(signo, sig);
+ StartReportDeadlySignal();
+ SignalContext sig(siginfo, context);
+ ReportDeadlySignal(sig);
}
// ---------------------- TSD ---------------- {{{1
bool in_shadow, const char *after) {
Decorator d;
str->append("%s%s%x%x%s%s", before,
- in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
- byte >> 4, byte & 15,
- in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
+ byte & 15, d.Default(), after);
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
char *p;
// This string is created by the compiler and has the following form:
// "n alloc_1 alloc_2 ... alloc_n"
- // where alloc_i looks like "offset size len ObjectName".
+ // where alloc_i looks like "offset size len ObjectName"
+ // or "offset size len ObjectName:line".
uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
if (n_objects == 0)
return false;
return false;
}
p++;
- StackVarDescr var = {beg, size, p, len};
+ char *colon_pos = internal_strchr(p, ':');
+ uptr line = 0;
+ uptr name_len = len;
+ if (colon_pos != nullptr && colon_pos < p + len) {
+ name_len = colon_pos - p;
+ line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
+ }
+ StackVarDescr var = {beg, size, p, name_len, line};
vars->push_back(var);
p += len;
}
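A concrete (invented) frame description, to illustrate the grammar the parser above expects:

    // "2 32 10 4 vla1 64 8 9 buffer:42"
    //   n_objects = 2
    //   object 1: offset 32, size 10, len 4 -> name "vla1", line 0
    //   object 2: offset 64, size 8,  len 9 -> "buffer:42" splits at the
    //             colon into name "buffer" (name_len 6) and line 42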
// immediately after printing error report.
class ScopedInErrorReport {
public:
- explicit ScopedInErrorReport(bool fatal = false) {
- halt_on_error_ = fatal || flags()->halt_on_error;
-
- if (lock_.TryLock()) {
- StartReporting();
- return;
- }
-
- // ASan found two bugs in different threads simultaneously.
-
- u32 current_tid = GetCurrentTidOrInvalid();
- if (reporting_thread_tid_ == current_tid ||
- reporting_thread_tid_ == kInvalidTid) {
- // This is either asynch signal or nested error during error reporting.
- // Fail simple to avoid deadlocks in Report().
-
- // Can't use Report() here because of potential deadlocks
- // in nested signal handlers.
- const char msg[] = "AddressSanitizer: nested bug in the same thread, "
- "aborting.\n";
- WriteToFile(kStderrFd, msg, sizeof(msg));
-
- internal__exit(common_flags()->exitcode);
- }
-
- if (halt_on_error_) {
- // Do not print more than one report, otherwise they will mix up.
- // Error reporting functions shouldn't return at this situation, as
- // they are effectively no-returns.
-
- Report("AddressSanitizer: while reporting a bug found another one. "
- "Ignoring.\n");
-
- // Sleep long enough to make sure that the thread which started
- // to print an error report will finish doing it.
- SleepForSeconds(Max(100, flags()->sleep_before_dying + 1));
-
- // If we're still not dead for some reason, use raw _exit() instead of
- // Die() to bypass any additional checks.
- internal__exit(common_flags()->exitcode);
- } else {
- // The other thread will eventually finish reporting
- // so it's safe to wait
- lock_.Lock();
- }
-
- StartReporting();
+ explicit ScopedInErrorReport(bool fatal = false)
+ : halt_on_error_(fatal || flags()->halt_on_error) {
+ // Make sure the registry and sanitizer report mutexes are locked while
+ // we're printing an error report.
+ // We can lock them only here to avoid self-deadlock in case of
+ // recursive reports.
+ asanThreadRegistry().Lock();
+ Printf(
+ "=================================================================\n");
}
~ScopedInErrorReport() {
if (common_flags()->print_cmdline)
PrintCmdline();
+ if (common_flags()->print_module_map == 2) PrintModuleMap();
+
    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize);
error_report_callback(buffer_copy.data());
}
+ if (halt_on_error_ && common_flags()->abort_on_error) {
+ // On Android the message is truncated to 512 characters.
+ // FIXME: implement "compact" error format, possibly without, or with
+ // highly compressed stack traces?
+ // FIXME: or just use the summary line as abort message?
+ SetAbortMessage(buffer_copy.data());
+ }
+
// In halt_on_error = false mode, reset the current error object (before
// unlocking).
if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));
- CommonSanitizerReportMutex.Unlock();
- reporting_thread_tid_ = kInvalidTid;
- lock_.Unlock();
if (halt_on_error_) {
Report("ABORTING\n");
Die();
}
private:
- void StartReporting() {
- // Make sure the registry and sanitizer report mutexes are locked while
- // we're printing an error report.
- // We can lock them only here to avoid self-deadlock in case of
- // recursive reports.
- asanThreadRegistry().Lock();
- CommonSanitizerReportMutex.Lock();
- reporting_thread_tid_ = GetCurrentTidOrInvalid();
- Printf("===================================================="
- "=============\n");
- }
-
- static StaticSpinMutex lock_;
- static u32 reporting_thread_tid_;
+ ScopedErrorReportLock error_report_lock_;
// Error currently being reported. This enables the destructor to interact
// with the debugger and point it to an error description.
static ErrorDescription current_error_;
bool halt_on_error_;
};
-StaticSpinMutex ScopedInErrorReport::lock_;
-u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
ErrorDescription ScopedInErrorReport::current_error_;
-void ReportStackOverflow(const SignalContext &sig) {
+void ReportDeadlySignal(const SignalContext &sig) {
ScopedInErrorReport in_report(/*fatal*/ true);
- ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig);
- in_report.ReportError(error);
-}
-
-void ReportDeadlySignal(int signo, const SignalContext &sig) {
- ScopedInErrorReport in_report(/*fatal*/ true);
- ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo);
+ ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
in_report.ReportError(error);
}
}
int __asan_report_present() {
- return ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric;
+ return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}
uptr __asan_get_report_pc() {
}
uptr __asan_get_report_address() {
- if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
- return ScopedInErrorReport::CurrentError()
- .Generic.addr_description.Address();
+ ErrorDescription &err = ScopedInErrorReport::CurrentError();
+ if (err.kind == kErrorKindGeneric)
+ return err.Generic.addr_description.Address();
+ else if (err.kind == kErrorKindDoubleFree)
+ return err.DoubleFree.addr_description.addr;
return 0;
}
const char *__asan_get_report_description() {
if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
return ScopedInErrorReport::CurrentError().Generic.bug_descr;
- return nullptr;
+ return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}
extern "C" {
}
} // extern "C"
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide a default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
-void __asan_on_error() {}
-#endif
+SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
uptr size;
const char *name_pos;
uptr name_len;
+ uptr line;
};
// Returns the number of globals close to the provided address and copies
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
-void ReportStackOverflow(const SignalContext &sig);
-void ReportDeadlySignal(int signo, const SignalContext &sig);
+void ReportDeadlySignal(const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
// Don't die twice - run a busy loop.
while (1) { }
}
+ if (common_flags()->print_module_map >= 1) PrintModuleMap();
if (flags()->sleep_before_dying) {
Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
SleepForSeconds(flags()->sleep_before_dying);
Die();
}
-// ---------------------- mmap -------------------- {{{1
-// Reserve memory range [beg, end].
-// We need to use inclusive range because end+1 may not be representable.
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- void *res = MmapFixedNoReserve(beg, size, name);
- if (res != (void*)beg) {
- Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n", size);
- Abort();
- }
- if (common_flags()->no_huge_pages_for_shadow)
- NoHugePagesInRegion(beg, size);
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(beg, size);
-}
-
// --------------- LowLevelAllocateCallback ---------- {{{1
static void OnLowLevelAllocate(uptr ptr, uptr size) {
PoisonShadow(ptr, size, kAsanInternalHeapMagic);
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
-static void ProtectGap(uptr addr, uptr size) {
- if (!flags()->protect_shadow_gap) {
- // The shadow gap is unprotected, so there is a chance that someone
- // is actually using this memory. Which means it needs a shadow...
- uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
- uptr GapShadowEnd =
- RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
- if (Verbosity())
- Printf("protect_shadow_gap=0:"
- " not protecting shadow gap, allocating gap's shadow\n"
- "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
- GapShadowEnd);
- ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
- "unprotected gap shadow");
- return;
- }
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == kZeroBaseShadowStart) {
- uptr step = GetMmapGranularity();
- while (size > step && addr < kZeroBaseMaxShadowStart) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- }
- }
-
- Report("ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
- DumpProcessMap();
- Die();
-}
-
-static void PrintAddressSpaceLayout() {
+void PrintAddressSpaceLayout() {
Printf("|| `[%p, %p]` || HighMem ||\n",
(void*)kHighMemBeg, (void*)kHighMemEnd);
Printf("|| `[%p, %p]` || HighShadow ||\n",
Printf("redzone=%zu\n", (uptr)flags()->redzone);
Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
+ Printf("thread_local_quarantine_size_kb=%zuK\n",
+ (uptr)flags()->thread_local_quarantine_size_kb);
Printf("malloc_context_size=%zu\n",
(uptr)common_flags()->malloc_context_size);
ReplaceSystemMalloc();
- // Set the shadow memory address to uninitialized.
- __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
-
- uptr shadow_start = kLowShadowBeg;
- // Detect if a dynamic shadow address must used and find a available location
- // when necessary. When dynamic address is used, the macro |kLowShadowBeg|
- // expands to |__asan_shadow_memory_dynamic_address| which is
- // |kDefaultShadowSentinel|.
- if (shadow_start == kDefaultShadowSentinel) {
- __asan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, kLowShadowBeg);
-
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
-
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- }
- // Update the shadow memory address (potentially) used by instrumentation.
- __asan_shadow_memory_dynamic_address = shadow_start;
-
- if (kLowShadowBeg)
- shadow_start -= GetMmapGranularity();
- bool full_shadow_is_available =
- MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-
-#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
- !ASAN_FIXED_MAPPING
- if (!full_shadow_is_available) {
- kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
- kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
- }
-#endif
-
- if (Verbosity()) PrintAddressSpaceLayout();
-
DisableCoreDumperIfNecessary();
- if (full_shadow_is_available) {
- // mmap the low shadow plus at least one page at the left.
- if (kLowShadowBeg)
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gap.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
- } else if (kMidMemBeg &&
- MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
- MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
- CHECK(kLowShadowBeg != kLowShadowEnd);
- // mmap the low shadow plus at least one page at the left.
- ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
- // mmap the mid shadow.
- ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
- // mmap the high shadow.
- ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
- // protect the gaps.
- ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
- ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
- ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
- } else {
- Report("Shadow memory range interleaves with an existing memory mapping. "
- "ASan cannot proceed correctly. ABORTING.\n");
- Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
- shadow_start, kHighShadowEnd);
- DumpProcessMap();
- Die();
- }
+ InitializeShadowMemory();
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnDeadlySignal);
InitTlsSize();
// Create main thread.
- AsanThread *main_thread = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
- /* stack */ nullptr, /* detached */ true);
+ AsanThread *main_thread = CreateMainThread();
CHECK_EQ(0, main_thread->tid());
- SetCurrentThread(main_thread);
- main_thread->ThreadStart(internal_getpid(),
- /* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
if (CAN_SANITIZE_LEAKS) {
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
- Atexit(__lsan::DoLeakCheck);
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
}
}
}
VReport(1, "AddressSanitizer Init done\n");
+
+ if (flags()->sleep_after_init) {
+ Report("Sleeping for %d second(s)\n", flags()->sleep_after_init);
+ SleepForSeconds(flags()->sleep_after_init);
+ }
}
// Initialize as requested from some part of ASan runtime library (interceptors,
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
} else {
+ CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds.
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
};
int GetScore() const { return score; }
const char *GetDescription() const { return descr; }
- void Print() {
+ void Print() const {
if (score && flags()->print_scariness)
Printf("SCARINESS: %d (%s)\n", score, descr);
}
--- /dev/null
+//===-- asan_shadow_setup.cc ----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Set up the shadow memory.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+// asan_fuchsia.cc has its own InitializeShadowMemory implementation.
+#if !SANITIZER_FUCHSIA
+
+#include "asan_internal.h"
+#include "asan_mapping.h"
+
+namespace __asan {
+
+// ---------------------- mmap -------------------- {{{1
+// Reserve memory range [beg, end].
+// We need to use an inclusive range because end + 1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ void *res = MmapFixedNoReserve(beg, size, name);
+ if (res != (void *)beg) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+ if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
+}
+
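+// Editorial sketch, not part of the patch: why the range above is inclusive.
+// With an exclusive end, a range running to the last representable address
+// would need to compute end + 1, which wraps to 0. In a 32-bit address space:
+//
+//   uint32_t beg = 0xFFFFF000u, end = 0xFFFFFFFFu;  // inclusive [beg, end]
+//   uint32_t size = end - beg + 1;                  // 0x1000, no wraparound
+//
+// whereas an exclusive bound for the same range would be 0x100000000,
+// unrepresentable in 32 bits.
+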
+static void ProtectGap(uptr addr, uptr size) {
+ if (!flags()->protect_shadow_gap) {
+ // The shadow gap is unprotected, so there is a chance that someone
+ // is actually using this memory, which means it needs a shadow.
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf(
+ "protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n",
+ GapShadowBeg, GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
+ return;
+ }
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res) return;
+ // A few pages at the start of the address space cannot be protected, but
+ // we really want to protect as much as possible, to prevent this memory
+ // from being returned as a result of a non-FIXED mmap().
+ if (addr == kZeroBaseShadowStart) {
+ uptr step = GetMmapGranularity();
+ while (size > step && addr < kZeroBaseMaxShadowStart) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res) return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect the shadow gap. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
+}
+
+static void MaybeReportLinuxPIEBug() {
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
+ Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
+ Report(
+ "See https://github.com/google/sanitizers/issues/856 for possible "
+ "workarounds.\n");
+#endif
+}
+
+void InitializeShadowMemory() {
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
+ uptr shadow_start = kLowShadowBeg;
+ // Detect whether a dynamic shadow address must be used and find an
+ // available location when necessary. When a dynamic address is used, the
+ // macro |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|,
+ // which is |kDefaultShadowSentinel|.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+ shadow_start = FindDynamicShadowStart();
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
+ if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
+ bool full_shadow_is_available =
+ MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
+
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
+ if (!full_shadow_is_available) {
+ kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
+ kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
+ }
+#endif
+
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ if (full_shadow_is_available) {
+ // mmap the low shadow plus at least one page at the left.
+ if (kLowShadowBeg)
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gap.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ } else if (kMidMemBeg &&
+ MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
+ MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
+ CHECK(kLowShadowBeg != kLowShadowEnd);
+ // mmap the low shadow plus at least one page at the left.
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the mid shadow.
+ ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gaps.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
+ ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
+ } else {
+ Report(
+ "Shadow memory range interleaves with an existing memory mapping. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
+ MaybeReportLinuxPIEBug();
+ DumpProcessMap();
+ Die();
+ }
+}
+
+} // namespace __asan
+
+#endif // !SANITIZER_FUCHSIA
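+
+// Editorial sketch, not part of the patch: the layout reserved above follows
+// the usual ASan direct mapping, shadow = (addr >> SHADOW_SCALE) + offset,
+// with SHADOW_SCALE == 3 (one shadow byte per 8 application bytes). The
+// offset below is the Linux/x86_64 default and is illustrative only; the
+// real values come from asan_mapping.h.
+//
+//   #include <cstdint>
+//   uintptr_t MemToShadowSketch(uintptr_t addr) {
+//     return (addr >> 3) + 0x7fff8000;  // assumed scale and offset
+//   }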
stack->size = 0;
if (LIKELY(asan_inited)) {
if ((t = GetCurrentThread()) && !t->isUnwinding()) {
- // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
- // yields the call stack of the signal's handler and not of the code
- // that raised the signal (as it does on Linux).
- if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
kODRViolation};
-extern "C" {
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__asan_default_suppressions();
-#else
-// No week hooks, provide empty implementation.
-const char *__asan_default_suppressions() { return ""; }
-#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
-} // extern "C"
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) {
+ return "";
+}
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
// AsanThreadContext implementation.
-struct CreateThreadContextArgs {
- AsanThread *thread;
- StackTrace *stack;
-};
-
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
- CreateThreadContextArgs args = { thread, stack };
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);
}
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
- if (!atomic_load(&stack_switching_, memory_order_acquire))
- return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+ if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+ // Make sure the stack bounds are fully initialized.
+ if (stack_bottom_ >= stack_top_) return {0, 0};
+ return {stack_bottom_, stack_top_};
+ }
char local;
const uptr cur_stack = (uptr)&local;
// Note: need to check next stack first, because FinishSwitchFiber
// may be in process of overwriting stack_top_/bottom_. But in such case
// we are already on the next stack.
if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
- return StackBounds{next_stack_bottom_, next_stack_top_}; // NOLINT
- return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+ return {next_stack_bottom_, next_stack_top_};
+ return {stack_bottom_, stack_top_};
}
uptr AsanThread::stack_top() {
return nullptr;
}
-void AsanThread::Init() {
+void AsanThread::Init(const InitOptions *options) {
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
- SetThreadStackAndTls();
+ SetThreadStackAndTls(options);
CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
&local);
}
+// Fuchsia doesn't use ThreadStart.
+// asan_fuchsia.cc defines CreateMainThread and SetThreadStackAndTls.
+#if !SANITIZER_FUCHSIA
+
thread_return_t AsanThread::ThreadStart(
- uptr os_id, atomic_uintptr_t *signal_thread_is_registered) {
+ tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
- asanThreadRegistry().StartThread(tid(), os_id, nullptr);
+ asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
+ nullptr);
if (signal_thread_is_registered)
atomic_store(signal_thread_is_registered, 1, memory_order_release);
return res;
}
-void AsanThread::SetThreadStackAndTls() {
+AsanThread *CreateMainThread() {
+ AsanThread *main_thread = AsanThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* stack */ nullptr, /* detached */ true);
+ SetCurrentThread(main_thread);
+ main_thread->ThreadStart(internal_getpid(),
+ /* signal_thread_is_registered */ nullptr);
+ return main_thread;
+}
+
+// This implementation doesn't use the options argument, which is just
+// passed down from the caller of Init (see above). It's only there to
+// support OS-specific implementations that need more information passed
+// through.
+void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
+ DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
CHECK(AddrIsInStack((uptr)&local));
}
+#endif // !SANITIZER_FUCHSIA
+
void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_)
return true;
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
+ uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr != kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
}
while (shadow_ptr >= shadow_bottom &&
*shadow_ptr == kAsanStackLeftRedzoneMagic) {
shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
}
if (shadow_ptr < shadow_bottom) {
return false;
}
- uptr* ptr = (uptr*)SHADOW_TO_MEM((uptr)(shadow_ptr + 1));
+ uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
CHECK(ptr[0] == kCurrentStackFrameMagic);
access->offset = addr - (uptr)ptr;
access->frame_pc = ptr[2];
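// Editorial note, not part of the patch: the hunk above mirrors every
// shadow_ptr-- with mem_ptr -= SHADOW_GRANULARITY so the frame base can be
// recovered without SHADOW_TO_MEM, which is not invertible for every shadow
// mapping. A toy version of the scan, assuming 8-byte granularity and 0xf1
// as the left-redzone magic:
//
//   uint8_t shadow[] = {0xf1, 0xf1, 0x00, 0x00, 0x00};  // toy frame shadow
//   uintptr_t mem = 0x1000 + 4 * 8;  // app address paired with shadow[4]
//   int i = 4;
//   while (i >= 0 && shadow[i] != 0xf1) { i--; mem -= 8; }  // skip body
//   while (i >= 0 && shadow[i] == 0xf1) { i--; mem -= 8; }  // skip redzone
//   // mem + 8 is now the frame start (bottom of the left redzone), where
//   // kCurrentStackFrameMagic is stored.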
context->os_id = GetTid();
}
-__asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
+__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
__asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
__asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
if (!context) return nullptr;
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
return true;
}
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (t && t->has_fake_stack())
void OnCreated(void *arg) override;
void OnFinished() override;
+
+ struct CreateThreadContextArgs {
+ AsanThread *thread;
+ StackTrace *stack;
+ };
};
// AsanThreadContext objects are never freed, so we need many of them.
static void TSDDtor(void *tsd);
void Destroy();
- void Init(); // Should be called from the thread itself.
- thread_return_t ThreadStart(uptr os_id,
+ struct InitOptions;
+ void Init(const InitOptions *options = nullptr);
+
+ thread_return_t ThreadStart(tid_t os_id,
atomic_uintptr_t *signal_thread_is_registered);
uptr stack_top();
bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding_ = b; }
- // True if we are in a deadly signal handler.
- bool isInDeadlySignal() const { return in_deadly_signal_; }
- void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
-
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
- void SetThreadStackAndTls();
+
+ void SetThreadStackAndTls(const InitOptions *options);
+
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding_;
- bool in_deadly_signal_;
};
// ScopedUnwinding is a scope for stacktracing member of a context
AsanThread *thread;
};
-// ScopedDeadlySignal is a scope for handling deadly signals.
-class ScopedDeadlySignal {
- public:
- explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
- if (thread) thread->setInDeadlySignal(true);
- }
- ~ScopedDeadlySignal() {
- if (thread) thread->setInDeadlySignal(false);
- }
-
- private:
- AsanThread *thread;
-};
-
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_win.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
using namespace __asan; // NOLINT
__asan_init();
return __asan_shadow_memory_dynamic_address;
}
-
-// -------------------- A workaround for the absence of weak symbols ----- {{{
-// We don't have a direct equivalent of weak symbols when using MSVC, but we can
-// use the /alternatename directive to tell the linker to default a specific
-// symbol to a specific value, which works nicely for allocator hooks and
-// __asan_default_options().
-void __sanitizer_default_malloc_hook(void *ptr, uptr size) { }
-void __sanitizer_default_free_hook(void *ptr) { }
-const char* __asan_default_default_options() { return ""; }
-const char* __asan_default_default_suppressions() { return ""; }
-void __asan_default_on_error() {}
-// 64-bit msvc will not prepend an underscore for symbols.
-#ifdef _WIN64
-#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook") // NOLINT
-#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions") // NOLINT
-#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error") // NOLINT
-#else
-#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
-#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
-#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
-#endif
-// }}}
} // extern "C"
// ---------------------- Windows-specific interceptors ---------------- {{{
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ SignalContext sig(exception_record, context);
+ ReportDeadlySignal(sig);
+ UNREACHABLE("returned from reporting deadly signal");
+}
+
+// Wrapper SEH handler. If the exception should be handled by asan, we call
+// __asan_unhandled_exception_filter; otherwise we execute the user-provided
+// exception handler or the default one.
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ DWORD exception_code = info->ExceptionRecord->ExceptionCode;
+ if (__sanitizer::IsHandledDeadlyException(exception_code))
+ return __asan_unhandled_exception_filter(info);
+ if (user_seh_handler)
+ return user_seh_handler(info);
+ // Bubble out to the default exception filter.
+ if (default_seh_handler)
+ return default_seh_handler(info);
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
+ LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
+ CHECK(REAL(SetUnhandledExceptionFilter));
+ if (ExceptionFilter == &SEHHandler)
+ return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
+ // We record the user-provided exception handler to be called for all the
+ // exceptions unhandled by asan.
+ Swap(ExceptionFilter, user_seh_handler);
+ return ExceptionFilter;
+}
+
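+// Editorial sketch, not part of the patch: the delegation order this sets
+// up. A user filter installed after asan initializes is recorded rather
+// than installed, so SEHHandler stays first in line (MyFilter is a
+// hypothetical name):
+//
+//   #include <windows.h>
+//   static LONG WINAPI MyFilter(EXCEPTION_POINTERS *info) {
+//     return EXCEPTION_CONTINUE_SEARCH;
+//   }
+//   // Recorded in user_seh_handler; called only for exceptions that
+//   // IsHandledDeadlyException() says asan should not report itself.
+//   SetUnhandledExceptionFilter(&MyFilter);
+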
INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
CHECK(REAL(RtlRaiseException));
// This is a noreturn function, unless it's one of the exceptions raised to
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
#ifdef _WIN64
ASAN_INTERCEPT_FUNC(__C_specific_handler);
return 0;
}
+uptr FindDynamicShadowStart() {
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+ uptr shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
#endif
}
-static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
-
-// Check based on flags if we should report this exception.
-static bool ShouldReportDeadlyException(unsigned code) {
- switch (code) {
- case EXCEPTION_ACCESS_VIOLATION:
- case EXCEPTION_IN_PAGE_ERROR:
- return common_flags()->handle_segv;
- case EXCEPTION_BREAKPOINT:
- case EXCEPTION_ILLEGAL_INSTRUCTION: {
- return common_flags()->handle_sigill;
- }
- }
- return false;
-}
-
-// Return the textual name for this exception.
-const char *DescribeSignalOrException(int signo) {
- unsigned code = signo;
- // Get the string description of the exception if this is a known deadly
- // exception.
- switch (code) {
- case EXCEPTION_ACCESS_VIOLATION:
- return "access-violation";
- case EXCEPTION_IN_PAGE_ERROR:
- return "in-page-error";
- case EXCEPTION_BREAKPOINT:
- return "breakpoint";
- case EXCEPTION_ILLEGAL_INSTRUCTION:
- return "illegal-instruction";
- }
- return nullptr;
-}
-
-static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
- EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
- CONTEXT *context = info->ContextRecord;
-
- if (ShouldReportDeadlyException(exception_record->ExceptionCode)) {
- SignalContext sig = SignalContext::Create(exception_record, context);
- ReportDeadlySignal(exception_record->ExceptionCode, sig);
- }
-
- // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
-
- return default_seh_handler(info);
+bool IsSystemHeapAddress(uptr addr) {
+ return ::HeapValidate(GetProcessHeap(), 0, (void*)addr) != FALSE;
}
// We want to install our own exception handler (EH) to print helpful reports
// immediately after the CRT runs. This way, our exception filter is called
// first and we can delegate to their filter if appropriate.
#pragma section(".CRT$XCAB", long, read) // NOLINT
-__declspec(allocate(".CRT$XCAB"))
- int (*__intercept_seh)() = __asan_set_seh_filter;
+__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
+ __asan_set_seh_filter;
+
+// Piggyback on the TLS initialization callback directory to initialize asan as
+// early as possible. Initializers in .CRT$XL* are called directly by ntdll
+// and run before the CRT. Users also add code to .CRT$XLC, so it's important
+// to run our initializers first.
+static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
#endif
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
// }}}
} // namespace __asan
-#endif // _WIN32
+#endif // SANITIZER_WINDOWS
// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
-// Only compile this code when building asan_dll_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DLL_THUNK
+#ifdef SANITIZER_DLL_THUNK
#include "asan_init_version.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
-// ---------- Function interception helper functions and macros ----------- {{{1
-extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
-void abort();
-}
-
-using namespace __sanitizer;
-
-static uptr getRealProcAddressOrDie(const char *name) {
- uptr ret =
- __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
- if (!ret)
- abort();
- return ret;
-}
-
-// We need to intercept some functions (e.g. ASan interface, memory allocator --
-// let's call them "hooks") exported by the DLL thunk and forward the hooks to
-// the runtime in the main module.
-// However, we don't want to keep two lists of these hooks.
-// To avoid that, the list of hooks should be defined using the
-// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
-// at once by calling INTERCEPT_HOOKS().
-
-// Use macro+template magic to automatically generate the list of hooks.
-// Each hook at line LINE defines a template class with a static
-// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
-// The default implementation of FunctionInterceptor<LINE> is to call
-// the Execute() method corresponding to the previous line.
-template<int LINE>
-struct FunctionInterceptor {
- static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
-};
-
-// There shouldn't be any hooks with negative definition line number.
-template<>
-struct FunctionInterceptor<0> {
- static void Execute() {}
-};
-
-#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
- template <> struct FunctionInterceptor<__LINE__> { \
- static void Execute() { \
- uptr wrapper = getRealProcAddressOrDie(main_function); \
- if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \
- abort(); \
- FunctionInterceptor<__LINE__ - 1>::Execute(); \
- } \
- };
-
-// Special case of hooks -- ASan own interface functions. Those are only called
-// after __asan_init, thus an empty implementation is sufficient.
-#define INTERFACE_FUNCTION(name) \
- extern "C" __declspec(noinline) void name() { \
- volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
- __debugbreak(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name)
-
-// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
-#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
-
-// We can't define our own version of strlen etc. because that would lead to
-// link-time or even type mismatch errors. Instead, we can declare a function
-// just to be able to get its address. Me may miss the first few calls to the
-// functions since it can be called before __asan_init, but that would lead to
-// false negatives in the startup code before user's global initializers, which
-// isn't a big deal.
-#define INTERCEPT_LIBRARY_FUNCTION(name) \
- extern "C" void name(); \
- INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
-
-// Disable compiler warnings that show up if we declare our own version
-// of a compiler intrinsic (e.g. strlen).
-#pragma warning(disable: 4391)
-#pragma warning(disable: 4392)
-
-static void InterceptHooks();
-// }}}
-
-// ---------- Function wrapping helpers ----------------------------------- {{{1
-#define WRAP_V_V(name) \
- extern "C" void name() { \
- typedef void (*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_W(name) \
- extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_WW(name) \
- extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_V_WWW(name) \
- extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2, arg3); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_V(name) \
- extern "C" void *name() { \
- typedef void *(*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_W(name) \
- extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WW(name) \
- extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-
-#define WRAP_W_WWWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
- } \
- INTERCEPT_WHEN_POSSIBLE(#name, name);
-// }}}
-
-// ----------------- ASan own interface functions --------------------
-// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
-// want to call it in the __asan_init interceptor.
-WRAP_W_V(__asan_should_detect_stack_use_after_return)
-WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
-
-extern "C" {
- int __asan_option_detect_stack_use_after_return;
- uptr __asan_shadow_memory_dynamic_address;
-
- // Manually wrap __asan_init as we need to initialize
- // __asan_option_detect_stack_use_after_return afterwards.
- void __asan_init() {
- typedef void (*fntype)();
- static fntype fn = 0;
- // __asan_init is expected to be called by only one thread.
- if (fn) return;
-
- fn = (fntype)getRealProcAddressOrDie("__asan_init");
- fn();
- __asan_option_detect_stack_use_after_return =
- (__asan_should_detect_stack_use_after_return() != 0);
- __asan_shadow_memory_dynamic_address =
- (uptr)__asan_get_shadow_memory_dynamic_address();
- InterceptHooks();
- }
-}
-
-extern "C" void __asan_version_mismatch_check() {
- // Do nothing.
-}
-
-INTERFACE_FUNCTION(__asan_handle_no_return)
-
-INTERFACE_FUNCTION(__asan_report_store1)
-INTERFACE_FUNCTION(__asan_report_store2)
-INTERFACE_FUNCTION(__asan_report_store4)
-INTERFACE_FUNCTION(__asan_report_store8)
-INTERFACE_FUNCTION(__asan_report_store16)
-INTERFACE_FUNCTION(__asan_report_store_n)
-
-INTERFACE_FUNCTION(__asan_report_load1)
-INTERFACE_FUNCTION(__asan_report_load2)
-INTERFACE_FUNCTION(__asan_report_load4)
-INTERFACE_FUNCTION(__asan_report_load8)
-INTERFACE_FUNCTION(__asan_report_load16)
-INTERFACE_FUNCTION(__asan_report_load_n)
-
-INTERFACE_FUNCTION(__asan_store1)
-INTERFACE_FUNCTION(__asan_store2)
-INTERFACE_FUNCTION(__asan_store4)
-INTERFACE_FUNCTION(__asan_store8)
-INTERFACE_FUNCTION(__asan_store16)
-INTERFACE_FUNCTION(__asan_storeN)
-
-INTERFACE_FUNCTION(__asan_load1)
-INTERFACE_FUNCTION(__asan_load2)
-INTERFACE_FUNCTION(__asan_load4)
-INTERFACE_FUNCTION(__asan_load8)
-INTERFACE_FUNCTION(__asan_load16)
-INTERFACE_FUNCTION(__asan_loadN)
-
-INTERFACE_FUNCTION(__asan_memcpy);
-INTERFACE_FUNCTION(__asan_memset);
-INTERFACE_FUNCTION(__asan_memmove);
-
-INTERFACE_FUNCTION(__asan_set_shadow_00);
-INTERFACE_FUNCTION(__asan_set_shadow_f1);
-INTERFACE_FUNCTION(__asan_set_shadow_f2);
-INTERFACE_FUNCTION(__asan_set_shadow_f3);
-INTERFACE_FUNCTION(__asan_set_shadow_f5);
-INTERFACE_FUNCTION(__asan_set_shadow_f8);
-
-INTERFACE_FUNCTION(__asan_alloca_poison);
-INTERFACE_FUNCTION(__asan_allocas_unpoison);
-
-INTERFACE_FUNCTION(__asan_register_globals)
-INTERFACE_FUNCTION(__asan_unregister_globals)
-
-INTERFACE_FUNCTION(__asan_before_dynamic_init)
-INTERFACE_FUNCTION(__asan_after_dynamic_init)
-
-INTERFACE_FUNCTION(__asan_poison_stack_memory)
-INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+// ASan own interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
-INTERFACE_FUNCTION(__asan_poison_memory_region)
-INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+// Memory allocation functions.
+INTERCEPT_WRAP_V_W(free)
+INTERCEPT_WRAP_V_W(_free_base)
+INTERCEPT_WRAP_V_WW(_free_dbg)
-INTERFACE_FUNCTION(__asan_address_is_poisoned)
-INTERFACE_FUNCTION(__asan_region_is_poisoned)
+INTERCEPT_WRAP_W_W(malloc)
+INTERCEPT_WRAP_W_W(_malloc_base)
+INTERCEPT_WRAP_W_WWWW(_malloc_dbg)
-INTERFACE_FUNCTION(__asan_get_current_fake_stack)
-INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+INTERCEPT_WRAP_W_WW(calloc)
+INTERCEPT_WRAP_W_WW(_calloc_base)
+INTERCEPT_WRAP_W_WWWWW(_calloc_dbg)
+INTERCEPT_WRAP_W_WWW(_calloc_impl)
-INTERFACE_FUNCTION(__asan_stack_malloc_0)
-INTERFACE_FUNCTION(__asan_stack_malloc_1)
-INTERFACE_FUNCTION(__asan_stack_malloc_2)
-INTERFACE_FUNCTION(__asan_stack_malloc_3)
-INTERFACE_FUNCTION(__asan_stack_malloc_4)
-INTERFACE_FUNCTION(__asan_stack_malloc_5)
-INTERFACE_FUNCTION(__asan_stack_malloc_6)
-INTERFACE_FUNCTION(__asan_stack_malloc_7)
-INTERFACE_FUNCTION(__asan_stack_malloc_8)
-INTERFACE_FUNCTION(__asan_stack_malloc_9)
-INTERFACE_FUNCTION(__asan_stack_malloc_10)
+INTERCEPT_WRAP_W_WW(realloc)
+INTERCEPT_WRAP_W_WW(_realloc_base)
+INTERCEPT_WRAP_W_WWW(_realloc_dbg)
+INTERCEPT_WRAP_W_WWW(_recalloc)
+INTERCEPT_WRAP_W_WWW(_recalloc_base)
-INTERFACE_FUNCTION(__asan_stack_free_0)
-INTERFACE_FUNCTION(__asan_stack_free_1)
-INTERFACE_FUNCTION(__asan_stack_free_2)
-INTERFACE_FUNCTION(__asan_stack_free_4)
-INTERFACE_FUNCTION(__asan_stack_free_5)
-INTERFACE_FUNCTION(__asan_stack_free_6)
-INTERFACE_FUNCTION(__asan_stack_free_7)
-INTERFACE_FUNCTION(__asan_stack_free_8)
-INTERFACE_FUNCTION(__asan_stack_free_9)
-INTERFACE_FUNCTION(__asan_stack_free_10)
-
-// FIXME: we might want to have a sanitizer_win_dll_thunk?
-INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
-INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
-INTERFACE_FUNCTION(__sanitizer_cov)
-INTERFACE_FUNCTION(__sanitizer_cov_dump)
-INTERFACE_FUNCTION(__sanitizer_cov_indir_call16)
-INTERFACE_FUNCTION(__sanitizer_cov_init)
-INTERFACE_FUNCTION(__sanitizer_cov_module_init)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
-INTERFACE_FUNCTION(__sanitizer_cov_with_check)
-INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
-INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
-INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
-INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
-INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
-INTERFACE_FUNCTION(__sanitizer_get_heap_size)
-INTERFACE_FUNCTION(__sanitizer_get_ownership)
-INTERFACE_FUNCTION(__sanitizer_get_total_unique_caller_callee_pairs)
-INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
-INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
-INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
-INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
-INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
-INTERFACE_FUNCTION(__sanitizer_symbolize_global)
-INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
-INTERFACE_FUNCTION(__sanitizer_ptr_sub)
-INTERFACE_FUNCTION(__sanitizer_report_error_summary)
-INTERFACE_FUNCTION(__sanitizer_reset_coverage)
-INTERFACE_FUNCTION(__sanitizer_get_number_of_counters)
-INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
-INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
-INTERFACE_FUNCTION(__sanitizer_set_death_callback)
-INTERFACE_FUNCTION(__sanitizer_set_report_path)
-INTERFACE_FUNCTION(__sanitizer_set_report_fd)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
-INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
-INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
-INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
-INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
-INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
-INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
-
-// TODO(timurrrr): Add more interface functions on the as-needed basis.
-
-// ----------------- Memory allocation functions ---------------------
-WRAP_V_W(free)
-WRAP_V_W(_free_base)
-WRAP_V_WW(_free_dbg)
-
-WRAP_W_W(malloc)
-WRAP_W_W(_malloc_base)
-WRAP_W_WWWW(_malloc_dbg)
-
-WRAP_W_WW(calloc)
-WRAP_W_WW(_calloc_base)
-WRAP_W_WWWWW(_calloc_dbg)
-WRAP_W_WWW(_calloc_impl)
-
-WRAP_W_WW(realloc)
-WRAP_W_WW(_realloc_base)
-WRAP_W_WWW(_realloc_dbg)
-WRAP_W_WWW(_recalloc)
-WRAP_W_WWW(_recalloc_base)
-
-WRAP_W_W(_msize)
-WRAP_W_W(_expand)
-WRAP_W_W(_expand_dbg)
+INTERCEPT_WRAP_W_W(_msize)
+INTERCEPT_WRAP_W_W(_expand)
+INTERCEPT_WRAP_W_W(_expand_dbg)
// TODO(timurrrr): Might want to add support for _aligned_* allocation
// functions to detect a bit more bugs. Those functions seem to wrap malloc().
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
-
-#ifdef _WIN64
-INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
-#else
-INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
-
-// _except_handler4 checks -GS cookie which is different for each module, so we
-// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
-INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
- __asan_handle_no_return();
- return REAL(_except_handler4)(a, b, c, d);
-}
-#endif
-
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
#if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
+INTERCEPT_LIBRARY_FUNCTION(strtok);
INTERCEPT_LIBRARY_FUNCTION(strtol);
INTERCEPT_LIBRARY_FUNCTION(wcslen);
+INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
+
+#ifdef _WIN64
+INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
+#else
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+// _except_handler4 checks -GS cookie which is different for each module, so we
+// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+#endif
+
+// Windows-specific functions not included in asan_interface.inc.
+INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
+INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
+INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
+
+using namespace __sanitizer;
+
+extern "C" {
+int __asan_option_detect_stack_use_after_return;
+uptr __asan_shadow_memory_dynamic_address;
+} // extern "C"
+
+static int asan_dll_thunk_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // asan_dll_thunk_init is expected to be called by only one thread.
+ if (fn) return 0;
+
+ // Ensure all interception was executed.
+ __dll_thunk_init();
+
+ fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init");
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
-// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
-// is defined.
-void InterceptHooks() {
- INTERCEPT_HOOKS();
#ifndef _WIN64
INTERCEPT_FUNCTION(_except_handler4);
#endif
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
}
-// We want to call __asan_init before C/C++ initializers/constructors are
-// executed, otherwise functions like memset might be invoked.
-// For some strange reason, merely linking in asan_preinit.cc doesn't work
-// as the callback is never called... Is link.exe doing something too smart?
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init;
-// In DLLs, the callbacks are expected to return 0,
-// otherwise CRT initialization fails.
-static int call_asan_init() {
- __asan_init();
- return 0;
+static void WINAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init();
}
-#pragma section(".CRT$XIB", long, read) // NOLINT
-__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
-#endif // ASAN_DLL_THUNK
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+#endif // SANITIZER_DLL_THUNK
// using the default "import library" generated when linking the DLL RTL.
//
// This includes:
+// - creating weak aliases to default implementations imported from the asan dll.
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
-// Only compile this code when building asan_dynamic_runtime_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
+// Define weak aliases for all weak functions imported from the asan dll.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "asan_interface.inc"
+
// First, declare CRT sections we'll be using in this file
+#pragma section(".CRT$XIB", long, read) // NOLINT
#pragma section(".CRT$XID", long, read) // NOLINT
#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
+#pragma section(".CRT$XLAB", long, read) // NOLINT
////////////////////////////////////////////////////////////////////////////////
// Define a copy of __asan_option_detect_stack_use_after_return that should be
// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
-int __asan_option_detect_stack_use_after_return =
- __asan_should_detect_stack_use_after_return();
+int __asan_option_detect_stack_use_after_return;
__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
-void* __asan_shadow_memory_dynamic_address =
+void* __asan_shadow_memory_dynamic_address;
+}
+
+static int InitializeClonedVariables() {
+ __asan_option_detect_stack_use_after_return =
+ __asan_should_detect_stack_use_after_return();
+ __asan_shadow_memory_dynamic_address =
__asan_get_shadow_memory_dynamic_address();
+ return 0;
}
+static void NTAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
+}
+
+// Our cloned variables must be initialized before C/C++ constructors. If TLS
+// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
+// initializer is needed as a backup.
+__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
+ InitializeClonedVariables;
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
////////////////////////////////////////////////////////////////////////////////
// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
// unload or on exit. ASan relies on LLVM global_dtors to call
SetSEHFilter;
}
-#endif // ASAN_DYNAMIC_RUNTIME_THUNK
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
--- /dev/null
+//===-- asan_win_weak_interception.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in AddressSanitizer when it is implemented
+// as a shared library on Windows (dll), in order to delegate calls to weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "asan_interface_internal.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
+#endif // SANITIZER_DYNAMIC
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-4:0:0
+5:0:0
#endif
#define CONST_SECTION .section .rodata
-#if defined(__GNU__) || defined(__ANDROID__) || defined(__FreeBSD__)
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+ defined(__linux__)
#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
#else
#define NO_EXEC_STACK_DIRECTIVE
#endif
#if defined(__arm__)
+
+/*
+ * Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
+ * - for '-mthumb -march=armv6' compiler defines '__thumb__'
+ * - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
+ */
+#if defined(__thumb2__) || defined(__thumb__)
+#define DEFINE_CODE_STATE .thumb SEPARATOR
+#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
+#if defined(__thumb2__)
+#define USE_THUMB_2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#define ITE(cond) ite cond
+#else
+#define USE_THUMB_1
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif // defined(__thumb2__)
+#else // !defined(__thumb2__) && !defined(__thumb__)
+#define DEFINE_CODE_STATE .arm SEPARATOR
+#define DECLARE_FUNC_ENCODING
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif
+
+#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
+#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
+#endif
+
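+/*
+ * Editorial sketch, not part of the patch: a quick way to see which branch
+ * of the block above a given set of compiler flags selects (illustrative):
+ *
+ *   #if defined(__thumb2__)
+ *   #warning "Thumb-2: e.g. -mthumb -march=armv7-a"
+ *   #elif defined(__thumb__)
+ *   #warning "Thumb-1: e.g. -mthumb -march=armv6"
+ *   #else
+ *   #warning "ARM state: e.g. -marm"
+ *   #endif
+ */
+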
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
-#if !defined(__ARM_FEATURE_CLZ) && \
+#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
JMP(ip)
#endif
-#if __ARM_ARCH_ISA_THUMB == 2
-#define IT(cond) it cond
-#define ITT(cond) itt cond
-#else
-#define IT(cond)
-#define ITT(cond)
-#endif
-
-#if __ARM_ARCH_ISA_THUMB == 2
+#if defined(USE_THUMB_2)
#define WIDE(op) op.w
#else
#define WIDE(op) op
#endif
+#else // !defined(__arm__)
+#define DECLARE_FUNC_ENCODING
+#define DEFINE_CODE_STATE
#endif
#define GLUE2(a, b) a##b
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
+ DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
name:
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);
+ // Performs cleanup before a [[noreturn]] function. Must be called
+ // before things like _exit and execl to avoid false positives on the stack.
+ void __asan_handle_no_return(void);
+
#ifdef __cplusplus
} // extern "C"
#endif
// Prints stack traces for all live heap allocations ordered by total
// allocation size until `top_percent` of total live heap is shown.
// `top_percent` should be between 1 and 100.
+ // At most `max_number_of_contexts` contexts (stack traces) are printed.
// Experimental feature currently available only with asan on Linux/x86_64.
- void __sanitizer_print_memory_profile(size_t top_percent);
+ void __sanitizer_print_memory_profile(size_t top_percent,
+ size_t max_number_of_contexts);
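+ //
+ // Editorial usage sketch, not part of the patch: show allocations covering
+ // 90% of the live heap, printing at most 20 stack traces (only effective
+ // where the feature is available, per the note above):
+ //   __sanitizer_print_memory_profile(90, 20);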
// Fiber annotation interface.
// Before switching to a different stack, one must call
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
+
+ // Get the full module name and calculate the pc offset within it.
+ // Returns 1 if pc belongs to some module, 0 if the module was not found.
+ int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
+ size_t module_path_len,
+ void **pc_offset);
+
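+ // Editorial usage sketch, not part of the patch (assumes <cstdio>; 'probe'
+ // is an illustrative name):
+ //
+ //   static void probe() {}
+ //   char module[512];
+ //   void *offset = nullptr;
+ //   if (__sanitizer_get_module_and_offset_for_pc(
+ //           (void *)&probe, module, sizeof(module), &offset))
+ //     printf("%s+%p\n", module, offset);
+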
#ifdef __cplusplus
} // extern "C"
#endif
extern "C" {
#endif
- // Initialize coverage.
- void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
- // Open <name>.sancov.packed in the coverage directory and return the file
- // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
- // This is intended for use by sandboxing code.
- intptr_t __sanitizer_maybe_open_cov_file(const char *name);
- // Get the number of unique covered blocks (or edges).
- // This can be useful for coverage-directed in-process fuzzers.
- uintptr_t __sanitizer_get_total_unique_coverage();
- // Get the number of unique indirect caller-callee pairs.
- uintptr_t __sanitizer_get_total_unique_caller_callee_pairs();
- // Reset the basic-block (edge) coverage to the initial state.
- // Useful for in-process fuzzing to start collecting coverage from scratch.
- // Experimental, will likely not work for multi-threaded process.
- void __sanitizer_reset_coverage();
- // Set *data to the array of covered PCs and return the size of that array.
- // Some of the entries in *data will be zero.
- uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data);
+ // Clear collected coverage info.
+ void __sanitizer_cov_reset();
- // The coverage instrumentation may optionally provide imprecise counters.
- // Rather than exposing the counter values to the user we instead map
- // the counters to a bitset.
- // Every counter is associated with 8 bits in the bitset.
- // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+
- // The i-th bit is set to 1 if the counter value is in the i-th range.
- // This counter-based coverage implementation is *not* thread-safe.
-
- // Returns the number of registered coverage counters.
- uintptr_t __sanitizer_get_number_of_counters();
- // Updates the counter 'bitset', clears the counters and returns the number of
- // new bits in 'bitset'.
- // If 'bitset' is nullptr, only clears the counters.
- // Otherwise 'bitset' should be at least
- // __sanitizer_get_number_of_counters bytes long and 8-aligned.
- uintptr_t
- __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
+ // Dump collected coverage info. Sorts pcs by module into individual .sancov
+ // files.
+ void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
#ifdef __cplusplus
} // extern "C"
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
+ // To avoid dead stripping, you may need to define this function with
+ // __attribute__((used)).
int __lsan_is_turned_off();
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan runtime options. See lsan_flags.inc for details.
+ const char *__lsan_default_options();
+
// This function may be optionally provided by the user and should return
// a string containing LSan suppressions.
const char *__lsan_default_suppressions();
--- /dev/null
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_TSAN_INTERFACE_H
+#define SANITIZER_TSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// __tsan_release establishes a happens-before relation with a subsequent
+// __tsan_acquire on the same address.
+void __tsan_acquire(void *addr);
+void __tsan_release(void *addr);
+
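+// Editorial sketch, not part of the patch: annotating a hand-rolled
+// publication protocol so TSan sees the release/acquire edge (assumes
+// <atomic>; 'payload' and 'ready' are illustrative names):
+//
+//   int payload;
+//   std::atomic<int> ready{0};
+//   void producer() {
+//     payload = 42;
+//     __tsan_release(&ready);
+//     ready.store(1, std::memory_order_relaxed);
+//   }
+//   void consumer() {
+//     while (!ready.load(std::memory_order_relaxed)) {}
+//     __tsan_acquire(&ready);
+//     (void)payload;  // race-free under the annotation
+//   }
+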
+// Annotations for custom mutexes.
+// The annotations make it possible to get better reports (with sets of
+// locked mutexes), to detect more types of bugs (e.g. mutex misuses, races
+// between lock/unlock and destruction, and potential deadlocks) and to
+// improve precision and performance (by ignoring individual atomic
+// operations in mutex code). However, the downside is that annotated mutex
+// code itself is not checked for correctness.
+
+// Mutex creation flags are passed to __tsan_mutex_create annotation.
+// If the mutex has no constructor and __tsan_mutex_create is not called,
+// the flags may be passed to __tsan_mutex_pre_lock/__tsan_mutex_post_lock
+// annotations.
+
+// Mutex has static storage duration and no-op constructor and destructor.
+// This effectively makes tsan ignore the destroy annotation.
+const unsigned __tsan_mutex_linker_init = 1 << 0;
+// Mutex is write reentrant.
+const unsigned __tsan_mutex_write_reentrant = 1 << 1;
+// Mutex is read reentrant.
+const unsigned __tsan_mutex_read_reentrant = 1 << 2;
+
+// Mutex operation flags:
+
+// Denotes read lock operation.
+const unsigned __tsan_mutex_read_lock = 1 << 3;
+// Denotes try lock operation.
+const unsigned __tsan_mutex_try_lock = 1 << 4;
+// Denotes that a try lock operation has failed to acquire the mutex.
+const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
+// Denotes that the lock operation acquires multiple recursion levels.
+// The number of levels is passed in the recursion parameter.
+// This is useful for annotating e.g. Java builtin monitors, for which the
+// wait operation releases all recursive acquisitions of the mutex.
+const unsigned __tsan_mutex_recursive_lock = 1 << 6;
+// Denotes that the unlock operation releases all recursion levels.
+// The number of released levels is returned and must later be passed to
+// the corresponding __tsan_mutex_post_lock annotation.
+const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+
+// Annotate creation of a mutex.
+// Supported flags: mutex creation flags.
+void __tsan_mutex_create(void *addr, unsigned flags);
+
+// Annotate destruction of a mutex.
+// Supported flags:
+// - __tsan_mutex_linker_init
+void __tsan_mutex_destroy(void *addr, unsigned flags);
+
+// Annotate start of lock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock
+// - __tsan_mutex_try_lock
+// - all mutex creation flags
+void __tsan_mutex_pre_lock(void *addr, unsigned flags);
+
+// Annotate end of lock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock)
+// - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock)
+// - __tsan_mutex_try_lock_failed
+// - __tsan_mutex_recursive_lock
+// - all mutex creation flags
+void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
+
+// Annotate start of unlock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock
+// - __tsan_mutex_recursive_unlock
+int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
+
+// Annotate end of unlock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
+void __tsan_mutex_post_unlock(void *addr, unsigned flags);
+
+// Annotate start/end of notify/signal/broadcast operation.
+// Supported flags: none.
+void __tsan_mutex_pre_signal(void *addr, unsigned flags);
+void __tsan_mutex_post_signal(void *addr, unsigned flags);
+
+// Annotate start/end of a region of code where lock/unlock/signal operation
+// diverts to do something else unrelated to the mutex. This can be used to
+// annotate, for example, calls into cooperative scheduler or contention
+// profiling code.
+// These annotations must be called only from within
+// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
+// __tsan_mutex_pre/post_signal regions.
+// Supported flags: none.
+void __tsan_mutex_pre_divert(void *addr, unsigned flags);
+void __tsan_mutex_post_divert(void *addr, unsigned flags);
+
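+// Editorial sketch, not part of the patch: a minimal spinlock annotated with
+// the hooks above (assumes <atomic>; flags are 0 throughout, and a real
+// wrapper would also mark its try-lock paths):
+//
+//   class SpinLock {
+//    public:
+//     SpinLock() { __tsan_mutex_create(this, 0); }
+//     ~SpinLock() { __tsan_mutex_destroy(this, 0); }
+//     void Lock() {
+//       __tsan_mutex_pre_lock(this, 0);
+//       while (locked_.exchange(true, std::memory_order_acquire)) {}
+//       __tsan_mutex_post_lock(this, 0, 0);
+//     }
+//     void Unlock() {
+//       __tsan_mutex_pre_unlock(this, 0);
+//       locked_.store(false, std::memory_order_release);
+//       __tsan_mutex_post_unlock(this, 0);
+//     }
+//    private:
+//     std::atomic<bool> locked_{false};
+//   };
+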
+// External race detection API.
+// Can be used by non-instrumented libraries to detect when their objects are
+// being used in an unsafe manner.
+// - __tsan_external_read/__tsan_external_write annotate the logical reads
+// and writes of the object at the specified address. 'caller_pc' should
+// be the PC of the library user, which the library can obtain with e.g.
+// `__builtin_return_address(0)`.
+// - __tsan_external_register_tag registers a 'tag' with the specified name,
+// which is later used in read/write annotations to denote the object type.
+// - __tsan_external_assign_tag can optionally mark a heap object with a tag.
+void *__tsan_external_register_tag(const char *object_type);
+void __tsan_external_register_header(void *tag, const char *header);
+void __tsan_external_assign_tag(void *addr, void *tag);
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
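+// Editorial sketch, not part of the patch: how a non-instrumented library
+// might use the external API above ('MyTable' is a hypothetical type):
+//
+//   void *kTag = __tsan_external_register_tag("MyTable");
+//   int Get(MyTable *t) {
+//     __tsan_external_read(t, __builtin_return_address(0), kTag);
+//     return t->slot;
+//   }
+//   void Put(MyTable *t, int v) {
+//     __tsan_external_write(t, __builtin_return_address(0), kTag);
+//     t->slot = v;
+//   }
+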
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_TSAN_INTERFACE_H
#ifndef INTERCEPTION_H
#define INTERCEPTION_H
-#if !defined(__linux__) && !defined(__FreeBSD__) && \
- !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__) && \
+ !defined(__NetBSD__) && !defined(_WIN32) && !defined(__Fuchsia__)
# error "Interception doesn't work on this operating system."
#endif
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__NetBSD__)
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__) \
__attribute__((alias("__interceptor_" #func), visibility("default")));
-#else
+#elif !defined(__Fuchsia__)
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
__attribute__((weak, alias("__interceptor_" #func), visibility("default")));
#endif
-#if !defined(__APPLE__)
+#if defined(__Fuchsia__)
+// There is no general interception at all on Fuchsia.
+// Sanitizer runtimes just define functions directly to preempt them,
+// and have bespoke ways to access the underlying libc functions.
+# include <zircon/sanitizer.h>
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define REAL(x) __unsanitized_##x
+# define DECLARE_REAL(ret_type, func, ...)
+#elif !defined(__APPLE__)
# define PTR_TO_REAL(x) real_##x
# define REAL(x) __interception::PTR_TO_REAL(x)
# define FUNC_TYPE(x) x##_f
# define ASSIGN_REAL(x, y)
#endif // __APPLE__
+#if !defined(__Fuchsia__)
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
DECLARE_REAL(ret_type, func, __VA_ARGS__) \
extern "C" ret_type WRAP(func)(__VA_ARGS__);
+#else
+#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
+#endif
// Generally, you don't need to use DEFINE_REAL by itself, as the INTERCEPTOR
// macro does its job. In exceptional cases you may need to call REAL(foo)
// without defining INTERCEPTOR(..., foo, ...). For example, if you override
// foo with an interceptor for another function.
-#if !defined(__APPLE__)
+#if !defined(__APPLE__) && !defined(__Fuchsia__)
# define DEFINE_REAL(ret_type, func, ...) \
typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
namespace __interception { \
# define DEFINE_REAL(ret_type, func, ...)
#endif
-#if !defined(__APPLE__)
+#if defined(__Fuchsia__)
+
+// We need to define the __interceptor_func name just to get
+// sanitizer_common/scripts/gen_dynamic_list.py to export func.
+// But we don't need to export __interceptor_func to get that.
+#define INTERCEPTOR(ret_type, func, ...) \
+ extern "C"[[ gnu::alias(#func), gnu::visibility("hidden") ]] ret_type \
+ __interceptor_##func(__VA_ARGS__); \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
+
+#elif !defined(__APPLE__)
+
#define INTERCEPTOR(ret_type, func, ...) \
DEFINE_REAL(ret_type, func, __VA_ARGS__) \
DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
#define INCLUDED_FROM_INTERCEPTION_LIB
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
# include "interception_linux.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
INTERCEPT_FUNCTION_VER_MAC(func, symver)
-#else // defined(_WIN32)
+#elif defined(_WIN32)
# include "interception_win.h"
# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
# define INTERCEPT_FUNCTION_VER(func, symver) \
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
#include "interception.h"
#include <dlfcn.h> // for dlsym() and dlvsym()
+#ifdef __NetBSD__
+#include "sanitizer_common/sanitizer_libc.h"
+#endif
+
namespace __interception {
bool GetRealFunctionAddress(const char *func_name, uptr *func_addr,
uptr real, uptr wrapper) {
+#ifdef __NetBSD__
+ // XXX: Find a better way to handle renames
+ if (internal_strcmp(func_name, "sigaction") == 0) func_name = "__sigaction14";
+#endif
*func_addr = (uptr)dlsym(RTLD_NEXT, func_name);
return real == wrapper;
}
} // namespace __interception
-
-#endif // __linux__ || __FreeBSD__
+#endif // __linux__ || __FreeBSD__ || __NetBSD__
// Linux-specific interception methods.
//===----------------------------------------------------------------------===//
-#if defined(__linux__) || defined(__FreeBSD__)
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
# error "interception_linux.h should be included from interception library only"
#endif // !defined(__ANDROID__)
#endif // INTERCEPTION_LINUX_H
-#endif // __linux__ || __FreeBSD__
+#endif // __linux__ || __FreeBSD__ || __NetBSD__
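// A minimal illustrative sketch of these macros in use (non-Fuchsia,
// non-Windows path), assuming interception.h is included and the tool links
// the interception library; the intercepted function is arbitrary:
INTERCEPTOR(char*, strdup, const char *s) {
  // ...tool-specific bookkeeping would go here...
  return REAL(strdup)(s);  // forward to the original libc function
}
// ...and during runtime initialization:
//   INTERCEPT_FUNCTION(strdup);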
}
static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
if (from < target)
return target - from <= (uptr)0x7FFFFFFFU;
else
return from - target <= (uptr)0x80000000U;
+#else
+ // In a 32-bit address space, the address calculation will wrap, so this check
+ // is unnecessary.
+ return true;
+#endif
}
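// A self-contained analogue of the check above: a hooked x86-64 `jmp rel32`
// encodes a signed 32-bit displacement, so the trampoline must lie within
// roughly +/-2 GiB of the patched code.
#include <cstdint>
static bool FitsInRel32(uint64_t from, uint64_t target) {
  return (from < target) ? (target - from <= 0x7FFFFFFFull)
                         : (from - target <= 0x80000000ull);
}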
static uptr GetMmapGranularity() {
return true;
}
-static const u8 kHintNop10Bytes[] = {
- 0x66, 0x66, 0x0F, 0x1F, 0x84,
- 0x00, 0x00, 0x00, 0x00, 0x00
+static const u8 kHintNop9Bytes[] = {
+ 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
};
template<class T>
static bool FunctionHasPadding(uptr address, uptr size) {
if (IsMemoryPadding(address - size, size))
return true;
- if (size <= sizeof(kHintNop10Bytes) &&
- FunctionHasPrefix(address, kHintNop10Bytes))
+ if (size <= sizeof(kHintNop9Bytes) &&
+ FunctionHasPrefix(address, kHintNop9Bytes))
return true;
return false;
}
switch (*(u8*)address) {
case 0xA1: // A1 XX XX XX XX XX XX XX XX :
// movabs eax, dword ptr ds:[XXXXXXXX]
- return 8;
+ return 9;
}
switch (*(u16*)address) {
case 0x5741: // push r15
case 0x9066: // Two-byte NOP
return 2;
+
+ case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
+ if (rel_offset)
+ *rel_offset = 2;
+ return 6;
}
switch (0x00FFFFFF & *(u32*)address) {
IMAGE_DATA_DIRECTORY *export_directory =
&headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
+ if (export_directory->Size == 0)
+ return 0;
RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
export_directory->VirtualAddress);
RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
sanitizer_lsan_files = \
lsan_common.cc \
- lsan_common_linux.cc
+ lsan_common_linux.cc \
+ lsan_common_mac.cc
lsan_files = \
$(sanitizer_lsan_files) \
lsan.cc \
+ lsan_linux.cc \
+ lsan_mac.cc \
+ lsan_malloc_mac.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
$(am__append_1) $(am__DEPENDENCIES_1)
-am__objects_1 = lsan_common.lo lsan_common_linux.lo
-am__objects_2 = $(am__objects_1) lsan.lo lsan_allocator.lo \
- lsan_interceptors.lo lsan_preinit.lo lsan_thread.lo
+am__objects_1 = lsan_common.lo lsan_common_linux.lo lsan_common_mac.lo
+am__objects_2 = $(am__objects_1) lsan.lo lsan_linux.lo lsan_mac.lo \
+ lsan_malloc_mac.lo lsan_allocator.lo lsan_interceptors.lo \
+ lsan_preinit.lo lsan_thread.lo
am_liblsan_la_OBJECTS = $(am__objects_2)
liblsan_la_OBJECTS = $(am_liblsan_la_OBJECTS)
liblsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la
sanitizer_lsan_files = \
lsan_common.cc \
- lsan_common_linux.cc
+ lsan_common_linux.cc \
+ lsan_common_mac.cc
lsan_files = \
$(sanitizer_lsan_files) \
lsan.cc \
+ lsan_linux.cc \
+ lsan_mac.cc \
+ lsan_malloc_mac.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_malloc_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_preinit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_thread.Plo@am__quote@
RegisterLsanFlags(&parser, f);
RegisterCommonFlags(&parser);
+ // Override from user-specified string.
+ const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+ parser.ParseString(lsan_default_options);
parser.ParseString(GetEnv("LSAN_OPTIONS"));
SetVerbosity(common_flags()->verbosity);
if (common_flags()->help) parser.PrintFlagDescriptions();
}
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
+ sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ nullptr);
+}
+
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
+ ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
+ InstallDeadlySignalHandlers(LsanOnDeadlySignal);
u32 tid = ThreadCreate(0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(tid, GetTid());
//
//===----------------------------------------------------------------------===//
+#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#define GET_STACK_TRACE(max_size, fast) \
- BufferedStackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- if (!SANITIZER_MIPS || \
- IsValidFrame(GET_CURRENT_FRAME(), stack_top, stack_bottom)) { \
- stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
- /* context */ 0, stack_top, stack_bottom, fast); \
- } \
- }
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, max_size, \
+ StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), nullptr, fast);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
namespace __lsan {
void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+} while (0)
+
+// Get the stack trace with the given pc and bp.
+// The pc will be at position 0 of the resulting stack trace.
+// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(__sanitizer::BufferedStackTrace *stack,
+ __sanitizer::uptr max_depth,
+ __sanitizer::uptr pc, __sanitizer::uptr bp,
+ void *context, bool fast) {
+ uptr stack_top = 0, stack_bottom = 0;
+ ThreadContext *t;
+ if (fast && (t = CurrentThreadContext())) {
+ stack_top = t->stack_end();
+ stack_bottom = t->stack_begin();
+ }
+ if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ }
+}
} // namespace __lsan
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
-
-struct ChunkMetadata {
- u8 allocated : 8; // Must be first.
- ChunkTag tag : 2;
- uptr requested_size : 54;
- u32 stack_trace_id;
-};
-
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
- sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
- PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
-struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x600000000000ULL;
- static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
- static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
-};
-
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
-static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+ allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
- return nullptr;
+ return Allocator::FailureHandler::OnBadRequest();
}
- void *p = allocator.Allocate(&cache, size, alignment, false);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
return p;
}
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+ return Allocator::FailureHandler::OnBadRequest();
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
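// Why the overflow check above matters -- a self-contained illustration:
#include <cstdint>
static bool CallocWouldOverflow(uint64_t nmemb, uint64_t size) {
  return size != 0 && nmemb > UINT64_MAX / size;
}
// e.g. with nmemb == size == (1ull << 33), the raw product 2^66 truncates to
// 0 in 64-bit arithmetic, so without the check a tiny block would be returned
// for a huge logical request.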
+
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
- allocator.Deallocate(&cache, p);
- return nullptr;
+ allocator.Deallocate(GetAllocatorCache(), p);
+ return Allocator::FailureHandler::OnBadRequest();
}
- p = allocator.Reallocate(&cache, p, new_size, alignment);
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
- *begin = (uptr)&cache;
- *end = *begin + sizeof(cache);
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(
+ Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
///// Interface to the common LSan module. /////
void LockAllocator() {
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+#endif
} // extern "C"
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
namespace __lsan {
void AllocatorThreadFinish();
void InitializeAllocator();
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
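// Note: the bit-field widths above are presumably chosen so the fields before
// stack_trace_id always total 64 bits (8 + 2 + 54 on 64-bit targets,
// 8 + 2 + 32 + 22 on 32-bit ones), keeping the metadata layout uniform.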
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__arm__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+ typedef __lsan::ByteMap ByteMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__) || defined(__powerpc64__)
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+
+AllocatorCache *GetAllocatorCache();
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
-void EnableInThisThread() {
- if (!disable_counter && common_flags()->detect_leaks) {
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
- disable_counter--;
}
-Flags lsan_flags;
-
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+#endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
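// A minimal sketch of the user-side counterpart to these built-in patterns:
// defining the weak hook parsed in InitializeSuppressions() just below.
// The patterns here are illustrative:
extern "C" const char *__lsan_default_suppressions() {
  return "leak:libfoo.so\n"        // match by module name (hypothetical)
         "leak:KnownLeakyInit\n";  // or by function name (hypothetical)
}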
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
+ suppression_ctx->Parse(kStdSuppressions);
}
static SuppressionContext *GetSuppressionContext() {
return suppression_ctx;
}
-struct RootRegion {
- const void *begin;
- uptr size;
-};
+static InternalMmapVector<RootRegion> *root_regions;
-InternalMmapVector<RootRegion> *root_regions;
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
void InitializeRootRegions() {
CHECK(!root_regions);
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
+const char *MaybeCallLsanDefaultOptions() {
+ return (&__lsan_default_options) ? __lsan_default_options() : "";
+}
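// The user-side override consumed here -- a minimal sketch; the flags are
// standard LSan/common flags, the values illustrative:
extern "C" const char *__lsan_default_options() {
  return "use_stacks=0:max_leaks=3";
}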
+
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
- const char *End() { return Default(); }
};
static inline bool CanBeAHeapPointer(uptr p) {
}
}
+// Scans a global range for pointers.
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
+ InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end = registers_begin + registers.size();
- for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
- uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
continue;
}
uptr sp;
- bool have_registers =
- (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
- if (!have_registers) {
- Report("Unable to get registers from thread %d.\n");
- // If unable to get SP, consider the entire stack to be reachable.
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
sp = stack_begin;
}
}
if (flags()->use_tls) {
- LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
- if (cache_begin == cache_end) {
- ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
- } else {
- // Because LSan should not be loaded with dlopen(), we can assume
- // that allocator cache will be part of static TLS image.
- CHECK_LE(tls_begin, cache_begin);
- CHECK_GE(tls_end, cache_end);
- if (tls_begin < cache_begin)
- ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
- kReachable);
- if (tls_end > cache_end)
- ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+      // If the TLS and cache ranges don't overlap, scan the full TLS range;
+      // otherwise, only scan the non-overlapping portions.
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
}
- if (dtls) {
+ if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
kReachable);
}
}
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
}
}
}
-static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
- uptr root_end) {
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr begin, end, prot;
- while (proc_maps.Next(&begin, &end,
- /*offset*/ nullptr, /*filename*/ nullptr,
- /*filename_size*/ 0, &prot)) {
- uptr intersection_begin = Max(root_begin, begin);
- uptr intersection_end = Min(end, root_end);
- if (intersection_begin >= intersection_end) continue;
- bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
- LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
- root_begin, root_end, begin, end,
- is_readable ? "readable" : "unreadable");
- if (is_readable)
- ScanRangeForPointers(intersection_begin, intersection_end, frontier,
- "ROOT", kReachable);
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+ uptr region_begin, uptr region_end, bool is_readable) {
+ uptr intersection_begin = Max(root_region.begin, region_begin);
+ uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+ if (intersection_begin >= intersection_end) return;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_region.begin, root_region.begin + root_region.size,
+ region_begin, region_end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+ kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+ const RootRegion &root_region) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ ScanRootRegion(frontier, root_region, segment.start, segment.end,
+ segment.IsReadable());
}
}
if (!flags()->use_root_regions) return;
CHECK(root_regions);
for (uptr i = 0; i < root_regions->size(); i++) {
- RootRegion region = (*root_regions)[i];
- uptr begin_addr = reinterpret_cast<uptr>(region.begin);
- ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ ProcessRootRegion(frontier, (*root_regions)[i]);
}
}
}
}
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+ CHECK(stack_id);
+ StackTrace stack = map->Get(stack_id);
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+struct InvalidPCParam {
+ Frontier *frontier;
+ StackDepotReverseMap *stack_depot_reverse_map;
+ bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+ u32 stack_id = m.stack_trace_id();
+ uptr caller_pc = 0;
+ if (stack_id > 0)
+ caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ GetLinker()->containsAddress(caller_pc))) {
+ m.set_tag(kReachable);
+ param->frontier->push_back(chunk);
+ }
+ }
+}
+
+// On Linux, handles dynamically allocated TLS blocks by treating all chunks
+// allocated from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+ StackDepotReverseMap stack_depot_reverse_map;
+ InvalidPCParam arg;
+ arg.frontier = frontier;
+ arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+ ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
+ CHECK_EQ(0, frontier.size());
+ ProcessPC(&frontier);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
LOG_POINTERS("Processing platform-specific allocations.\n");
- CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
"\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
- Printf("%s", d.End());
+ Printf("%s", d.Default());
param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
if (common_flags()->print_suppressions)
return false;
}
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
void DoLeakCheck() {
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
- bool have_leaks = CheckForLeaks();
- if (!have_leaks) {
- return;
- }
- if (common_flags()->exitcode) {
- Die();
- }
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks) HandleLeaks();
}
static int DoRecoverableLeakCheck() {
return have_leaks ? 1 : 0;
}
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
leaks_[index].total_size, leaks_[index].hit_count);
- Printf("%s", d.End());
+ Printf("%s", d.Default());
PrintStackTraceById(leaks_[index].stack_trace_id);
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
+void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#if CAN_SANITIZE_LEAKS
BlockingMutexLock l(&global_mutex);
CHECK(root_regions);
- RootRegion region = {begin, size};
+ RootRegion region = {reinterpret_cast<uptr>(begin), size};
root_regions->push_back(region);
VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
bool removed = false;
for (uptr i = 0; i < root_regions->size(); i++) {
RootRegion region = (*root_regions)[i];
- if (region.begin == begin && region.size == size) {
+ if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
removed = true;
uptr last_index = root_regions->size() - 1;
(*root_regions)[i] = (*root_regions)[last_index];
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+ return "";
+}
+
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions() {
+ return "";
+}
#endif
} // extern "C"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
- && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+// LeakSanitizer relies on some of glibc's internals (e.g. the TLS machinery)
+// and was thus originally supported on Linux only; Darwin support is handled
+// by the SANITIZER_MAC cases below. Also, LSan doesn't like 32-bit
+// architectures, because their "small" (4-byte) pointers lead to a high
+// false-negative ratio on large leaks. But we still want to have it for some
+// 32-bit arches (e.g. x86), see https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function and (probably) adjust the TLS machinery for the
+// new architecture inside the sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+ defined(__powerpc64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
kIgnored = 3
};
+const u32 kInvalidTid = (u32) -1;
+
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+ uptr begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+ uptr region_begin, uptr region_end, bool is_readable);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
};
// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
+// According to the Itanium C++ ABI, an array cookie is one word containing
+// the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, an array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
// Special case for "new T[0]" where T is a type with DTOR.
-// new T[0] will allocate one word for the array size (0) and store a pointer
-// to the end of allocated chunk.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of the allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
- return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
- *reinterpret_cast<uptr *>(chunk_beg) == 0;
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
}
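// A small self-contained illustration of the pattern detected above
// (Itanium ABI case; the type is arbitrary):
struct D { ~D() {} };
// `new D[0]` allocates one word holding the element count (0) plus zero
// elements, and evaluates to a pointer one word past the chunk start -- the
// exact shape IsItaniumABIArrayCookie() recognizes, so the interior pointer
// still keeps the chunk reachable.
D *zero_len_array = new D[0];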
// The following must be implemented in the parent tool.
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
} // namespace __lsan
extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
return LibraryNameIs(full_name, kLinkerName);
}
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
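// These counters back the public __lsan_disable()/__lsan_enable() pair from
// <sanitizer/lsan_interface.h>; a typical user-side sketch:
#include <cstdlib>
#include <sanitizer/lsan_interface.h>
void MakeIntentionalLeak() {
  __lsan_disable();
  void *leaked = std::malloc(64);  // ignored by subsequent leak reports
  __lsan_enable();
  (void)leaked;
}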
+
void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
return;
}
}
- VReport(1, "LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ if (linker == nullptr) {
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
+ }
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
- uptr allocator_begin = 0, allocator_end = 0;
- GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
- if (begin <= allocator_begin && allocator_begin < end) {
- CHECK_LE(allocator_begin, allocator_end);
- CHECK_LE(allocator_end, end);
- if (begin < allocator_begin)
- ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
- kReachable);
- if (allocator_end < end)
- ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
- kReachable);
- } else {
- ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
- }
+ ScanGlobalRange(begin, end, frontier);
}
return 0;
}
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
-static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
- CHECK(stack_id);
- StackTrace stack = map->Get(stack_id);
- // The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (stack.size >= 2)
- return stack.trace[1];
- return 0;
-}
+LoadedModule *GetLinker() { return linker; }
-struct ProcessPlatformAllocParam {
- Frontier *frontier;
- StackDepotReverseMap *stack_depot_reverse_map;
- bool skip_linker_allocations;
-};
-
-// ForEachChunk callback. Identifies unreachable chunks which must be treated as
-// reachable. Marks them as reachable and adds them to the frontier.
-static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
- CHECK(arg);
- ProcessPlatformAllocParam *param =
- reinterpret_cast<ProcessPlatformAllocParam *>(arg);
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
- u32 stack_id = m.stack_trace_id();
- uptr caller_pc = 0;
- if (stack_id > 0)
- caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
- // it as reachable, as we can't properly report its allocation stack anyway.
- if (caller_pc == 0 || (param->skip_linker_allocations &&
- linker->containsAddress(caller_pc))) {
- m.set_tag(kReachable);
- param->frontier->push_back(chunk);
- }
- }
-}
-
-// Handles dynamically allocated TLS blocks by treating all chunks allocated
-// from ld-linux.so as reachable.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- StackDepotReverseMap stack_depot_reverse_map;
- ProcessPlatformAllocParam arg;
- arg.frontier = frontier;
- arg.stack_depot_reverse_map = &stack_depot_reverse_map;
- arg.skip_linker_allocations =
- flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
- ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
-}
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
struct DoStopTheWorldParam {
StopTheWorldCallback callback;
void *argument;
};
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on Linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+ if (common_flags()->exitcode) Die();
+}
+
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
void *data) {
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
--- /dev/null
+//=-- lsan_common_mac.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+#include <mach/mach.h>
+
+namespace __lsan {
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id,
+// so we can't destroy it until it's been used and reset to the invalid tid.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
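// Note on the POSIX semantics relied on above: re-setting the key from within
// its own destructor makes pthread run the destructor again on a later pass
// (implementations retry up to PTHREAD_DESTRUCTOR_ITERATIONS times), which is
// what keeps the data alive until the thread id is reset to kInvalidTid.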
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->current_thread_id : kInvalidTid;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+ "__cfstring", "__la_symbol_ptr", "__mod_init_func",
+ "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
+ "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+ "__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName);
+
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+    // Even when global scanning is disabled, we still need to scan
+    // system libraries for stashed pointers.
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+      // Sections storing global variables are writable and non-executable.
+ if (range.executable || !range.writable) continue;
+
+ for (auto name : kSkippedSecNames) {
+ if (!internal_strcmp(range.name, name)) continue;
+ }
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ mach_port_name_t port;
+ if (task_for_pid(mach_task_self(), internal_getpid(), &port)
+ != KERN_SUCCESS) {
+ return;
+ }
+
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+
+ while (err == KERN_SUCCESS) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(port, &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+
+ uptr end_address = address + size;
+
+    // libxpc stashes some pointers in the Kernel Alloc Once page;
+    // make sure not to report those as leaks.
+ if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+
+ // Recursing over the full memory map is very slow, break out
+ // early if we don't need the full iteration.
+ if (!flags()->use_root_regions || !root_regions->size())
+ break;
+ }
+
+ // This additional root region scan is required on Darwin in order to
+ // detect root regions contained within mmap'd memory regions, because
+ // the Darwin implementation of sanitizer_procmaps traverses images
+ // as loaded by dyld, and not the complete set of all memory regions.
+ //
+ // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+ // behavior as sanitizer_procmaps_linux and traverses all memory regions
+ if (flags()->use_root_regions) {
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+ info.protection & kProtectionRead);
+ }
+ }
+
+ address = end_address;
+ }
+}
+
+// On Darwin, we can intercept _exit gracefully, and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
+void HandleLeaks() {}
+
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ StopTheWorld(callback, argument);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include <stddef.h>
+
using namespace __lsan;
extern "C" {
int pthread_setspecific(unsigned key, const void *v);
}
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
-
///// Malloc/free interceptors. /////
-const bool kAlwaysClearMemory = true;
-
namespace std {
struct nothrow_t;
+ enum class align_val_t: size_t;
}
+#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+ return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
- Deallocate(p);
+ lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- size *= nmemb;
- return Allocate(stack, size, 1, true);
+ return lsan_calloc(nmemb, size, stack);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Reallocate(stack, q, size, 1);
+ return lsan_realloc(q, size, stack);
}
-INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ *memptr = lsan_memalign(alignment, size, stack);
+ // FIXME: Return ENOMEM if user requested more than max alloc size.
+ return 0;
}
-INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ return lsan_valloc(size, stack);
}
+#endif
-INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
- // FIXME: Return ENOMEM if user requested more than max alloc size.
- return 0;
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
-INTERCEPTOR(void*, valloc, uptr size) {
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- if (size == 0)
- size = GetPageSizeCached();
- return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
int x[10];
};
internal_memset(&res, 0, sizeof(res));
return res;
}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#if SANITIZER_INTERCEPT_PVALLOC
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
}
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
-#define OPERATOR_NEW_BODY \
- ENSURE_LSAN_INITED; \
- GET_STACK_TRACE_MALLOC; \
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ ENSURE_LSAN_INITED; \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = lsan_malloc(size, stack); \
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
+ ENSURE_LSAN_INITED; \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = lsan_memalign((uptr)align, size, stack); \
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+ return res;
+
+#define OPERATOR_DELETE_BODY \
+ ENSURE_LSAN_INITED; \
+ lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names (a demangling
+// table follows this section).
+#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
-
-#define OPERATOR_DELETE_BODY \
- ENSURE_LSAN_INITED; \
- Deallocate(ptr);
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const &) {
- OPERATOR_DELETE_BODY;
-}
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else // SANITIZER_MAC
+
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif // !SANITIZER_MAC
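For reference, the mangled names intercepted in the SANITIZER_MAC branch above
demangle as follows (Itanium ABI: m is unsigned long, Pv is void *, and
RKSt9nothrow_t adds a const std::nothrow_t & parameter); c++filt confirms:

  _Znwm                -> operator new(unsigned long)
  _Znam                -> operator new[](unsigned long)
  _ZnwmRKSt9nothrow_t  -> operator new(unsigned long, std::nothrow_t const&)
  _ZnamRKSt9nothrow_t  -> operator new[](unsigned long, std::nothrow_t const&)
  _ZdlPv               -> operator delete(void*)
  _ZdaPv               -> operator delete[](void*)
  _ZdlPvRKSt9nothrow_t -> operator delete(void*, std::nothrow_t const&)
  _ZdaPvRKSt9nothrow_t -> operator delete[](void*, std::nothrow_t const&)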
+
///// Thread initialization and finalization. /////
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
+ int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+ IsStateDetached(detached));
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
return res;
}
+INTERCEPTOR(void, _exit, int status) {
+ if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+ REAL(_exit)(status);
+}
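The new _exit interceptor keeps an already-printed leak report from being
masked by a successful exit status, since _exit() bypasses the atexit-time
check. A sketch of the observable effect, assuming the public standalone LSan
interface header:

  #include <sanitizer/lsan_interface.h>
  #include <unistd.h>
  int main() {
    new int[16];             // leaked
    __lsan_do_leak_check();  // reports the leak; HasReportedLeaks() is now true
    _exit(0);                // intercepted: status becomes common_flags()->exitcode
  }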
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
namespace __lsan {
void InitializeInterceptors() {
+ InitializeSignalInterceptors();
+
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
- INTERCEPT_FUNCTION(cfree);
+ LSAN_MAYBE_INTERCEPT_CFREE;
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
- INTERCEPT_FUNCTION(memalign);
+ LSAN_MAYBE_INTERCEPT_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
INTERCEPT_FUNCTION(posix_memalign);
- INTERCEPT_FUNCTION(__libc_memalign);
INTERCEPT_FUNCTION(valloc);
- INTERCEPT_FUNCTION(pvalloc);
- INTERCEPT_FUNCTION(malloc_usable_size);
- INTERCEPT_FUNCTION(mallinfo);
- INTERCEPT_FUNCTION(mallopt);
+ LSAN_MAYBE_INTERCEPT_PVALLOC;
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ LSAN_MAYBE_INTERCEPT_MALLINFO;
+ LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(_exit);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
--- /dev/null
+//=-- lsan_linux.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX
--- /dev/null
+//===-- lsan_mac.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+ if (GetCurrentThread() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, 0, true);
+ ThreadStart(tid, GetTid());
+ SetCurrentThread(tid);
+ }
+}
+
+// For use only by those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+ lsan_block_context_t *context = (lsan_block_context_t *)block;
+ VReport(2,
+ "lsan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
+ lsan_register_worker_thread(context->parent_tid);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan; // NOLINT
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ lsan_block_context_t *lsan_ctxt =
+ (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+ lsan_ctxt->block = ctxt;
+ lsan_ctxt->func = func;
+ lsan_ctxt->parent_tid = GetCurrentThread();
+ return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+ return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
+ lsan_dispatch_call_block_and_release); \
+ }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
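For clarity, the first invocation above expands to roughly the following
(a mechanical expansion of INTERCEPT_DISPATCH_X_F_3, shown for illustration):

  INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
              dispatch_function_t func) {
    lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
    return REAL(dispatch_async_f)(dq, (void *)lsan_ctxt,
                                  lsan_dispatch_call_block_and_release);
  }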
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+ lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ REAL(dispatch_group_async_f)
+ (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+ void (^work)(void));
+}
+
+#define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThread(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
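GET_LSAN_BLOCK captures parent_tid by value at interception time, so the
wrapper block can register the worker thread under the correct parent whenever
libdispatch eventually runs it. In effect, for a user call such as

  dispatch_async(q, ^{ do_work(); });

the queue receives the wrapper

  ^{ lsan_register_worker_thread(parent_tid); do_work(); }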
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+ dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif // SANITIZER_MAC
--- /dev/null
+//===-- lsan_malloc_mac.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_MAC
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
+#include "lsan_common.h"
namespace __lsan {
-const u32 kInvalidTid = (u32) -1;
-
static ThreadRegistry *thread_registry;
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
-u32 GetCurrentThread() {
- return current_thread_tid;
-}
-
-void SetCurrentThread(u32 tid) {
- current_thread_tid = tid;
-}
-
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
/* arg */ nullptr);
}
-void ThreadStart(u32 tid, uptr os_id) {
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
- thread_registry->StartThread(tid, os_id, &args);
+ thread_registry->StartThread(tid, os_id, workerthread, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
}
ThreadContext *CurrentThreadContext() {
///// Interface to the common LSan module. /////
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
return true;
}
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
}
void InitializeThreadRegistry();
-void ThreadStart(u32 tid, uptr os_id);
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
+ sancov_flags.cc \
sanitizer_allocator.cc \
+ sanitizer_allocator_checks.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage_libcdep.cc \
- sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_coverage_libcdep_new.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
+ sanitizer_errno.cc \
+ sanitizer_file.cc \
sanitizer_flags.cc \
sanitizer_flag_parser.cc \
sanitizer_libc.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
+ sanitizer_mac_libcdep.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
+ sanitizer_stoptheworld_mac.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
am__DEPENDENCIES_1 =
-am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
- sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
- sanitizer_coverage_mapping_libcdep.lo \
+am__objects_1 = sancov_flags.lo sanitizer_allocator.lo \
+ sanitizer_allocator_checks.lo sanitizer_common.lo \
+ sanitizer_common_libcdep.lo sanitizer_coverage_libcdep_new.lo \
sanitizer_deadlock_detector1.lo \
- sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
- sanitizer_flag_parser.lo sanitizer_libc.lo \
- sanitizer_libignore.lo sanitizer_linux.lo \
+ sanitizer_deadlock_detector2.lo sanitizer_errno.lo \
+ sanitizer_file.lo sanitizer_flags.lo sanitizer_flag_parser.lo \
+ sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
- sanitizer_mac.lo sanitizer_persistent_allocator.lo \
+ sanitizer_mac.lo sanitizer_mac_libcdep.lo \
+ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
sanitizer_stacktrace_libcdep.lo sanitizer_symbolizer_mac.lo \
sanitizer_stacktrace_printer.lo \
sanitizer_stoptheworld_linux_libcdep.lo \
- sanitizer_suppressions.lo sanitizer_symbolizer.lo \
- sanitizer_symbolizer_libbacktrace.lo \
+ sanitizer_stoptheworld_mac.lo sanitizer_suppressions.lo \
+ sanitizer_symbolizer.lo sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_termination.lo \
ACLOCAL_AMFLAGS = -I m4
noinst_LTLIBRARIES = libsanitizer_common.la
sanitizer_common_files = \
+ sancov_flags.cc \
sanitizer_allocator.cc \
+ sanitizer_allocator_checks.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage_libcdep.cc \
- sanitizer_coverage_mapping_libcdep.cc \
+ sanitizer_coverage_libcdep_new.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
+ sanitizer_errno.cc \
+ sanitizer_file.cc \
sanitizer_flags.cc \
sanitizer_flag_parser.cc \
sanitizer_libc.cc \
sanitizer_linux_libcdep.cc \
sanitizer_linux_s390.cc \
sanitizer_mac.cc \
+ sanitizer_mac_libcdep.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_symbolizer_mac.cc \
sanitizer_stacktrace_printer.cc \
sanitizer_stoptheworld_linux_libcdep.cc \
+ sanitizer_stoptheworld_mac.cc \
sanitizer_suppressions.cc \
sanitizer_symbolizer.cc \
sanitizer_symbolizer_libbacktrace.cc \
distclean-compile:
-rm -f *.tab.c
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sancov_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator_checks.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep_new.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_errno.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_file.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stacktrace_printer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_linux_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stoptheworld_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@
--- /dev/null
+//===-- sancov_flags.cc -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sancov_flags.h"
+#include "sanitizer_flag_parser.h"
+#include "sanitizer_platform.h"
+
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {
+ return "";
+}
+
+using namespace __sanitizer;
+
+namespace __sancov {
+
+SancovFlags sancov_flags_dont_use_directly; // use via flags();
+
+void SancovFlags::SetDefaults() {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+}
+
+static void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+}
+
+static const char *MaybeCallSancovDefaultOptions() {
+ return (&__sancov_default_options) ? __sancov_default_options() : "";
+}
+
+void InitializeSancovFlags() {
+ SancovFlags *f = sancov_flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterSancovFlags(&parser, f);
+
+ parser.ParseString(MaybeCallSancovDefaultOptions());
+ parser.ParseString(GetEnv("SANCOV_OPTIONS"));
+
+ ReportUnrecognizedFlags();
+ if (f->help) parser.PrintFlagDescriptions();
+}
+
+} // namespace __sancov
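Because __sancov_default_options is a weak interface definition (see
SANITIZER_INTERFACE_WEAK_DEF above), an instrumented program may supply its own
defaults at link time; a minimal sketch:

  // In user code; overrides the weak default that returns "".
  extern "C" const char *__sancov_default_options() {
    return "symbolize=0";
  }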
--- /dev/null
+//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAGS_H
+#define SANCOV_FLAGS_H
+
+#include "sanitizer_flag_parser.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sancov {
+
+struct SancovFlags {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+
+ void SetDefaults();
+};
+
+extern SancovFlags sancov_flags_dont_use_directly;
+
+inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
+
+void InitializeSancovFlags();
+
+} // namespace __sancov
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
+__sancov_default_options();
+
+#endif
--- /dev/null
+//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAG
+#error "Defnine SANCOV_FLAG prior to including this file!"
+#endif
+
+SANCOV_FLAG(bool, symbolize, true,
+ "If set, converage information will be symbolized by sancov tool "
+ "after dumping.")
+
+SANCOV_FLAG(bool, help, false, "Print flags help.")
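Both flags are read from the SANCOV_OPTIONS environment variable by
InitializeSancovFlags() above, after any __sancov_default_options() override,
e.g. (illustrative invocations):

  SANCOV_OPTIONS=symbolize=0 ./instrumented-binary
  SANCOV_OPTIONS=help=1 ./instrumented-binary   # prints the flag descriptions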
~Handle();
T *operator->();
+ T &operator*();
+ const T &operator*() const;
bool created() const;
bool exists() const;
return &cell_->val;
}
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
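A usage sketch for the new dereference overloads, assuming a value type Data
and an illustrative kSize (the Handle acquire/release pattern is unchanged):

  typedef AddrHashMap<Data, 31051> Map;
  Map m;
  Map::Handle h(&m, addr);
  if (h.created())
    *h = Data();    // operator*() permits whole-value assignment...
  uptr n = (*h).n;  // ...and value reads, alongside the existing h->n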
template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
return created_;
#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(/* may_return_null*/ false);
+ internal_allocator_instance->Init(kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
return internal_allocator()->Allocate(&internal_allocator_cache, size,
- alignment, false);
+ alignment);
}
- return internal_allocator()->Allocate(cache, size, alignment, false);
+ return internal_allocator()->Allocate(cache, size, alignment);
}
static void *RawInternalRealloc(void *ptr, uptr size,
}
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
- if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDieOnBadRequest();
+ if (UNLIKELY(CheckForCallocOverflow(count, size)))
+ return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
low_level_alloc_callback = callback;
}
-bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
- if (!size) return false;
- uptr max = (uptr)-1L;
- return (max / size) < n;
-}
-
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+ return atomic_load_relaxed(&allocator_out_of_memory);
+}
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
- if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
Die();
}
+bool AllocatorMayReturnNull() {
+ return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+ atomic_store(&allocator_may_return_null, may_return_null,
+ memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void NORETURN *DieOnFailure::OnBadRequest() {
+ ReportAllocatorCannotReturnNull();
+}
+
+void NORETURN *DieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ ReportAllocatorCannotReturnNull();
+}
+
} // namespace __sanitizer
namespace __sanitizer {
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+ static void *OnBadRequest();
+ static void *OnOOM();
+};
+// Always dies on the failure.
+struct DieOnFailure {
+ static void NORETURN *OnBadRequest();
+ static void NORETURN *OnOOM();
+};
+
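These policies are selected at compile time as template arguments; later in
this patch, for instance, the internal allocator's secondary is declared as
LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>. A sketch of the choice:

  typedef LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
      HardFailSecondary;  // always dies on failure
  typedef LargeMmapAllocator<NoOpMapUnmapCallback, ReturnNullOrDieOnFailure>
      SoftFailSecondary;  // may return null, per the cached flag above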
+// Returns true if the allocator has detected an OOM condition. Can be used to
+// avoid memory-hungry operations. Set when a failure handler's OnOOM() is
+// called.
+bool IsAllocatorOutOfMemory();
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
-bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
-
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"
--- /dev/null
+//===-- sanitizer_allocator_checks.cc ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_errno.h"
+
+namespace __sanitizer {
+
+void SetErrnoToENOMEM() {
+ errno = errno_ENOMEM;
+}
+
+} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_CHECKS_H
+#define SANITIZER_ALLOCATOR_CHECKS_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+// The following is defined in a separate compilation unit to avoid pulling in
+// sanitizer_errno.h in this header, which leads to conflicts when other system
+// headers include errno.h. The call only happens on the (unlikely) allocation
+// failure path, so we do not care much about having it inlined.
+void SetErrnoToENOMEM();
+
+// A common errno setting logic shared by almost all sanitizer allocator APIs.
+INLINE void *SetErrnoOnNull(void *ptr) {
+ if (UNLIKELY(!ptr))
+ SetErrnoToENOMEM();
+ return ptr;
+}
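A sketch of the intended call pattern for SetErrnoOnNull(): wrap the raw
allocation result so errno is set to ENOMEM exactly on failure (tool_malloc and
Allocate are hypothetical names):

  static void *tool_malloc(uptr size) {
    void *p = Allocate(size);  // hypothetical tool-side allocation
    return SetErrnoOnNull(p);  // errno = ENOMEM iff p is null
  }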
+
+// In case of the check failure, the caller of the following Check... functions
+// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
+// failure handling policy.
+
+// Checks aligned_alloc() parameters: for the POSIX implementation, verifies
+// that the alignment is a power of two and that the size is a multiple of the
+// alignment; for non-POSIX ones, only the relaxed requirement that the size
+// is a multiple of the alignment.
+INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+#if SANITIZER_POSIX
+ return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0;
+#else
+ return size % alignment == 0;
+#endif
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+ return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT
+}
+
+// Returns true if calloc(size, n) call overflows on size*n calculation.
+INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+ if (!size)
+ return false;
+ uptr max = (uptr)-1L;
+ return (max / size) < n;
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of page_size.
+INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+ return RoundUpTo(size, page_size) < size;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_CHECKS_H
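As a worked example of CheckForCallocOverflow() on a 64-bit target: for
calloc(n, size) with n = size = 2^32, max / size = (2^64 - 1) / 2^32 =
2^32 - 1 < n, so the request is rejected, while size == 0 is accepted via the
early return. A hedged sketch of wiring the checks to a failure policy,
mirroring the InternalCalloc() change earlier in this patch (tool_calloc and
tool_malloc are hypothetical):

  template <class Policy>
  void *tool_calloc(uptr nmemb, uptr size) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
      return Policy::OnBadRequest();      // nmemb * size would overflow
    void *p = tool_malloc(nmemb * size);  // safe: product cannot overflow
    if (p) internal_memset(p, 0, nmemb * size);
    return SetErrnoOnNull(p);
  }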
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
+ typedef typename SecondaryAllocator::FailureHandler FailureHandler;
- void InitLinkerInitialized(bool may_return_null) {
- secondary_.InitLinkerInitialized(may_return_null);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
}
- void Init(bool may_return_null) {
- secondary_.Init(may_return_null);
+ void Init(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
stats_.Init();
- InitCommon(may_return_null);
}
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false, bool check_rss_limit = false) {
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
- if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
- if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ if (size + alignment < size)
+ return FailureHandler::OnBadRequest();
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
- if (from_primary)
+ if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
- res = secondary_.Allocate(&stats_, size, alignment);
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (!res)
+ return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared && res && from_primary)
- internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(true);
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
}
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- bool RssLimitIsExceeded() {
- return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
- }
-
- void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
- atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
- memory_order_release);
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
void Deallocate(AllocatorCache *cache, void *p) {
primary_.ForceUnlock();
}
- void ReleaseToOS() { primary_.ReleaseToOS(); }
-
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
- atomic_uint8_t rss_limit_is_exceeded_;
};
void (*free_hook)(const void *));
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
+ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+ void __sanitizer_free_hook(void *ptr);
-
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- void __sanitizer_print_memory_profile(int top_percent);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
// purposes.
typedef CompactSizeClassMap InternalSizeClassMap;
-static const uptr kInternalAllocatorSpace = 0;
-static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kInternalAllocatorRegionSizeLog = 20;
-#if SANITIZER_WORDSIZE == 32
static const uptr kInternalAllocatorNumRegions =
- kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
+ SANITIZER_MMAP_RANGE_SIZE >> kInternalAllocatorRegionSizeLog;
+#if SANITIZER_WORDSIZE == 32
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
-static const uptr kInternalAllocatorNumRegions =
- kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
-typedef SizeClassAllocator32<
- kInternalAllocatorSpace, kInternalAllocatorSize, 0, InternalSizeClassMap,
- kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef InternalSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = kInternalAllocatorRegionSizeLog;
+ typedef __sanitizer::ByteMap ByteMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<> > InternalAllocator;
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+ > InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0);
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
- typedef typename Allocator::SizeClassMapT SizeClassMap;
- typedef typename Allocator::CompactPtrT CompactPtrT;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(c, allocator, class_id);
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(c, allocator, class_id)))
+ return nullptr;
+ }
+ stats_.Add(AllocatorStatAllocated, c->class_size);
CHECK_GT(c->count, 0);
CompactPtrT chunk = c->chunks[--c->count];
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(c, allocator, class_id, c->max_count / 2);
}
void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
while (c->count > 0)
- Drain(c, allocator, class_id, c->count);
+ Drain(c, allocator, i, c->count);
}
}
- // private:
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
struct PerClass {
u32 count;
u32 max_count;
+ uptr class_size;
CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
void InitCache() {
- if (per_class_[1].max_count)
+ if (LIKELY(per_class_[1].max_count))
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ c->class_size = Allocator::ClassIdToSize(i);
}
}
- NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+ NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
uptr class_id) {
InitCache();
- uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
- allocator->GetFromAllocator(&stats_, class_id, c->chunks,
- num_requested_chunks);
+ uptr num_requested_chunks = c->max_count / 2;
+ if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks)))
+ return false;
c->count = num_requested_chunks;
+ return true;
}
NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
struct SizeClassAllocator32LocalCache {
typedef SizeClassAllocator Allocator;
typedef typename Allocator::TransferBatch TransferBatch;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
s->Register(&stats_);
}
+ // Returns a TransferBatch suitable for class_id.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ Deallocate(allocator, batch_class_id, b);
+ }
+
void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
Drain(allocator);
if (s)
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(allocator, class_id)))
+ return nullptr;
+ }
+ stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
return res;
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
}
void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
while (c->count > 0)
- Drain(allocator, class_id);
+ Drain(allocator, i);
}
}
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
+ // allocated from kBatchClassID size class (except for those that are needed
+ // for kBatchClassID itself). The goal is to have TransferBatches in a totally
+ // different region of RAM to improve security.
+ static const bool kUseSeparateSizeClassForBatch =
+ Allocator::kUseSeparateSizeClassForBatch;
+
struct PerClass {
uptr count;
uptr max_count;
+ uptr class_size;
+ uptr batch_class_id;
void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
void InitCache() {
- if (per_class_[1].max_count)
+ if (LIKELY(per_class_[1].max_count))
return;
+ const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
- c->max_count = 2 * TransferBatch::MaxCached(i);
+ uptr max_cached = TransferBatch::MaxCached(i);
+ c->max_count = 2 * max_cached;
+ c->class_size = Allocator::ClassIdToSize(i);
+ // Precompute the class id to use to store batches for the current class
+ // id. 0 means the class size is large enough to store a batch within one
+ // of the chunks. If using a separate size class, it will always be
+ // kBatchClassID, except for kBatchClassID itself.
+ if (kUseSeparateSizeClassForBatch) {
+ c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
+ } else {
+ c->batch_class_id = (c->class_size <
+ TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
+ batch_class_id : 0;
+ }
}
}
- // TransferBatch class is declared in SizeClassAllocator.
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we may use one of the chunks to store the batch.
- // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
- static uptr SizeClassForTransferBatch(uptr class_id) {
- if (Allocator::ClassIdToSize(class_id) <
- TransferBatch::AllocationSizeRequiredForNElements(
- TransferBatch::MaxCached(class_id)))
- return SizeClassMap::ClassID(sizeof(TransferBatch));
- return 0;
- }
-
- // Returns a TransferBatch suitable for class_id.
- // For small size classes allocates the batch from the allocator.
- // For large size classes simply returns b.
- TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
- TransferBatch *b) {
- if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
- return (TransferBatch*)Allocate(allocator, batch_class_id);
- return b;
- }
-
- // Destroys TransferBatch b.
- // For small size classes deallocates b to the allocator.
- // Does notthing for large size classes.
- void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
- TransferBatch *b) {
- if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
- Deallocate(allocator, batch_class_id, b);
- }
-
- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ if (UNLIKELY(!b))
+ return false;
CHECK_GT(b->Count(), 0);
b->CopyToArray(c->batch);
c->count = b->Count();
DestroyBatch(class_id, allocator, b);
+ return true;
}
NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
uptr first_idx_to_drain = c->count - cnt;
TransferBatch *b = CreateBatch(
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non-recoverable.
+ // TODO(alekseys): Figure out how to do it without allocating a new batch.
+ if (UNLIKELY(!b))
+ DieOnFailure::OnOOM();
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
// be returned by MmapOrDie().
//
// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
//
// In order to avoid false sharing the objects of this class should be
// chache-line aligned.
-template <const uptr kSpaceBeg, const u64 kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- const uptr kRegionSizeLog,
- class ByteMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
+
+struct SizeClassAllocator32FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ kUseSeparateSizeClassForBatch = 2,
+ };
+};
+
+template <class Params>
class SizeClassAllocator32 {
public:
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const u64 kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = Params::kRegionSizeLog;
+ typedef typename Params::ByteMap ByteMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
+ static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+
struct TransferBatch {
static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
static const uptr kBatchSize = sizeof(TransferBatch);
COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
- COMPILER_CHECK(sizeof(TransferBatch) ==
- SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+ COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
static uptr ClassIdToSize(uptr class_id) {
- return SizeClassMap::Size(class_id);
+ return (class_id == SizeClassMap::kBatchClassID) ?
+ kBatchSize : SizeClassMap::Size(class_id);
}
- typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocator32<Params> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
- void Init() {
+ void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
void *MapWithCallback(uptr size) {
- size = RoundUpTo(size, GetPageSizeCached());
void *res = MmapOrDie(size, "SizeClassAllocator32");
MapUnmapCallback().OnMap((uptr)res, size);
return res;
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
+ if (sci->free_list.empty() &&
+ UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+ return nullptr;
CHECK(!sci->free_list.empty());
TransferBatch *b = sci->free_list.front();
sci->free_list.pop_front();
NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
TransferBatch *b) {
CHECK_LT(class_id, kNumClasses);
+ CHECK_GT(b->Count(), 0);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
- CHECK_GT(b->Count(), 0);
sci->free_list.push_front(b);
}
return 0;
}
- // This is empty here. Currently only implemented in 64-bit allocator.
- void ReleaseToOS() { }
-
-
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+ kRegionSize, kRegionSize, "SizeClassAllocator32"));
+ if (UNLIKELY(!res))
+ return 0;
MapUnmapCallback().OnMap(res, kRegionSize);
stat->Add(AllocatorStatMapped, kRegionSize);
- CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ CHECK(IsAligned(res, kRegionSize));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
}
return &size_class_info_array[class_id];
}
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
+ if (UNLIKELY(!reg))
+ return false;
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = TransferBatch::MaxCached(class_id);
+ CHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ if (UNLIKELY(!b))
+ return false;
b->Clear();
}
b->Add((void*)i);
if (b->Count() == max_count) {
- CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
b = nullptr;
}
CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
}
+ return true;
}
ByteMap possible_regions;
// as a 4-byte integer (offset from the region start shifted right by 4).
typedef u32 CompactPtrT;
static const uptr kCompactPtrScale = 4;
- CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
}
- uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
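+  // For example, with kCompactPtrScale == 4 a chunk at base + 0x120 is
+  // encoded as 0x12, and a u32 compact pointer can address up to 2^36 bytes
+  // (64 GB) from the region base.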
- void Init() {
+ void Init(s32 release_to_os_interval_ms) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
if (kUsingConstantSpaceBeg) {
CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
- MapWithCallback(SpaceEnd(), AdditionalSize());
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
}
- void MapWithCallback(uptr beg, uptr size) {
- CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
- MapUnmapCallback().OnMap(beg, size);
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
}
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
}
static bool CanAllocate(uptr size, uptr alignment) {
BlockingMutexLock l(®ion->mutex);
uptr old_num_chunks = region->num_freed_chunks;
uptr new_num_freed_chunks = old_num_chunks + n_chunks;
- EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+    // Failure to allocate free array space while releasing memory is
+    // unrecoverable.
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
+ new_num_freed_chunks)))
+ DieOnFailure::OnOOM();
for (uptr i = 0; i < n_chunks; i++)
free_array[old_num_chunks + i] = chunks[i];
region->num_freed_chunks = new_num_freed_chunks;
- region->n_freed += n_chunks;
+ region->stats.n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id);
}
- NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
CompactPtrT *chunks, uptr n_chunks) {
RegionInfo *region = GetRegionInfo(class_id);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
BlockingMutexLock l(®ion->mutex);
if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
- PopulateFreeArray(stat, class_id, region,
- n_chunks - region->num_freed_chunks);
+ if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks)))
+ return false;
CHECK_GE(region->num_freed_chunks, n_chunks);
}
region->num_freed_chunks -= n_chunks;
uptr base_idx = region->num_freed_chunks;
for (uptr i = 0; i < n_chunks; i++)
chunks[i] = free_array[base_idx + i];
- region->n_allocated += n_chunks;
+ region->stats.n_allocated += n_chunks;
+ return true;
}
-
bool PointerIsMine(const void *p) {
uptr P = reinterpret_cast<uptr>(p);
if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
space_beg;
}
- uptr GetRegionBeginBySizeClass(uptr class_id) {
+ uptr GetRegionBeginBySizeClass(uptr class_id) const {
return SpaceBeg() + kRegionSize * class_id;
}
// Test-only.
void TestOnlyUnmap() {
- UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+ UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
}
static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
void PrintStats(uptr class_id, uptr rss) {
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user == 0) return;
- uptr in_use = region->n_allocated - region->n_freed;
+ uptr in_use = region->stats.n_allocated - region->stats.n_freed;
uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
Printf(
- " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
- "num_freed_chunks %zd"
- " avail: %zd rss: %zdK releases: %zd\n",
- class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
- region->n_allocated, region->n_freed, in_use,
- region->num_freed_chunks, avail_chunks, rss >> 10,
- region->rtoi.num_releases);
+ "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+ "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
+ "last released: %6zdK region: 0x%zx\n",
+ region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
+ region->mapped_user >> 10, region->stats.n_allocated,
+ region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
+ rss >> 10, region->rtoi.num_releases,
+ region->rtoi.last_released_bytes >> 10,
+ SpaceBeg() + kRegionSize * class_id);
}
void PrintStats() {
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
RegionInfo *region = GetRegionInfo(class_id);
total_mapped += region->mapped_user;
- n_allocated += region->n_allocated;
- n_freed += region->n_freed;
+ n_allocated += region->stats.n_allocated;
+ n_freed += region->stats.n_freed;
}
Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
"remains %zd\n",
GetPageSizeCached());
}
- void ReleaseToOS() {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++)
- ReleaseToOS(class_id);
- }
-
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+  // A packed array of counters. Each counter occupies 2^n bits, enough to
+  // store the counter's max_value. The constructor tries to allocate the
+  // required buffer via mapper->MapPackedCounterArrayBuffer, and the caller is
+  // expected to verify that initialization succeeded by checking the
+  // IsAllocated() result. For performance reasons none of the accessors
+  // validate their arguments; it is assumed that the index is always in the
+  // [0, n) range and that the value is never incremented past max_value.
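+  // A worked example (illustrative): with max_value == 14 each counter needs
+  // RoundUpToPowerOfTwo(MostSignificantSetBitIndex(14) + 1) == 4 bits, so
+  // counter_mask == 0xf and 64 / 4 == 16 counters are packed into each u64;
+  // storing n == 100 counters then takes RoundUpTo(100, 16) / 16 * 8 == 56
+  // bytes of buffer.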
+ template<class MemoryMapperT>
+ class PackedCounterArray {
+ public:
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
+ : n(num_counters), memory_mapper(mapper) {
+ CHECK_GT(num_counters, 0);
+ CHECK_GT(max_value, 0);
+ constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
+      // Rounding the counter storage size up to a power of two allows using
+      // bit shifts to calculate a particular counter's index and offset.
+ uptr counter_size_bits =
+ RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
+ CHECK_LE(counter_size_bits, kMaxCounterBits);
+ counter_size_bits_log = Log2(counter_size_bits);
+ counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
+
+ uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
+ CHECK_GT(packing_ratio, 0);
+ packing_ratio_log = Log2(packing_ratio);
+ bit_offset_mask = packing_ratio - 1;
+
+ buffer_size =
+ (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
+ sizeof(*buffer);
+ buffer = reinterpret_cast<u64*>(
+ memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
+ }
+ ~PackedCounterArray() {
+ if (buffer) {
+ memory_mapper->UnmapPackedCounterArrayBuffer(
+ reinterpret_cast<uptr>(buffer), buffer_size);
+ }
+ }
+
+ bool IsAllocated() const {
+ return !!buffer;
+ }
+
+ u64 GetCount() const {
+ return n;
+ }
+
+ uptr Get(uptr i) const {
+ DCHECK_LT(i, n);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ return (buffer[index] >> bit_offset) & counter_mask;
+ }
+
+ void Inc(uptr i) const {
+ DCHECK_LT(Get(i), counter_mask);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ buffer[index] += 1ULL << bit_offset;
+ }
+
+ void IncRange(uptr from, uptr to) const {
+ DCHECK_LE(from, to);
+ for (uptr i = from; i <= to; i++)
+ Inc(i);
+ }
+
+ private:
+ const u64 n;
+ u64 counter_size_bits_log;
+ u64 counter_mask;
+ u64 packing_ratio_log;
+ u64 bit_offset_mask;
+
+ MemoryMapperT* const memory_mapper;
+ u64 buffer_size;
+ u64* buffer;
+ };
+
+ template<class MemoryMapperT>
+ class FreePagesRangeTracker {
+ public:
+ explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ : memory_mapper(mapper),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
+ in_the_range(false), current_page(0), current_range_start_page(0) {}
+
+ void NextPage(bool freed) {
+ if (freed) {
+ if (!in_the_range) {
+ current_range_start_page = current_page;
+ in_the_range = true;
+ }
+ } else {
+ CloseOpenedRange();
+ }
+ current_page++;
+ }
+
+ void Done() {
+ CloseOpenedRange();
+ }
+
+ private:
+ void CloseOpenedRange() {
+ if (in_the_range) {
+ memory_mapper->ReleasePageRangeToOS(
+ current_range_start_page << page_size_scaled_log,
+ current_page << page_size_scaled_log);
+ in_the_range = false;
+ }
+ }
+
+ MemoryMapperT* const memory_mapper;
+ const uptr page_size_scaled_log;
+ bool in_the_range;
+ uptr current_page;
+ uptr current_range_start_page;
+ };
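+  // Usage sketch for FreePagesRangeTracker (illustrative): the sequence
+  // NextPage(true), NextPage(true), NextPage(false), Done() coalesces the two
+  // leading freed pages into a single call
+  // memory_mapper->ReleasePageRangeToOS(0, 2 << page_size_scaled_log).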
+
+  // Iterates over the free_array to identify memory pages that contain only
+  // freed chunks and releases those pages back to the OS.
+ // allocated_pages_count is the total number of pages allocated for the
+ // current bucket.
+ template<class MemoryMapperT>
+ static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
+ uptr free_array_count, uptr chunk_size,
+ uptr allocated_pages_count,
+ MemoryMapperT *memory_mapper) {
+ const uptr page_size = GetPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr full_pages_chunk_count_max;
+ bool same_chunk_count_per_page;
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+      // Same number of chunks per page, no crossovers.
+ full_pages_chunk_count_max = page_size / chunk_size;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
+ chunk_size % (page_size % chunk_size) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 2;
+ same_chunk_count_per_page = false;
+ } else if (chunk_size > page_size && chunk_size % page_size == 0) {
+ // One chunk covers multiple pages, no cross overs.
+ full_pages_chunk_count_max = 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size > page_size) {
+      // One chunk covers multiple pages and some chunks cross page
+      // boundaries. Some pages contain one chunk, some contain two.
+ full_pages_chunk_count_max = 2;
+ same_chunk_count_per_page = false;
+ } else {
+ UNREACHABLE("All chunk_size/page_size ratios must be handled.");
+ }
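+    // Illustrative examples, assuming a 4 KB page: chunk_size == 1024 takes
+    // the first branch (4 chunks per page, no crossovers); chunk_size == 80
+    // takes the second (4096 % 80 == 16, 80 % 16 == 0, every page sees 52
+    // chunks); chunk_size == 96 takes the third (pages see 43 or 44 chunks);
+    // and chunk_size == 8192 takes the fourth (each chunk covers exactly two
+    // pages).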
+
+ PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
+ full_pages_chunk_count_max,
+ memory_mapper);
+ if (!counters.IsAllocated())
+ return;
+
+ const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
+ const uptr page_size_scaled = page_size >> kCompactPtrScale;
+ const uptr page_size_scaled_log = Log2(page_size_scaled);
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Each chunk affects one page only.
+ for (uptr i = 0; i < free_array_count; i++)
+ counters.Inc(free_array[i] >> page_size_scaled_log);
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (uptr i = 0; i < free_array_count; i++) {
+ counters.IncRange(
+ free_array[i] >> page_size_scaled_log,
+ (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
+ }
+ }
+
+ // Iterate over pages detecting ranges of pages with chunk counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ if (same_chunk_count_per_page) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr i = 0; i < counters.GetCount(); i++)
+ range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
+ } else {
+      // Slow path: go through the pages keeping count of how many chunks
+      // affect each page.
+ const uptr pn =
+ chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
+ const uptr pnc = pn * chunk_size_scaled;
+      // The idea is to advance the current boundary by the first chunk size,
+      // then by the middle portion size (the part of the page covered by
+      // chunks other than the first and the last one), and finally by the last
+      // chunk size, adding up the number of chunks on the current page and
+      // checking on every step whether a page boundary was crossed.
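+      // A worked example (illustrative), assuming 4 KB pages, kCompactPtrScale
+      // == 4 and 96-byte chunks (page_size_scaled == 256, chunk_size_scaled ==
+      // 6, pn == 42, pnc == 252): on page 0 the boundary is 256 and
+      // current_boundary advances 0 -> 252 -> 258, giving chunks_per_page ==
+      // 43; on page 1 the straddling chunk counts again (258 > 256) and
+      // 258 -> 510 -> 516 yields chunks_per_page == 44.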
+ uptr prev_page_boundary = 0;
+ uptr current_boundary = 0;
+ for (uptr i = 0; i < counters.GetCount(); i++) {
+ uptr page_boundary = prev_page_boundary + page_size_scaled;
+ uptr chunks_per_page = pn;
+ if (current_boundary < page_boundary) {
+ if (current_boundary > prev_page_boundary)
+ chunks_per_page++;
+ current_boundary += pnc;
+ if (current_boundary < page_boundary) {
+ chunks_per_page++;
+ current_boundary += chunk_size_scaled;
+ }
+ }
+ prev_page_boundary = page_boundary;
+
+ range_tracker.NextPage(counters.Get(i) == chunks_per_page);
+ }
+ }
+ range_tracker.Done();
+ }
+
private:
+ friend class MemoryMapper;
+
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
static const uptr kFreeArrayMapSize = 1 << 16;
- // Granularity of ReleaseToOs (aka madvise).
- static const uptr kReleaseToOsGranularity = 1 << 12;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct Stats {
+ uptr n_allocated;
+ uptr n_freed;
+ };
struct ReleaseToOsInfo {
uptr n_freed_at_last_release;
uptr num_releases;
+ u64 last_release_at_ns;
+ u64 last_released_bytes;
};
struct RegionInfo {
uptr allocated_meta; // Bytes allocated for metadata.
uptr mapped_user; // Bytes mapped for user memory.
uptr mapped_meta; // Bytes mapped for metadata.
- u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
- uptr n_allocated, n_freed; // Just stats.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ bool exhausted; // Whether region is out of space for new chunks.
+ Stats stats;
ReleaseToOsInfo rtoi;
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
Swap(a[i], a[RandN(rand_state, i + 1)]);
}
- RegionInfo *GetRegionInfo(uptr class_id) {
+ RegionInfo *GetRegionInfo(uptr class_id) const {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions =
reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
return ®ions[class_id];
}
- uptr GetMetadataEnd(uptr region_beg) {
+ uptr GetMetadataEnd(uptr region_beg) const {
return region_beg + kRegionSize - kFreeArraySize;
}
- uptr GetChunkIdx(uptr chunk, uptr size) {
+ uptr GetChunkIdx(uptr chunk, uptr size) const {
if (!kUsingConstantSpaceBeg)
chunk -= SpaceBeg();
return (u32)offset / (u32)size;
}
- CompactPtrT *GetFreeArray(uptr region_beg) {
- return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
- kFreeArraySize);
+ CompactPtrT *GetFreeArray(uptr region_beg) const {
+ return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
}
- void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ bool MapWithCallback(uptr beg, uptr size) {
+ uptr mapped = reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(beg, size));
+ if (UNLIKELY(!mapped))
+ return false;
+ CHECK_EQ(beg, mapped);
+ MapUnmapCallback().OnMap(beg, size);
+ return true;
+ }
+
+ void MapWithCallbackOrDie(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
uptr num_freed_chunks) {
uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
if (region->mapped_free_array < needed_space) {
- CHECK_LE(needed_space, kFreeArraySize);
uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
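+      // E.g., with kFreeArrayMapSize == 1 << 16, a needed_space of 70,000
+      // bytes is rounded up to two 64 KB granules:
+      // RoundUpTo(70000, 65536) == 131072.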
+ CHECK_LE(new_mapped_free_array, kFreeArraySize);
uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
region->mapped_free_array;
uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
- MapWithCallback(current_map_end, new_map_size);
+ if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size)))
+ return false;
region->mapped_free_array = new_mapped_free_array;
}
+ return true;
}
-
- NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
RegionInfo *region, uptr requested_count) {
// region->mutex is held.
- uptr size = ClassIdToSize(class_id);
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + requested_count * size;
- uptr region_beg = GetRegionBeginBySizeClass(class_id);
- if (end_idx > region->mapped_user) {
+ const uptr size = ClassIdToSize(class_id);
+ const uptr new_space_beg = region->allocated_user;
+ const uptr new_space_end = new_space_beg + requested_count * size;
+ const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+
+ // Map more space for chunks, if necessary.
+ if (new_space_end > region->mapped_user) {
if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
// Do the mmap for the user memory.
uptr map_size = kUserMapSize;
- while (end_idx > region->mapped_user + map_size)
+ while (new_space_end > region->mapped_user + map_size)
map_size += kUserMapSize;
- CHECK_GE(region->mapped_user + map_size, end_idx);
- MapWithCallback(region_beg + region->mapped_user, map_size);
+ CHECK_GE(region->mapped_user + map_size, new_space_end);
+ if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
+ map_size)))
+ return false;
stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
- CompactPtrT *free_array = GetFreeArray(region_beg);
- uptr total_count = (region->mapped_user - beg_idx) / size;
- uptr num_freed_chunks = region->num_freed_chunks;
- EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
- for (uptr i = 0; i < total_count; i++) {
- uptr chunk = beg_idx + i * size;
- free_array[num_freed_chunks + total_count - 1 - i] =
- PointerToCompactPtr(0, chunk);
+ const uptr new_chunks_count = (region->mapped_user - new_space_beg) / size;
+
+ // Calculate the required space for metadata.
+ const uptr requested_allocated_meta =
+ region->allocated_meta + new_chunks_count * kMetadataSize;
+ uptr requested_mapped_meta = region->mapped_meta;
+ while (requested_allocated_meta > requested_mapped_meta)
+ requested_mapped_meta += kMetaMapSize;
+ // Check whether this size class is exhausted.
+ if (region->mapped_user + requested_mapped_meta >
+ kRegionSize - kFreeArraySize) {
+ if (!region->exhausted) {
+ region->exhausted = true;
+ Printf("%s: Out of memory. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize >> 20, size);
+ }
+ return false;
+ }
+ // Map more space for metadata, if necessary.
+ if (requested_mapped_meta > region->mapped_meta) {
+ if (UNLIKELY(!MapWithCallback(
+ GetMetadataEnd(region_beg) - requested_mapped_meta,
+ requested_mapped_meta - region->mapped_meta)))
+ return false;
+ region->mapped_meta = requested_mapped_meta;
}
+
+ // If necessary, allocate more space for the free array and populate it with
+ // newly allocated chunks.
+ const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
+ return false;
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ for (uptr i = 0, chunk = new_space_beg; i < new_chunks_count;
+ i++, chunk += size)
+ free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
if (kRandomShuffleChunks)
- RandomShuffle(&free_array[num_freed_chunks], total_count,
+ RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
®ion->rand_state);
- region->num_freed_chunks += total_count;
- region->allocated_user += total_count * size;
- CHECK_LE(region->allocated_user, region->mapped_user);
- region->allocated_meta += total_count * kMetadataSize;
- if (region->allocated_meta > region->mapped_meta) {
- uptr map_size = kMetaMapSize;
- while (region->allocated_meta > region->mapped_meta + map_size)
- map_size += kMetaMapSize;
- // Do the mmap for the metadata.
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
- MapWithCallback(GetMetadataEnd(region_beg) -
- region->mapped_meta - map_size, map_size);
- region->mapped_meta += map_size;
- }
+ // All necessary memory is mapped and now it is safe to advance all
+ // 'allocated_*' counters.
+ region->num_freed_chunks += new_chunks_count;
+ region->allocated_user += new_chunks_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ region->allocated_meta = requested_allocated_meta;
CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->mapped_user + region->mapped_meta >
- kRegionSize - kFreeArraySize) {
- Printf("%s: Out of memory. Dying. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- }
+ region->exhausted = false;
+
+    // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
+    // MaybeReleaseToOS from releasing just-allocated pages, or protect these
+    // not-yet-used chunks some other way.
- bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
- CompactPtrT first, CompactPtrT last) {
- uptr beg_ptr = CompactPtrToPointer(region_beg, first);
- uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
- CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
- beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
- end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
- if (end_ptr == beg_ptr) return false;
- ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
return true;
}
- // Releases some RAM back to OS.
- // Algorithm:
- // * Lock the region.
- // * Sort the chunks.
- // * Find ranges fully covered by free-d chunks
- // * Release them to OS with madvise.
- //
- // TODO(kcc): make sure we don't do it too frequently.
- void ReleaseToOS(uptr class_id) {
+ class MemoryMapper {
+ public:
+ MemoryMapper(const ThisT& base_allocator, uptr class_id)
+ : allocator(base_allocator),
+ region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
+ released_ranges_count(0),
+ released_bytes(0) {
+ }
+
+ uptr GetReleasedRangesCount() const {
+ return released_ranges_count;
+ }
+
+ uptr GetReleasedBytes() const {
+ return released_bytes;
+ }
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ // TODO(alekseyshl): The idea to explore is to check if we have enough
+ // space between num_freed_chunks*sizeof(CompactPtrT) and
+ // mapped_free_array to fit buffer_size bytes and use that space instead
+ // of mapping a temporary one.
+ return reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
+ }
+
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
+ UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
+ const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
+ const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
+ ReleaseMemoryPagesToOS(from_page, to_page);
+ released_ranges_count++;
+ released_bytes += to_page - from_page;
+ }
+
+ private:
+ const ThisT& allocator;
+ const uptr region_base;
+ uptr released_ranges_count;
+ uptr released_bytes;
+ };
+
+  // Attempts to release RAM occupied by freed chunks back to the OS. The
+  // region is expected to be locked.
+ void MaybeReleaseToOS(uptr class_id) {
RegionInfo *region = GetRegionInfo(class_id);
- uptr region_beg = GetRegionBeginBySizeClass(class_id);
- CompactPtrT *free_array = GetFreeArray(region_beg);
- uptr chunk_size = ClassIdToSize(class_id);
- uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
- const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
- BlockingMutexLock l(®ion->mutex);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
uptr n = region->num_freed_chunks;
- if (n * chunk_size < kReleaseToOsGranularity)
- return; // No chance to release anything.
- if ((region->rtoi.n_freed_at_last_release - region->n_freed) * chunk_size <
- kReleaseToOsGranularity)
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->stats.n_freed -
+ region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
return; // Nothing new to release.
- SortArray(free_array, n);
- uptr beg = free_array[0];
- uptr prev = free_array[0];
- for (uptr i = 1; i < n; i++) {
- uptr chunk = free_array[i];
- CHECK_GT(chunk, prev);
- if (chunk - prev != scaled_chunk_size) {
- CHECK_GT(chunk - prev, scaled_chunk_size);
- if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
- MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
- region->rtoi.n_freed_at_last_release = region->n_freed;
- region->rtoi.num_releases++;
- }
- beg = chunk;
- }
- prev = chunk;
}
+
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
+ return; // Memory was returned recently.
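+    // Note the two gates above: a negative interval (kReleaseToOSIntervalNever)
+    // disables releasing entirely, while e.g. interval_ms == 100 throttles
+    // this region to at most one release per 100,000,000 ns.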
+
+ MemoryMapper memory_mapper(*this, class_id);
+
+ ReleaseFreeMemoryToOS<MemoryMapper>(
+ GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
+ RoundUpTo(region->allocated_user, page_size) / page_size,
+ &memory_mapper);
+
+ if (memory_mapper.GetReleasedRangesCount() > 0) {
+ region->rtoi.n_freed_at_last_release = region->stats.n_freed;
+ region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
+ region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ }
+ region->rtoi.last_release_at_ns = NanoTime();
}
};
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
public:
- void InitLinkerInitialized(bool may_return_null) {
+ typedef FailureHandlerT FailureHandler;
+
+ void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
- void Init(bool may_return_null) {
+ void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
+ InitLinkerInitialized();
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
if (alignment > page_size_)
map_size += alignment;
// Overflow.
- if (map_size < size) return ReturnNullOrDieOnBadRequest();
+ if (map_size < size)
+ return FailureHandler::OnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
- MmapOrDie(map_size, "LargeMmapAllocator"));
+ MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
+ if (!map_beg)
+ return FailureHandler::OnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
return reinterpret_cast<void*>(res);
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
return GetUser(h);
}
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ SortArray(reinterpret_cast<uptr*>(chunks_), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
// This function does the same as GetBlockBegin, but is much faster.
// Must be called with the allocator locked.
void *GetBlockBeginFastLocked(void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return nullptr;
- if (!chunks_sorted_) {
- // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
- SortArray(reinterpret_cast<uptr*>(chunks_), n);
- for (uptr i = 0; i < n; i++)
- chunks_[i]->chunk_idx = i;
- chunks_sorted_ = true;
- min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
- max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
- chunks_[n - 1]->map_size;
- }
+ EnsureSortedChunks();
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ auto max_mmap_ =
+ reinterpret_cast<uptr>(chunks_[n - 1]) + chunks_[n - 1]->map_size;
if (p < min_mmap_ || p >= max_mmap_)
return nullptr;
uptr beg = 0, end = n - 1;
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr i = 0; i < n_chunks_; i++)
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ auto t = chunks_[i];
callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks_[i], t);
+ CHECK_EQ(chunks_[i]->chunk_idx, i);
+ }
}
private:
uptr page_size_;
Header *chunks_[kMaxNumChunks];
uptr n_chunks_;
- uptr min_mmap_, max_mmap_;
bool chunks_sorted_;
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
- kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
static const uptr kLargestClassID = kNumClasses - 2;
+ static const uptr kBatchClassID = kNumClasses - 1;
COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
static const uptr kNumClassesRounded =
kNumClasses <= 32 ? 32 :
kNumClasses <= 128 ? 128 : 256;
static uptr Size(uptr class_id) {
+ // Estimate the result for kBatchClassID because this class does not know
+ // the exact size of TransferBatch. It's OK since we are using the actual
+ // sizeof(TransferBatch) where it matters.
+ if (UNLIKELY(class_id == kBatchClassID))
+ return kMaxNumCachedHint * sizeof(uptr);
if (class_id <= kMidClass)
return kMinSize * class_id;
class_id -= kMidClass;
}
static uptr ClassID(uptr size) {
+ if (UNLIKELY(size > kMaxSize))
+ return 0;
if (size <= kMidSize)
return (size + kMinSize - 1) >> kMinSizeLog;
- if (size > kMaxSize) return 0;
uptr l = MostSignificantSetBitIndex(size);
uptr hbits = (size >> (l - S)) & M;
uptr lbits = size & ((1 << (l - S)) - 1);
}
static uptr MaxCachedHint(uptr class_id) {
- if (class_id == 0) return 0;
+ // Estimate the result for kBatchClassID because this class does not know
+ // the exact size of TransferBatch. We need to cache fewer batches than user
+ // chunks, so this number can be small.
+ if (UNLIKELY(class_id == kBatchClassID))
+ return 16;
+ if (UNLIKELY(class_id == 0))
+ return 0;
uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
return Max<uptr>(1, Min(kMaxNumCachedHint, n));
}
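+  // E.g., assuming kMaxBytesCachedLog == 16 and kMaxNumCachedHint == 128
+  // (both are SizeClassMap template parameters), a 1024-byte class caches up
+  // to 65536 / 1024 == 64 chunks, while a 16-byte class is capped at 128.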
uptr p = prev_s ? (d * 100 / prev_s) : 0;
uptr l = s ? MostSignificantSetBitIndex(s) : 0;
uptr cached = MaxCachedHint(i) * s;
+ if (i == kBatchClassID)
+ d = p = l = 0;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
// Printf("Validate: c%zd\n", c);
uptr s = Size(c);
CHECK_NE(s, 0U);
+ if (c == kBatchClassID)
+ continue;
CHECK_EQ(ClassID(s), c);
- if (c != kNumClasses - 1)
+ if (c < kLargestClassID)
CHECK_EQ(ClassID(s + 1), c + 1);
CHECK_EQ(ClassID(s - 1), c);
- if (c)
- CHECK_GT(Size(c), Size(c-1));
+ CHECK_GT(Size(c), Size(c - 1));
}
CHECK_EQ(ClassID(kMaxSize + 1), 0);
CHECK_LT(c, kNumClasses);
CHECK_GE(Size(c), s);
if (c > 0)
- CHECK_LT(Size(c-1), s);
+ CHECK_LT(Size(c - 1), s);
}
}
};
volatile Type val_dont_use;
};
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
return v;
}
-template<typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a,
- typename T::Type *cmp,
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
Type cmpv = *cmp;
- Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
- if (prev == cmpv)
- return true;
+ Type prev;
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ if (sizeof(*a) == 8) {
+ Type volatile *val_ptr = const_cast<Type volatile *>(&a->val_dont_use);
+ prev = __mips_sync_val_compare_and_swap<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmpv, (u64)xchg);
+ } else {
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ }
+#else
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+#endif
+ if (prev == cmpv) return true;
*cmp = prev;
return false;
}
namespace __sanitizer {
+// MIPS32 does not support atomic operations wider than 4 bytes. To address
+// this lack of functionality, the sanitizer library provides helper methods
+// which use an internal spin lock mechanism to emulate 8-byte atomic
+// operations.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+static void __spin_lock(volatile int *lock) {
+ while (__sync_lock_test_and_set(lock, 1))
+ while (*lock) {
+ }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cache line, which is 32 bytes.
+static struct {
+ int lock;
+ char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0};
+
+template <class T>
+T __mips_sync_fetch_and_add(volatile T *ptr, T val) {
+ T ret;
+
+ __spin_lock(&lock.lock);
+
+ ret = *ptr;
+ *ptr = ret + val;
+
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <class T>
+T __mips_sync_val_compare_and_swap(volatile T *ptr, T oldval, T newval) {
+ T ret;
+ __spin_lock(&lock.lock);
+
+ ret = *ptr;
+ if (ret == oldval) *ptr = newval;
+
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+#endif
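+// With these helpers, an 8-byte atomic load on MIPS32 can be emulated as
+// __mips_sync_fetch_and_add<u64>(ptr, 0), and an 8-byte exchange or CAS goes
+// through __mips_sync_val_compare_and_swap<u64>, as the call sites below show.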
+
INLINE void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
// 64-bit load on 32-bit platform.
// Gross, but simple and reliable.
// Assume that it is not in read-only memory.
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ typename T::Type volatile *val_ptr =
+ const_cast<typename T::Type volatile *>(&a->val_dont_use);
+ v = __mips_sync_fetch_and_add<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), 0);
+#else
v = __sync_fetch_and_add(
const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+#endif
}
return v;
}
typename T::Type cmp = a->val_dont_use;
typename T::Type cur;
for (;;) {
+#if defined(_MIPS_SIM) && _MIPS_SIM == _ABIO32
+ typename T::Type volatile *val_ptr =
+ const_cast<typename T::Type volatile *>(&a->val_dont_use);
+ cur = __mips_sync_val_compare_and_swap<u64>(
+ reinterpret_cast<u64 volatile *>(val_ptr), (u64)cmp, (u64)v);
+#else
cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+#endif
if (cmp == v)
break;
cmp = cur;
atomic_uint32_t current_verbosity;
uptr PageSizeCached;
-StaticSpinMutex report_file_mu;
-ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
-
-void RawWrite(const char *buffer) {
- report_file.Write(buffer, internal_strlen(buffer));
-}
-
-void ReportFile::ReopenIfNecessary() {
- mu->CheckLocked();
- if (fd == kStdoutFd || fd == kStderrFd) return;
-
- uptr pid = internal_getpid();
- // If in tracer, use the parent's file.
- if (pid == stoptheworld_tracer_pid)
- pid = stoptheworld_tracer_ppid;
- if (fd != kInvalidFd) {
- // If the report file is already opened by the current process,
- // do nothing. Otherwise the report file was opened by the parent
- // process, close it now.
- if (fd_pid == pid)
- return;
- else
- CloseFile(fd);
- }
-
- const char *exe_name = GetProcessName();
- if (common_flags()->log_exe_name && exe_name) {
- internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
- exe_name, pid);
- } else {
- internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
- }
- fd = OpenFile(full_path, WrOnly);
- if (fd == kInvalidFd) {
- const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
- WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
- WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
- Die();
- }
- fd_pid = pid;
-}
-
-void ReportFile::SetReportPath(const char *path) {
- if (!path)
- return;
- uptr len = internal_strlen(path);
- if (len > sizeof(path_prefix) - 100) {
- Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
- path[0], path[1], path[2], path[3],
- path[4], path[5], path[6], path[7]);
- Die();
- }
-
- SpinMutexLock l(mu);
- if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
- CloseFile(fd);
- fd = kInvalidFd;
- if (internal_strcmp(path, "stdout") == 0) {
- fd = kStdoutFd;
- } else if (internal_strcmp(path, "stderr") == 0) {
- fd = kStderrFd;
- } else {
- internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
- }
-}
-
// PID of the tracer task in StopTheWorld. It shares the address space with the
// main process, but has a different PID and thus requires special handling.
uptr stoptheworld_tracer_pid = 0;
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;
+StaticSpinMutex CommonSanitizerReportMutex;
+
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
const char *mmap_type, error_t err,
bool raw_report) {
UNREACHABLE("unable to mmap");
}
-bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
- uptr *read_len, uptr max_len, error_t *errno_p) {
- uptr PageSize = GetPageSizeCached();
- uptr kMinFileLen = PageSize;
- *buff = nullptr;
- *buff_size = 0;
- *read_len = 0;
- // The files we usually open are not seekable, so try different buffer sizes.
- for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
- fd_t fd = OpenFile(file_name, RdOnly, errno_p);
- if (fd == kInvalidFd) return false;
- UnmapOrDie(*buff, *buff_size);
- *buff = (char*)MmapOrDie(size, __func__);
- *buff_size = size;
- *read_len = 0;
- // Read up to one page at a time.
- bool reached_eof = false;
- while (*read_len + PageSize <= size) {
- uptr just_read;
- if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) {
- UnmapOrDie(*buff, *buff_size);
- return false;
- }
- if (just_read == 0) {
- reached_eof = true;
- break;
- }
- *read_len += just_read;
- }
- CloseFile(fd);
- if (reached_eof) // We've read the whole file.
- break;
- }
- return true;
-}
-
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
return module;
}
-void ReportErrorSummary(const char *error_message) {
+void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
if (!common_flags()->print_summary)
return;
InternalScopedString buff(kMaxSummaryLength);
- buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message);
+ buff.append("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
__sanitizer_report_error_summary(buff.data());
}
#if !SANITIZER_GO
-void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
- if (!common_flags()->print_summary)
- return;
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name) {
+ if (!common_flags()->print_summary) return;
InternalScopedString buff(kMaxSummaryLength);
buff.append("%s ", error_type);
RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
- ReportErrorSummary(buff.data());
+ ReportErrorSummary(buff.data(), alt_tool_name);
}
#endif
base_address_ = base_address;
}
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize],
+ bool instrumented) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ instrumented_ = instrumented;
+}
+
void LoadedModule::clear() {
InternalFree(full_name_);
+ base_address_ = 0;
+ max_executable_address_ = 0;
full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ instrumented_ = false;
while (!ranges_.empty()) {
AddressRange *r = ranges_.front();
ranges_.pop_front();
}
}
-void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
+ bool writable, const char *name) {
void *mem = InternalAlloc(sizeof(AddressRange));
- AddressRange *r = new(mem) AddressRange(beg, end, executable);
+ AddressRange *r =
+ new(mem) AddressRange(beg, end, executable, writable, name);
ranges_.push_back(r);
+ if (executable && end > max_executable_address_)
+ max_executable_address_ = end;
}
bool LoadedModule::containsAddress(uptr address) const {
return true;
}
-static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
-
-char *FindPathToBinary(const char *name) {
- if (FileExists(name)) {
- return internal_strdup(name);
- }
-
- const char *path = GetEnv("PATH");
- if (!path)
- return nullptr;
- uptr name_len = internal_strlen(name);
- InternalScopedBuffer<char> buffer(kMaxPathLength);
- const char *beg = path;
- while (true) {
- const char *end = internal_strchrnul(beg, kPathSeparator);
- uptr prefix_len = end - beg;
- if (prefix_len + name_len + 2 <= kMaxPathLength) {
- internal_memcpy(buffer.data(), beg, prefix_len);
- buffer[prefix_len] = '/';
- internal_memcpy(&buffer[prefix_len + 1], name, name_len);
- buffer[prefix_len + 1 + name_len] = '\0';
- if (FileExists(buffer.data()))
- return internal_strdup(buffer.data());
- }
- if (*end == '\0') break;
- beg = end + 1;
- }
- return nullptr;
-}
-
static char binary_name_cache_str[kMaxPathLength];
static char process_name_cache_str[kMaxPathLength];
using namespace __sanitizer; // NOLINT
extern "C" {
-void __sanitizer_set_report_path(const char *path) {
- report_file.SetReportPath(path);
-}
-
-void __sanitizer_set_report_fd(void *fd) {
- report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
- report_file.fd_pid = internal_getpid();
-}
-
-void __sanitizer_report_error_summary(const char *error_summary) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
+ const char *error_summary) {
Printf("%s\n", error_summary);
}
void (*free_hook)(const void *)) {
return InstallMallocFreeHooks(malloc_hook, free_hook);
}
-
-#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_print_memory_profile(int top_percent) {
- (void)top_percent;
-}
-#endif
} // extern "C"
#endif
namespace __sanitizer {
-struct StackTrace;
+
struct AddressInfo;
+struct BufferedStackTrace;
+struct SignalContext;
+struct StackTrace;
// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
-uptr GetTid();
+tid_t GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom);
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
+// Behaves just like MmapOrDie, but tolerates an out-of-memory condition and
+// returns nullptr in that case.
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
+// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition
+// and returns nullptr in that case.
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
+// Dies on all errors except out-of-memory, in which case it returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
// Find an available address space.
-uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-void ReleaseMemoryToOS(uptr addr, uptr size);
+// Releases memory pages entirely within the [beg, end] address range. No-op
+// if the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
+typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
+ /*out*/uptr *stats, uptr stats_size);
+
+// Parse the contents of /proc/self/smaps and generate a memory profile.
+// |cb| is a tool-specific callback that fills the |stats| array containing
+// |stats_size| elements.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
+
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
+void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
} while (0)
// Can be used to prevent mixing error reports from different sanitizers.
+// FIXME: Replace with ScopedErrorReportLock and hide.
extern StaticSpinMutex CommonSanitizerReportMutex;
-struct ReportFile {
- void Write(const char *buffer, uptr length);
- bool SupportsColors();
- void SetReportPath(const char *path);
-
- // Don't use fields directly. They are only declared public to allow
- // aggregate initialization.
-
- // Protects fields below.
- StaticSpinMutex *mu;
- // Opened file descriptor. Defaults to stderr. It may be equal to
- // kInvalidFd, in which case new file will be opened when necessary.
- fd_t fd;
- // Path prefix of report file, set via __sanitizer_set_report_path.
- char path_prefix[kMaxPathLength];
- // Full path to report, obtained as <path_prefix>.PID
- char full_path[kMaxPathLength];
- // PID of the process that opened fd. If a fork() occurs,
- // the PID of child will be different from fd_pid.
- uptr fd_pid;
+// Locks sanitizer error reporting and protects against nested errors.
+class ScopedErrorReportLock {
+ public:
+ ScopedErrorReportLock();
+ ~ScopedErrorReportLock();
- private:
- void ReopenIfNecessary();
+ static void CheckLocked();
};
-extern ReportFile report_file;
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;
-enum FileAccessMode {
- RdOnly,
- WrOnly,
- RdWr
-};
-
-// Returns kInvalidFd on error.
-fd_t OpenFile(const char *filename, FileAccessMode mode,
- error_t *errno_p = nullptr);
-void CloseFile(fd_t);
-
-// Return true on success, false on error.
-bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
- uptr *bytes_read = nullptr, error_t *error_p = nullptr);
-bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
- uptr *bytes_written = nullptr, error_t *error_p = nullptr);
-
-bool RenameFile(const char *oldpath, const char *newpath,
- error_t *error_p = nullptr);
-
-// Scoped file handle closer.
-struct FileCloser {
- explicit FileCloser(fd_t fd) : fd(fd) {}
- ~FileCloser() { CloseFile(fd); }
- fd_t fd;
-};
-
-bool SupportsColoredOutput(fd_t fd);
-
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
uptr *read_len, uptr max_len = 1 << 26,
error_t *errno_p = nullptr);
-// Maps given file to virtual memory, and returns pointer to it
-// (or NULL if mapping fails). Stores the size of mmaped region
-// in '*buff_size'.
-void *MapFileToMemory(const char *file_name, uptr *buff_size);
-void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
bool IsAccessibleMemoryRange(uptr beg, uptr size);
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
-bool FileExists(const char *filename);
+void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
-const char *GetPwd();
-char *FindPathToBinary(const char *name);
-bool IsPathSeparator(const char c);
-bool IsAbsolutePath(const char *path);
-// Starts a subprocess and returs its pid.
-// If *_fd parameters are not kInvalidFd their corresponding input/output
-// streams will be redirect to the file. The files will always be closed
-// in parent process even in case of an error.
-// The child process will close all fds after STDERR_FILENO
-// before passing control to a program.
-pid_t StartSubprocess(const char *filename, const char *const argv[],
- fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
- fd_t stderr_fd = kInvalidFd);
-// Checks if specified process is still running
-bool IsProcessRunning(pid_t pid);
-// Waits for the process to finish and returns its exit code.
-// Returns -1 in case of an error.
-int WaitForProcess(pid_t pid);
u32 GetUid();
void ReExec();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
-void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());
-void CoverageUpdateMapping();
-void CovBeforeFork();
-void CovAfterFork(int child_pid);
-
void InitializeCoverage(bool enabled, const char *coverage_dir);
-void ReInitializeCoverage(bool enabled, const char *coverage_dir);
void InitTlsSize();
uptr GetTlsSize();
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
-// Callback to be called when we want to try releasing unused allocator memory
-// back to the OS.
-typedef void (*AllocatorReleaseToOSCallback)();
-// The callback should be registered once at the tool init time.
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);
-
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
-bool IsHandledDeadlySignal(int signum);
+HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
+
+// Signal reporting.
+// Each sanitizer uses a slightly different implementation of stack unwinding.
+typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
+ const void *callback_context,
+ BufferedStackTrace *stack);
+// Print deadly signal report and die.
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
+// Part of HandleDeadlySignal, exposed for asan.
+void StartReportDeadlySignal();
+// Part of HandleDeadlySignal, exposed for asan.
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();
// Construct a one-line string:
// SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
-void ReportErrorSummary(const char *error_message);
+// If alt_tool_name is provided, it's used in place of SanitizerToolName.
+void ReportErrorSummary(const char *error_message,
+ const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
// error_type file:line[:column][ function]
-void ReportErrorSummary(const char *error_type, const AddressInfo &info);
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, const StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace,
+ const char *alt_tool_name = nullptr);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
uptr capacity() const {
return capacity_;
}
+ void resize(uptr new_size) {
+ Resize(new_size);
+ if (new_size > size_) {
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
}
}
-template<class Container, class Value, class Compare>
-uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
- const Value &val, Compare comp) {
- uptr not_found = last + 1;
- while (last >= first) {
+// Works like std::lower_bound: finds the first element that is not less
+// than val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
uptr mid = (first + last) / 2;
if (comp(v[mid], val))
first = mid + 1;
- else if (comp(val, v[mid]))
- last = mid - 1;
else
- return mid;
+ last = mid;
+ }
+ return first;
+}
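+// Usage sketch (illustrative): for a sorted container v == {1, 3, 3, 7} with
+// a less-than comparator, InternalLowerBound(v, 0, 4, 3, comp) returns 1 (the
+// first 3), and InternalLowerBound(v, 0, 4, 8, comp) returns 4 == last,
+// meaning no element is >= 8.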
+
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+// When adding a new architecture, don't forget to also update
+// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
+inline const char *ModuleArchToString(ModuleArch arch) {
+ switch (arch) {
+ case kModuleArchUnknown:
+ return "";
+ case kModuleArchI386:
+ return "i386";
+ case kModuleArchX86_64:
+ return "x86_64";
+ case kModuleArchX86_64H:
+ return "x86_64h";
+ case kModuleArchARMV6:
+ return "armv6";
+ case kModuleArchARMV7:
+ return "armv7";
+ case kModuleArchARMV7S:
+ return "armv7s";
+ case kModuleArchARMV7K:
+ return "armv7k";
+ case kModuleArchARM64:
+ return "arm64";
}
- return not_found;
+ CHECK(0 && "Invalid module arch");
+ return "";
}
+const uptr kModuleUUIDSize = 16;
+const uptr kMaxSegName = 16;
+
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
public:
- LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
+ LoadedModule()
+ : full_name_(nullptr),
+ base_address_(0),
+ max_executable_address_(0),
+ arch_(kModuleArchUnknown),
+ instrumented_(false) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize], bool instrumented);
void clear();
- void addAddressRange(uptr beg, uptr end, bool executable);
+ void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name = nullptr);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ uptr max_executable_address() const { return max_executable_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
+ bool instrumented() const { return instrumented_; }
struct AddressRange {
AddressRange *next;
uptr beg;
uptr end;
bool executable;
-
- AddressRange(uptr beg, uptr end, bool executable)
- : next(nullptr), beg(beg), end(end), executable(executable) {}
+ bool writable;
+ char name[kMaxSegName];
+
+ AddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name)
+ : next(nullptr),
+ beg(beg),
+ end(end),
+ executable(executable),
+ writable(writable) {
+ internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
+ }
};
const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
private:
char *full_name_; // Owned.
uptr base_address_;
+ uptr max_executable_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
+ bool instrumented_;
IntrusiveList<AddressRange> ranges_;
};
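// Hypothetical usage sketch (not part of this patch; the path and addresses
// are invented for illustration): a platform backend populates a module
// roughly like this:
//
//   LoadedModule module;
//   u8 uuid[kModuleUUIDSize] = {};
//   module.set("/usr/lib/libfoo.so", /*base_address=*/0x700000000000ULL,
//              kModuleArchX86_64, uuid, /*instrumented=*/true);
//   module.addAddressRange(0x700000000000ULL, 0x700000100000ULL,
//                          /*executable=*/true, /*writable=*/false, "__text");
//   CHECK(module.containsAddress(0x700000000f00ULL));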
// filling this information.
class ListOfModules {
public:
- ListOfModules() : modules_(kInitialCapacity) {}
+ ListOfModules() : initialized(false) {}
~ListOfModules() { clear(); }
void init();
+ void fallbackInit(); // Uses fallback init if available, otherwise clears
const LoadedModule *begin() const { return modules_.begin(); }
LoadedModule *begin() { return modules_.begin(); }
const LoadedModule *end() const { return modules_.end(); }
for (auto &module : modules_) module.clear();
modules_.clear();
}
+ void clearOrInit() {
+ initialized ? clear() : modules_.Initialize(kInitialCapacity);
+ initialized = true;
+ }
- InternalMmapVector<LoadedModule> modules_;
+ InternalMmapVectorNoCtor<LoadedModule> modules_;
// We rarely have more than 16K loaded modules.
static const uptr kInitialCapacity = 1 << 14;
+ bool initialized;
};
// Callback type for iterating over a set of memory ranges.
#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
+void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
+// FIXME: MacOS implementation could use CRSetCrashLogMessage.
+INLINE void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
}
struct SignalContext {
+ void *siginfo;
void *context;
uptr addr;
uptr pc;
uptr sp;
uptr bp;
bool is_memory_access;
-
enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
- SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
- bool is_memory_access, WriteFlag write_flag)
- : context(context),
- addr(addr),
- pc(pc),
- sp(sp),
- bp(bp),
- is_memory_access(is_memory_access),
- write_flag(write_flag) {}
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ SignalContext() = default;
// Creates signal context in a platform-specific manner.
- static SignalContext Create(void *siginfo, void *context);
+ // SignalContext is going to keep pointers to siginfo and context without
+ // owning them.
+ SignalContext(void *siginfo, void *context)
+ : siginfo(siginfo),
+ context(context),
+ addr(GetAddress()),
+ is_memory_access(IsMemoryAccess()),
+ write_flag(GetWriteFlag()) {
+ InitPcSpBp();
+ }
- // Returns true if the "context" indicates a memory write.
- static WriteFlag GetWriteFlag(void *context);
-};
+ static void DumpAllRegisters(void *context);
+
+ // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
+ int GetType() const;
+
+ // String description of the signal.
+ const char *Describe() const;
+
+ // Returns true if signal is stack overflow.
+ bool IsStackOverflow() const;
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
+ private:
+ // Platform specific initialization.
+ void InitPcSpBp();
+ uptr GetAddress() const;
+ WriteFlag GetWriteFlag() const;
+ bool IsMemoryAccess() const;
+};
void MaybeReexec();
uptr allocated;
};
+// The default value for allocator_release_to_os_interval_ms common flag to
+// indicate that sanitizer allocator should not attempt to release memory to OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
+void CheckNoDeepBind(const char *filename, int flag);
+
+// Returns the requested amount of random data (up to 256 bytes) that can then
+// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
+bool GetRandom(void *buffer, uptr length, bool blocking = true);
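// Hypothetical usage sketch (not part of this patch): seeding a PRNG, with a
// fixed fallback constant in case the underlying syscall is unavailable:
//
//   u64 seed = 0x853c49e6748fea9bULL;           // Fallback seed.
//   GetRandom(&seed, sizeof(seed), /*blocking=*/false);
//   // On success, seed now holds kernel-provided entropy.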
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
// COMMON_INTERCEPTOR_SET_THREAD_NAME
// COMMON_INTERCEPTOR_ON_DLOPEN
// COMMON_INTERCEPTOR_ON_EXIT
-// COMMON_INTERCEPTOR_MUTEX_LOCK
+// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
// COMMON_INTERCEPTOR_MUTEX_UNLOCK
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_COPY_STRING
+// COMMON_INTERCEPTOR_STRNDUP_IMPL
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
+#include "sanitizer_errno.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_symbolizer.h"
#include "sanitizer_tls_get_addr.h"
#include <stdarg.h>
#if SANITIZER_INTERCEPTOR_HOOKS
-#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
- do { \
- if (f) \
- f(__VA_ARGS__); \
- } while (false);
-#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
- extern "C" { \
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); \
- } // extern "C"
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
#else
#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
#define iconv __bsd_iconv
#endif
+// Platform-specific options.
+#if SANITIZER_MAC
+namespace __sanitizer {
+bool PlatformHasDifferentMemcpyAndMemmove();
+}
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
#endif
#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
#endif
-#ifndef COMMON_INTERCEPTOR_MUTEX_LOCK
-#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) {}
+#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
#endif
#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
#endif
-#define COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n) \
- COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
- common_flags()->strict_string_checks ? (len) + 1 : (n) )
-
#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
- COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
-#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {}
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ CheckNoDeepBind(filename, flag);
#endif
#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
COMMON_INTERCEPT_FUNCTION(fn)
#endif
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_COPY_STRING
+#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL
+#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \
+ COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \
+ uptr copy_length = internal_strnlen(s, size); \
+ char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \
+ if (common_flags()->intercept_strndup) { \
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \
+ } \
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
+ internal_memcpy(new_mem, s, copy_length); \
+ new_mem[copy_length] = '\0'; \
+ return new_mem;
+#endif
+
struct FileMetadata {
// For open_memstream().
char **addr;
static MetadataHashMap *interceptor_metadata_map;
-#if SI_NOT_WINDOWS
+#if SI_POSIX
UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
const FileMetadata &file) {
MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
CHECK(h.exists());
}
-#endif // SI_NOT_WINDOWS
+#endif // SI_POSIX
#if SANITIZER_INTERCEPT_STRLEN
INTERCEPTOR(SIZE_T, strlen, const char *s) {
#define INIT_STRNLEN
#endif
+#if SANITIZER_INTERCEPT_STRNDUP
+INTERCEPTOR(char*, strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)
+#else
+#define INIT_STRNDUP
+#endif // SANITIZER_INTERCEPT_STRNDUP
+
+#if SANITIZER_INTERCEPT___STRNDUP
+INTERCEPTOR(char*, __strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)
+#else
+#define INIT___STRNDUP
+#endif // SANITIZER_INTERCEPT___STRNDUP
+
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
- COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
+ if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
char *domain = REAL(textdomain)(domainname);
if (domain) {
COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_STRING(ctx, s1, Min(i + 1, size));
- COMMON_INTERCEPTOR_READ_STRING(ctx, s2, Min(i + 1, size));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
s2, size, result);
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
- const char *s1, const char *s2, uptr n,
+ const char *s1, const char *s2, uptr size,
int result)
-INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
- for (i = 0; i < n; i++) {
+ for (i = 0; i < size; i++) {
c1 = (unsigned char)s1[i];
c2 = (unsigned char)s2[i];
if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
int result = CharCaseCmp(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
- s1, s2, n, result);
+ s1, s2, size, result);
return result;
}
const char *s2) {
uptr len1 = REAL(strlen)(s1);
uptr len2 = REAL(strlen)(s2);
- COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s1, len1,
- r ? r - s1 + len2 : len1 + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
}
#endif
#define INIT_STRCASESTR
#endif
+#if SANITIZER_INTERCEPT_STRTOK
+
+INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
+ if (!common_flags()->intercept_strtok) {
+ return REAL(strtok)(str, delimiters);
+ }
+ if (common_flags()->strict_string_checks) {
+ // If strict_string_checks is enabled, we check the whole first argument
+ // string on the first call (strtok saves this string in a static buffer
+ // for subsequent calls). We do not need to check strtok's result.
+ // As the delimiters can change, we check them every call.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
+ REAL(strlen)(delimiters) + 1);
+ return REAL(strtok)(str, delimiters);
+ } else {
+ // However, when strict_string_checks is disabled we cannot check the
+ // whole string on the first call. Instead, we check the result string
+ // which is guaranteed to be a NULL-terminated substring of the first
+ // argument. We also conservatively check one character of str and the
+ // delimiters.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
+ char *result = REAL(strtok)(str, delimiters);
+ if (result != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ } else if (str != nullptr) {
+      // No delimiters were found, so it's safe to assume that the entire
+      // str was scanned.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ return result;
+ }
+}
+
+#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)
+#else
+#define INIT_STRTOK
+#endif
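// Illustrative note (not part of this patch): for strtok("a,b", ",") the two
// modes above differ as follows. With strict_string_checks the interceptor
// reads all 4 bytes of "a,b" (including the NUL) before the real call; with
// it disabled, it reads 1 byte each of str and delimiters up front and then
// the returned token "a" (2 bytes) afterwards, so an unterminated str is
// only partially validated.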
+
#if SANITIZER_INTERCEPT_MEMMEM
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
const void *s1, SIZE_T len1, const void *s2,
return internal_strchr(s, c);
COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
char *result = REAL(strchr)(s, c);
- uptr len = internal_strlen(s);
- uptr n = result ? result - s + 1 : len + 1;
- if (common_flags()->intercept_strchr)
- COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n);
+ if (common_flags()->intercept_strchr) {
+    // Keep strlen as a macro argument, as the macro may ignore it.
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s,
+ (result ? result - s : REAL(strlen)(s)) + 1);
+ }
return result;
}
#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return internal_strrchr(s, c);
COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
- uptr len = internal_strlen(s);
if (common_flags()->intercept_strchr)
- COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, len + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
return REAL(strrchr)(s, c);
}
#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
#endif
#if SANITIZER_INTERCEPT_MEMSET
-INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memset(dst, v, size);
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);
- if (common_flags()->intercept_intrin)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- return REAL(memset)(dst, v, size);
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
}
#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
#endif
#if SANITIZER_INTERCEPT_MEMMOVE
-INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
- return internal_memmove(dst, src, size);
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
- return REAL(memmove)(dst, src, size);
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
#endif
#if SANITIZER_INTERCEPT_MEMCPY
-INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same
- // implementation. We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
- }
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);
- if (common_flags()->intercept_intrin) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
- }
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation. We need to use internal_memmove here.
// N.B.: If we switch this to internal_ we'll have to use internal_memmove
// due to memcpy being an alias of memmove on OS X.
- return REAL(memcpy)(dst, src, size);
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
}
-#define INIT_MEMCPY COMMON_INTERCEPT_FUNCTION(memcpy)
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
+
#else
#define INIT_MEMCPY
#endif
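// Illustrative sketch (not part of this patch): why the aliasing dispatch
// above matters. Where memcpy and memmove share one implementation, user
// code may (unportably) pass overlapping buffers to memcpy, so the
// interceptor must preserve memmove semantics there:
//
//   char buf[] = "abcdef";
//   memmove(buf + 1, buf, 3);  // Overlap is well-defined for memmove:
//   // buf is now "aabcef"; a strict memcpy could produce anything here.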
#define INIT_FREXPF_FREXPL
#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
-#if SI_NOT_WINDOWS
+#if SI_POSIX
static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec,
SIZE_T iovlen, SIZE_T maxlen) {
for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
#define INIT_READ
#endif
+#if SANITIZER_INTERCEPT_FREAD
+INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);
+ return res;
+}
+#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)
+#else
+#define INIT_FREAD
+#endif
+
#if SANITIZER_INTERCEPT_PREAD
INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
#define INIT_WRITE
#endif
+#if SANITIZER_INTERCEPT_FWRITE
+INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);
+ SIZE_T res = REAL(fwrite)(p, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);
+ return res;
+}
+#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)
+#else
+#define INIT_FWRITE
+#endif
+
#if SANITIZER_INTERCEPT_PWRITE
INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
void *ctx;
#if SANITIZER_INTERCEPT_SCANF
#define INIT_SCANF \
- COMMON_INTERCEPT_FUNCTION(scanf); \
- COMMON_INTERCEPT_FUNCTION(sscanf); \
- COMMON_INTERCEPT_FUNCTION(fscanf); \
- COMMON_INTERCEPT_FUNCTION(vscanf); \
- COMMON_INTERCEPT_FUNCTION(vsscanf); \
- COMMON_INTERCEPT_FUNCTION(vfscanf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
#else
#define INIT_SCANF
#endif
#if SANITIZER_INTERCEPT_PRINTF
#define INIT_PRINTF \
- COMMON_INTERCEPT_FUNCTION(printf); \
- COMMON_INTERCEPT_FUNCTION(sprintf); \
- COMMON_INTERCEPT_FUNCTION(snprintf); \
- COMMON_INTERCEPT_FUNCTION(asprintf); \
- COMMON_INTERCEPT_FUNCTION(fprintf); \
- COMMON_INTERCEPT_FUNCTION(vprintf); \
- COMMON_INTERCEPT_FUNCTION(vsprintf); \
- COMMON_INTERCEPT_FUNCTION(vsnprintf); \
- COMMON_INTERCEPT_FUNCTION(vasprintf); \
- COMMON_INTERCEPT_FUNCTION(vfprintf);
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
#else
#define INIT_PRINTF
#endif
#endif
#if SANITIZER_INTERCEPT_STRERROR_R
+// There are 2 versions of strerror_r:
+// * POSIX version returns 0 on success, negative error code on failure,
+// writes message to buf.
+// * GNU version returns message pointer, which points to either buf or some
+// static storage.
+#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
+ SANITIZER_MAC || SANITIZER_ANDROID
+// POSIX version. Spec is not clear on whether buf is NULL-terminated.
+// At least on OSX, buf contents are valid even when the call fails.
+INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(strerror_r)(errnum, buf, buflen);
+
+ SIZE_T sz = internal_strnlen(buf, buflen);
+ if (sz < buflen) ++sz;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ return res;
+}
+#else
+// GNU version.
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
- // There are 2 versions of strerror_r:
- // * POSIX version returns 0 on success, negative error code on failure,
- // writes message to buf.
- // * GNU version returns message pointer, which points to either buf or some
- // static storage.
- SIZE_T posix_res = (SIZE_T)res;
- if (posix_res < 1024 || posix_res > (SIZE_T) - 1024) {
- // POSIX version. Spec is not clear on whether buf is NULL-terminated.
- // At least on OSX, buf contents are valid even when the call fails.
- SIZE_T sz = internal_strnlen(buf, buflen);
- if (sz < buflen) ++sz;
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
- } else {
- // GNU version.
+ if (res == buf)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
- }
+ else
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
return res;
}
+#endif  // ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) &&
+        // !_GNU_SOURCE) || SANITIZER_MAC || SANITIZER_ANDROID
#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
#else
#define INIT_STRERROR_R
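// Reference note (not part of this patch): the two prototypes distinguished
// above are:
//
//   int strerror_r(int errnum, char *buf, size_t buflen);    // POSIX
//   char *strerror_r(int errnum, char *buf, size_t buflen);  // GNU
//
// The GNU version may return a pointer into static storage rather than buf,
// which is why the interceptor only marks buf as written when res == buf.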
if (fds && nfds) read_pollfd(ctx, fds, nfds);
if (timeout_ts)
COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout_ts, struct_timespec_sz);
- // FIXME: read sigmask when all of sigemptyset, etc are intercepted.
+ if (sigmask) COMMON_INTERCEPTOR_READ_RANGE(ctx, sigmask, sizeof(*sigmask));
int res =
COMMON_INTERCEPTOR_BLOCK_REAL(ppoll)(fds, nfds, timeout_ts, sigmask);
if (fds && nfds) write_pollfd(ctx, fds, nfds);
INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
- // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
- // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
- // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
__sanitizer_sigset_t *oldset) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
- // FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
INTERCEPTOR(int, pthread_mutex_lock, void *m) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
int res = REAL(pthread_mutex_lock)(m);
if (res == errno_EOWNERDEAD)
COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
if (res == 0 || res == errno_EOWNERDEAD)
- COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
if (res == errno_EINVAL)
COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
return res;
// its metadata. See
// https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
- if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
+ if (outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);
}
#endif
#if SANITIZER_INTERCEPT_AEABI_MEM
-DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr)
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr)
-
INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
- return WRAP(memmove)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
- return WRAP(memcpy)(to, from, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
}
+
// Note the argument order.
INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
- return WRAP(memset)(block, c, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT_AEABI_MEM \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
#endif // SANITIZER_INTERCEPT_AEABI_MEM
#if SANITIZER_INTERCEPT___BZERO
-DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr);
-
INTERCEPTOR(void *, __bzero, void *block, uptr size) {
- return WRAP(memset)(block, 0, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
}
+
#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
#else
#define INIT___BZERO
if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
void *res = REAL(dlopen)(filename, flag);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
return res;
}
void *ctx;
COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
int res = REAL(dlclose)(handle);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
return res;
}
// FIXME: add other *stat interceptor
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline);
+#else
+#define INIT_UTMPX
+#endif
+
+#if SANITIZER_INTERCEPT_GETLOADAVG
+INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);
+ int res = REAL(getloadavg)(loadavg, nelem);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));
+ return res;
+}
+#define INIT_GETLOADAVG \
+ COMMON_INTERCEPT_FUNCTION(getloadavg);
+#else
+#define INIT_GETLOADAVG
+#endif
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif
+
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
+ SIZE_T res = REAL(wcslen)(s);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1));
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n);
+ SIZE_T res = REAL(wcsnlen)(s, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n));
+ return res;
+}
+#define INIT_WCSLEN \
+ COMMON_INTERCEPT_FUNCTION(wcslen); \
+ COMMON_INTERCEPT_FUNCTION(wcsnlen);
+
+#if SANITIZER_INTERCEPT_WCSCAT
+INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
+ SIZE_T src_size = REAL(wcslen)(src);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcscat)(dst, src); // NOLINT
+}
+
+INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
+ SIZE_T src_size = REAL(wcsnlen)(src, n);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
+ Min(src_size + 1, n) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcsncat)(dst, src, n); // NOLINT
+}
+#define INIT_WCSCAT \
+ COMMON_INTERCEPT_FUNCTION(wcscat); \
+ COMMON_INTERCEPT_FUNCTION(wcsncat);
+#else
+#define INIT_WCSCAT
+#endif
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
INIT_TEXTDOMAIN;
INIT_STRLEN;
INIT_STRNLEN;
+ INIT_STRNDUP;
+ INIT___STRNDUP;
INIT_STRCMP;
INIT_STRNCMP;
INIT_STRCASECMP;
INIT_STRCHRNUL;
INIT_STRRCHR;
INIT_STRSPN;
+ INIT_STRTOK;
INIT_STRPBRK;
INIT_MEMSET;
INIT_MEMMOVE;
INIT_MEMRCHR;
INIT_MEMMEM;
INIT_READ;
+ INIT_FREAD;
INIT_PREAD;
INIT_PREAD64;
INIT_READV;
INIT_PREADV;
INIT_PREADV64;
INIT_WRITE;
+ INIT_FWRITE;
INIT_PWRITE;
INIT_PWRITE64;
INIT_WRITEV;
INIT___LXSTAT;
INIT___LXSTAT64;
// FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
+ INIT_GETLOADAVG;
+ INIT_WCSLEN;
+ INIT_WCSCAT;
}
continue;
int size = scanf_get_value_size(&dir);
if (size == FSS_INVALID) {
- Report("WARNING: unexpected format specifier in scanf interceptor: "
- "%.*s\n", dir.end - dir.begin, dir.begin);
+ Report("%s: WARNING: unexpected format specifier in scanf interceptor: ",
+ SanitizerToolName, "%.*s\n", dir.end - dir.begin, dir.begin);
break;
}
void *argp = va_arg(aq, void *);
}
static int printf_get_value_size(PrintfDirective *dir) {
- if (dir->convSpecifier == 'm') {
- return sizeof(char *);
- }
-
if (char_is_one_of(dir->convSpecifier, "cCsS")) {
unsigned charSize =
format_get_char_size(dir->convSpecifier, dir->lengthModifier);
// Dynamic precision
SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
}
+    // %m does not consume an argument: it prints strerror(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
int size = printf_get_value_size(&dir);
if (size == FSS_INVALID) {
- Report("WARNING: unexpected format specifier in printf "
- "interceptor: %.*s\n", dir.end - dir.begin, dir.begin);
+ static int ReportedOnce;
+ if (!ReportedOnce++)
+ Report(
+ "%s: WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s (reported once per process)\n",
+ SanitizerToolName, dir.end - dir.begin, dir.begin);
break;
}
if (dir.convSpecifier == 'n') {
--- /dev/null
+//===-- sanitizer_common_interface.inc ------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
+// Sanitizer weak hooks
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
+// Stacktrace interface.
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
+INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
+INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
--- /dev/null
+//===-- sanitizer_common_interface_posix.inc ------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list only available for Posix systems.
+//===----------------------------------------------------------------------===//
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
#include "sanitizer_common.h"
#include "sanitizer_allocator_interface.h"
+#include "sanitizer_file.h"
#include "sanitizer_flags.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_report_decorator.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
namespace __sanitizer {
+#if !SANITIZER_FUCHSIA
+
bool ReportFile::SupportsColors() {
SpinMutexLock l(mu);
ReopenIfNecessary();
return SupportsColoredOutput(fd);
}
+static INLINE bool ReportSupportsColors() {
+ return report_file.SupportsColors();
+}
+
+#else // SANITIZER_FUCHSIA
+
+// Fuchsia's logs always go through post-processing that handles colorization.
+static INLINE bool ReportSupportsColors() { return true; }
+
+#endif // !SANITIZER_FUCHSIA
+
bool ColorizeReports() {
// FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
// printing on Windows.
const char *flag = common_flags()->color;
return internal_strcmp(flag, "always") == 0 ||
- (internal_strcmp(flag, "auto") == 0 && report_file.SupportsColors());
+ (internal_strcmp(flag, "auto") == 0 && ReportSupportsColors());
}
static void (*sandboxing_callback)();
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack,
+ const char *alt_tool_name) {
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
- ReportErrorSummary(error_type, frame->info);
+ ReportErrorSummary(error_type, frame->info, alt_tool_name);
frame->ClearAll();
#endif
}
SoftRssLimitExceededCallback = Callback;
}
-static AllocatorReleaseToOSCallback ReleseCallback;
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
- CHECK_EQ(ReleseCallback, nullptr);
- ReleseCallback = Callback;
-}
-
#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
bool heap_profile = common_flags()->heap_profile;
- bool allocator_release_to_os = common_flags()->allocator_release_to_os;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
SoftRssLimitExceededCallback(false);
}
}
- if (allocator_release_to_os && ReleseCallback) ReleseCallback();
if (heap_profile &&
current_rss_mb > rss_during_last_reported_profile * 1.1) {
Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
- __sanitizer_print_memory_profile(90);
+ __sanitizer_print_memory_profile(90, 20);
rss_during_last_reported_profile = current_rss_mb;
}
}
}
#endif
+#if !SANITIZER_FUCHSIA && !SANITIZER_GO
+void StartReportDeadlySignal() {
+ // Write the first message using fd=2, just in case.
+ // It may actually fail to write in case stderr is closed.
+ CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName));
+ static const char kDeadlySignal[] = ":DEADLYSIGNAL\n";
+ CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1);
+}
+
+static void MaybeReportNonExecRegion(uptr pc) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
+ Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
+ }
+#endif
+}
+
+static void PrintMemoryByte(InternalScopedString *str, const char *before,
+ u8 byte) {
+ SanitizerCommonDecorator d;
+ str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
+ d.Default());
+}
+
+static void MaybeDumpInstructionBytes(uptr pc) {
+ if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
+ return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void MaybeDumpRegisters(void *context) {
+ if (!common_flags()->dump_registers) return;
+ SignalContext::DumpAllRegisters(context);
+}
+
+static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ SanitizerCommonDecorator d;
+ Printf("%s", d.Warning());
+ static const char kDescription[] = "stack-overflow";
+ Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n",
+ SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,
+ (void *)sig.bp, (void *)sig.sp, tid);
+ Printf("%s", d.Default());
+ InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ unwind(sig, unwind_context, stack);
+ stack->Print();
+ ReportErrorSummary(kDescription, stack);
+}
+
+static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ SanitizerCommonDecorator d;
+ Printf("%s", d.Warning());
+ const char *description = sig.Describe();
+ Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n",
+ SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc,
+ (void *)sig.bp, (void *)sig.sp, tid);
+ Printf("%s", d.Default());
+ if (sig.pc < GetPageSizeCached())
+ Report("Hint: pc points to the zero page.\n");
+ if (sig.is_memory_access) {
+ const char *access_type =
+ sig.write_flag == SignalContext::WRITE
+ ? "WRITE"
+ : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ Report("The signal is caused by a %s memory access.\n", access_type);
+ if (sig.addr < GetPageSizeCached())
+ Report("Hint: address points to the zero page.\n");
+ }
+ MaybeReportNonExecRegion(sig.pc);
+ InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ unwind(sig, unwind_context, stack);
+ stack->Print();
+ MaybeDumpInstructionBytes(sig.pc);
+ MaybeDumpRegisters(sig.context);
+ Printf("%s can not provide additional info.\n", SanitizerToolName);
+ ReportErrorSummary(description, stack);
+}
+
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ if (sig.IsStackOverflow())
+ ReportStackOverflowImpl(sig, tid, unwind, unwind_context);
+ else
+ ReportDeadlySignalImpl(sig, tid, unwind, unwind_context);
+}
+
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ StartReportDeadlySignal();
+ ScopedErrorReportLock rl;
+ SignalContext sig(siginfo, context);
+ ReportDeadlySignal(sig, tid, unwind, unwind_context);
+ Report("ABORTING\n");
+ Die();
+}
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_GO
+
void WriteToSyslog(const char *msg) {
InternalScopedString msg_copy(kErrorMessageBufferSize);
msg_copy.append("%s", msg);
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
- !common_flags()->allocator_release_to_os &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
}
+static atomic_uintptr_t reporting_thread = {0};
+
+ScopedErrorReportLock::ScopedErrorReportLock() {
+ uptr current = GetThreadSelf();
+ for (;;) {
+ uptr expected = 0;
+ if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+ memory_order_relaxed)) {
+ // We've claimed reporting_thread so proceed.
+ CommonSanitizerReportMutex.Lock();
+ return;
+ }
+
+ if (expected == current) {
+      // This is either an async signal or a nested error during error
+      // reporting. Fail fast to avoid deadlocks in Report().
+
+ // Can't use Report() here because of potential deadlocks in nested
+ // signal handlers.
+ CatastrophicErrorWrite(SanitizerToolName,
+ internal_strlen(SanitizerToolName));
+ static const char msg[] = ": nested bug in the same thread, aborting.\n";
+ CatastrophicErrorWrite(msg, sizeof(msg) - 1);
+
+ internal__exit(common_flags()->exitcode);
+ }
+
+ internal_sched_yield();
+ }
+}
+
+ScopedErrorReportLock::~ScopedErrorReportLock() {
+ CommonSanitizerReportMutex.Unlock();
+ atomic_store_relaxed(&reporting_thread, 0);
+}
+
+void ScopedErrorReportLock::CheckLocked() {
+ CommonSanitizerReportMutex.CheckLocked();
+}
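// Hypothetical usage sketch (not part of this patch): a report path claims
// the lock for the duration of one report, as HandleDeadlySignal does above:
//
//   {
//     ScopedErrorReportLock rl;
//     // ... print one error report; a nested report from this same thread
//     // would abort instead of deadlocking ...
//   }  // Lock released; another thread's report may now proceed.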
+
} // namespace __sanitizer
-void NOINLINE
-__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
+ __sanitizer_sandbox_arguments *args) {
__sanitizer::PrepareForSandboxing(args);
if (__sanitizer::sandboxing_callback)
__sanitizer::sandboxing_callback();
--- /dev/null
+//===-- sanitizer_common_nolibc.cc ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains stubs for libc functions to facilitate optional use of
+// libc in no-libcdep sources.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+// The Windows implementations of these functions use the win32 API directly,
+// bypassing libc.
+#if !SANITIZER_WINDOWS
+#if SANITIZER_LINUX
+bool ShouldLogAfterPrintf() { return false; }
+void LogMessageOnPrintf(const char *str) {}
+#endif
+void WriteToSyslog(const char *buffer) {}
+void Abort() { internal__exit(1); }
+void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+#endif // !SANITIZER_WINDOWS
+
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+void ListOfModules::init() {}
+#endif
+
+} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_coverage_fuchsia.cc ------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.
+//
+// This Fuchsia-specific implementation uses the same basic scheme and the
+// same simple '.sancov' file format as the generic implementation. The
+// difference is that we just produce a single blob of output for the whole
+// program, not a separate one per DSO. We do not sort the PC table and do
+// not prune the zeros, so the resulting file is always as large as it
+// would be to report 100% coverage. Implicit tracing information about
+// the address ranges of DSOs allows offline tools to split the one big
+// blob into separate files that the 'sancov' tool can understand.
+//
+// Unlike the traditional implementation that uses an atexit hook to write
+// out data files at the end, the results on Fuchsia do not go into a file
+// per se. The 'coverage_dir' option is ignored. Instead, they are stored
+// directly into a shared memory object (a Zircon VMO). At exit, that VMO
+// is handed over to a system service that's responsible for getting the
+// data out to somewhere that it can be fed into the sancov tool (where and
+// how is not our problem).
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+using namespace __sanitizer; // NOLINT
+
+namespace __sancov {
+namespace {
+
+// TODO(mcgrathr): Move the constant into a header shared with other impls.
+constexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static_assert(SANITIZER_WORDSIZE == 64, "Fuchsia is always LP64");
+
+constexpr const char kSancovSinkName[] = "sancov";
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ // For each PC location being tracked, there is a u32 reserved in global
+ // data called the "guard". At startup, we assign each guard slot a
+ // unique index into the big results array. Later during runtime, the
+ // first call to TracePcGuard (below) will store the corresponding PC at
+ // that index in the array. (Each later call with the same guard slot is
+ // presumed to be from the same PC.) Then it clears the guard slot back
+ // to zero, which tells the compiler not to bother calling in again. At
+ // the end of the run, we have a big array where each element is either
+ // zero or is a tracked PC location that was hit in the trace.
+
+ // This is called from global constructors. Each translation unit has a
+ // contiguous array of guard slots, and a constructor that calls here
+ // with the bounds of its array. Those constructors are allowed to call
+ // here more than once for the same array. Usually all of these
+ // constructors run in the initial thread, but it's possible that a
+ // dlopen call on a secondary thread will run constructors that get here.
+ void InitTracePcGuard(u32 *start, u32 *end) {
+ if (end > start && *start == 0 && common_flags()->coverage) {
+ // Complete the setup before filling in any guards with indices.
+ // This avoids the possibility of code called from Setup reentering
+ // TracePcGuard.
+ u32 idx = Setup(end - start);
+ for (u32 *p = start; p < end; ++p) {
+ *p = idx++;
+ }
+ }
+ }
+
+ void TracePcGuard(u32 *guard, uptr pc) {
+ atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);
+ u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
+ if (idx > 0) array_[idx] = pc;
+ }
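// Illustrative note (not part of this patch): with
// -fsanitize-coverage=trace-pc-guard the compiler emits one guard slot per
// instrumented edge plus a module constructor, so the runtime sees roughly:
//
//   __sanitizer_cov_trace_pc_guard_init(&first_guard, &last_guard_plus_1);
//   // ... then at each instrumented edge:
//   __sanitizer_cov_trace_pc_guard(&guard_for_this_edge);
//
// TracePcGuard above records the PC for a guard's slot once, then zeroes the
// guard so later visits skip the callback.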
+
+ void Dump() {
+ BlockingMutexLock locked(&setup_lock_);
+ if (array_) {
+ CHECK_NE(vmo_, ZX_HANDLE_INVALID);
+
+ // Publish the VMO to the system, where it can be collected and
+ // analyzed after this process exits. This always consumes the VMO
+ // handle. Any failure is just logged and not indicated to us.
+ __sanitizer_publish_data(kSancovSinkName, vmo_);
+ vmo_ = ZX_HANDLE_INVALID;
+
+ // This will route to __sanitizer_log_write, which will ensure that
+ // information about shared libraries is written out. This message
+ // uses the `dumpfile` symbolizer markup element to highlight the
+ // dump. See the explanation for this in:
+ // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
+ Printf("SanitizerCoverage: {{{dumpfile:%s:%s}}} with up to %u PCs\n",
+ kSancovSinkName, vmo_name_, next_index_ - 1);
+ }
+ }
+
+ private:
+ // We map in the largest possible view into the VMO: one word
+ // for every possible 32-bit index value. This avoids the need
+ // to change the mapping when increasing the size of the VMO.
+ // We can always spare the 32G of address space.
+ static constexpr size_t MappingSize = sizeof(uptr) << 32;
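  // Arithmetic check (editorial, not part of this patch): sizeof(uptr) << 32
  // on LP64 is 8 * 2^32 bytes = 32 GiB, the "32G" figure mentioned above.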
+
+ BlockingMutex setup_lock_;
+ uptr *array_;
+ u32 next_index_;
+ zx_handle_t vmo_;
+ char vmo_name_[ZX_MAX_NAME_LEN];
+
+ size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }
+
+ u32 Setup(u32 num_guards) {
+ BlockingMutexLock locked(&setup_lock_);
+ DCHECK(common_flags()->coverage);
+
+ if (next_index_ == 0) {
+ CHECK_EQ(vmo_, ZX_HANDLE_INVALID);
+ CHECK_EQ(array_, nullptr);
+
+ // The first sample goes at [1] to reserve [0] for the magic number.
+ next_index_ = 1 + num_guards;
+
+ zx_status_t status = _zx_vmo_create(DataSize(), 0, &vmo_);
+ CHECK_EQ(status, ZX_OK);
+
+ // Give the VMO a name including our process KOID so it's easy to spot.
+ internal_snprintf(vmo_name_, sizeof(vmo_name_), "%s.%zu", kSancovSinkName,
+ internal_getpid());
+ _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,
+ internal_strlen(vmo_name_));
+
+ // Map the largest possible view we might need into the VMO. Later
+ // we might need to increase the VMO's size before we can use larger
+ // indices, but we'll never move the mapping address so we don't have
+ // any multi-thread synchronization issues with that.
+ uintptr_t mapping;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize,
+ ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &mapping);
+ CHECK_EQ(status, ZX_OK);
+
+ // Hereafter other threads are free to start storing into
+ // elements [1, next_index_) of the big array.
+ array_ = reinterpret_cast<uptr *>(mapping);
+
+ // Store the magic number.
+ // Hereafter, the VMO serves as the contents of the '.sancov' file.
+ array_[0] = Magic64;
+
+ return 1;
+ } else {
+ // The VMO is already mapped in, but it's not big enough to use the
+ // new indices. So increase the size to cover the new maximum index.
+
+ CHECK_NE(vmo_, ZX_HANDLE_INVALID);
+ CHECK_NE(array_, nullptr);
+
+ uint32_t first_index = next_index_;
+ next_index_ += num_guards;
+
+ zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());
+ CHECK_EQ(status, ZX_OK);
+
+ return first_index;
+ }
+ }
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+} // namespace __sancov
+
+namespace __sanitizer {
+void InitializeCoverage(bool enabled, const char *dir) {
+ CHECK_EQ(enabled, common_flags()->coverage);
+ CHECK_EQ(dir, common_flags()->coverage_dir);
+
+ static bool coverage_enabled = false;
+ if (!coverage_enabled) {
+ coverage_enabled = enabled;
+ Atexit(__sanitizer_cov_dump);
+ AddDieCallback(__sanitizer_cov_dump);
+ }
+}
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr *pcs, uptr len) {
+ UNIMPLEMENTED();
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {
+ if (!*guard) return;
+ __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
+ u32 *start, u32 *end) {
+ if (start == end || *start) return;
+ __sancov::pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ __sancov::pc_guard_controller.Dump();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+ __sanitizer_dump_trace_pc_guard_coverage();
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
+} // extern "C"
+
+#endif // !SANITIZER_FUCHSIA
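The controller above depends on the contract of Clang's
-fsanitize-coverage=trace-pc-guard instrumentation. A minimal sketch of what
the compiler-injected calls amount to for a tiny module; the guard array and
constructor names here are illustrative, not the compiler's actual output:

#include <cstdint>

extern "C" void __sanitizer_cov_trace_pc_guard_init(uint32_t *start,
                                                    uint32_t *end);
extern "C" void __sanitizer_cov_trace_pc_guard(uint32_t *guard);

// One zero-initialized 32-bit guard per instrumented edge.
static uint32_t guards[2];

// Runs from a module constructor, handing the runtime the whole guard array
// so InitTracePcGuard()/Setup() can assign consecutive indices up front.
__attribute__((constructor)) static void sancov_module_ctor() {
  __sanitizer_cov_trace_pc_guard_init(&guards[0], &guards[2]);
}

int instrumented(int x) {
  __sanitizer_cov_trace_pc_guard(&guards[0]);  // Function-entry edge.
  if (x > 0) {
    __sanitizer_cov_trace_pc_guard(&guards[1]);  // Taken-branch edge.
    return 2 * x;
  }
  return 0;
}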
--- /dev/null
+//===-- sanitizer_coverage_interface.inc ----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_reset)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
+INTERFACE_WEAK_FUNCTION(__sancov_default_options)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init)
+++ /dev/null
-//===-- sanitizer_coverage.cc ---------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Sanitizer Coverage.
-// This file implements run-time support for a poor man's coverage tool.
-//
-// Compiler instrumentation:
-// For every interesting basic block the compiler injects the following code:
-// if (Guard < 0) {
-// __sanitizer_cov(&Guard);
-// }
-// At the module start up time __sanitizer_cov_module_init sets the guards
-// to consecutive negative numbers (-1, -2, -3, ...).
-// It's fine to call __sanitizer_cov more than once for a given block.
-//
-// Run-time:
-// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC)
-//   and atomically set Guard to -Guard.
-// - __sanitizer_cov_dump: dump the coverage data to disk.
-// For every module of the current process that has coverage data
-// this will create a file module_name.PID.sancov.
-//
-// The file format is simple: the first 8 bytes is the magic,
-// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the
-// magic defines the size of the following offsets.
-// The rest of the data is the offsets in the module.
-//
-// Eventually, this coverage implementation should be obsoleted by a more
-// powerful general purpose Clang/LLVM coverage instrumentation.
-// Consider this implementation a prototype.
-//
-// FIXME: support (or at least test with) dlclose.
-//===----------------------------------------------------------------------===//
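As a concrete illustration of the file format just described, a hypothetical
standalone reader (not part of the runtime) could look like this; it assumes
a little-endian host, matching the native-endian words the runtime writes:

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint64_t> ReadSancov(const char *path) {
  std::vector<uint64_t> offsets;
  FILE *f = fopen(path, "rb");
  if (!f) return offsets;
  uint64_t magic = 0;
  if (fread(&magic, sizeof(magic), 1, f) == 1) {
    // The last byte of the magic (0x64 or 0x32) gives the offset width.
    size_t width = (magic & 0xff) == 0x64 ? 8 : 4;
    uint64_t off = 0;  // High bytes stay zero for 4-byte reads.
    while (fread(&off, width, 1, f) == 1) offsets.push_back(off);
  }
  fclose(f);
  return offsets;
}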
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
-#include "sanitizer_flags.h"
-
-using namespace __sanitizer;
-
-static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
-static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
-static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
-static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
-
-static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
-
-static atomic_uintptr_t coverage_counter;
-static atomic_uintptr_t caller_callee_counter;
-
-static void ResetGlobalCounters() {
-  atomic_store(&coverage_counter, 0, memory_order_relaxed);
-  atomic_store(&caller_callee_counter, 0, memory_order_relaxed);
-}
-
-// pc_array is the array containing the covered PCs.
-// To make the pc_array thread- and async-signal-safe it has to be large enough.
-// 128M counters "ought to be enough for anybody" (4M on 32-bit).
-
-// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
-// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
-// dumps the current memory layout to another file.
-
-static bool cov_sandboxed = false;
-static fd_t cov_fd = kInvalidFd;
-static unsigned int cov_max_block_size = 0;
-static bool coverage_enabled = false;
-static const char *coverage_dir;
-
-namespace __sanitizer {
-
-class CoverageData {
- public:
- void Init();
- void Enable();
- void Disable();
- void ReInit();
- void BeforeFork();
- void AfterFork(int child_pid);
- void Extend(uptr npcs);
- void Add(uptr pc, u32 *guard);
- void IndirCall(uptr caller, uptr callee, uptr callee_cache[],
- uptr cache_size);
- void DumpCallerCalleePairs();
- void DumpTrace();
- void DumpAsBitSet();
- void DumpCounters();
- void DumpOffsets();
- void DumpAll();
-
- ALWAYS_INLINE
- void TraceBasicBlock(u32 *id);
-
- void InitializeGuardArray(s32 *guards);
- void InitializeGuards(s32 *guards, uptr n, const char *module_name,
- uptr caller_pc);
- void InitializeCounters(u8 *counters, uptr n);
- void ReinitializeGuards();
- uptr GetNumberOf8bitCounters();
- uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
-
- uptr *data();
- uptr size() const;
-
- private:
- struct NamedPcRange {
- const char *copied_module_name;
- uptr beg, end; // elements [beg,end) in pc_array.
- };
-
- void DirectOpen();
- void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
- void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s,
- InternalMmapVector<uptr>* offsets) const;
-
- // Maximal size pc array may ever grow.
- // We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize =
- FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
- // The amount file mapping for the pc array is grown by.
- static const uptr kPcArrayMmapSize = 64 * 1024;
-
- // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
- // much RAM as it really needs.
- uptr *pc_array;
- // Index of the first available pc_array slot.
- atomic_uintptr_t pc_array_index;
- // Array size.
- atomic_uintptr_t pc_array_size;
- // Current file mapped size of the pc array.
- uptr pc_array_mapped_size;
- // Descriptor of the file mapped pc array.
- fd_t pc_fd;
-
- // Vector of coverage guard arrays, protected by mu.
- InternalMmapVectorNoCtor<s32*> guard_array_vec;
-
- // Vector of module and compilation unit pc ranges.
- InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
- InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
-
- struct CounterAndSize {
- u8 *counters;
- uptr n;
- };
-
- InternalMmapVectorNoCtor<CounterAndSize> counters_vec;
- uptr num_8bit_counters;
-
- // Caller-Callee (cc) array, size and current index.
- static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24);
- uptr **cc_array;
- atomic_uintptr_t cc_array_index;
- atomic_uintptr_t cc_array_size;
-
- // Tracing event array, size and current pointer.
- // We record all events (basic block entries) in a global buffer of u32
- // values. Each such value is the index in pc_array.
- // So far the tracing is highly experimental:
- // - not thread-safe;
- // - does not support long traces;
- // - not tuned for performance.
- static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30);
- u32 *tr_event_array;
- uptr tr_event_array_size;
- u32 *tr_event_pointer;
- static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
-
- StaticSpinMutex mu;
-};
-
-static CoverageData coverage_data;
-
-void CovUpdateMapping(const char *path, uptr caller_pc = 0);
-
-void CoverageData::DirectOpen() {
- InternalScopedString path(kMaxPathLength);
- internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
- coverage_dir, internal_getpid());
- pc_fd = OpenFile(path.data(), RdWr);
- if (pc_fd == kInvalidFd) {
- Report("Coverage: failed to open %s for reading/writing\n", path.data());
- Die();
- }
-
- pc_array_mapped_size = 0;
- CovUpdateMapping(coverage_dir);
-}
-
-void CoverageData::Init() {
- pc_fd = kInvalidFd;
-}
-
-void CoverageData::Enable() {
- if (pc_array)
- return;
- pc_array = reinterpret_cast<uptr *>(
- MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
- atomic_store(&pc_array_index, 0, memory_order_relaxed);
- if (common_flags()->coverage_direct) {
- atomic_store(&pc_array_size, 0, memory_order_relaxed);
- } else {
- atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
- }
-
- cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie(
- sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array"));
- atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed);
- atomic_store(&cc_array_index, 0, memory_order_relaxed);
-
- // Allocate tr_event_array with a guard page at the end.
- tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie(
- sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(),
- "CovInit::tr_event_array"));
- MprotectNoAccess(
- reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]),
- GetMmapGranularity());
- tr_event_array_size = kTrEventArrayMaxSize;
- tr_event_pointer = tr_event_array;
-
- num_8bit_counters = 0;
-}
-
-void CoverageData::InitializeGuardArray(s32 *guards) {
- Enable(); // Make sure coverage is enabled at this point.
- s32 n = guards[0];
- for (s32 j = 1; j <= n; j++) {
- uptr idx = atomic_load_relaxed(&pc_array_index);
- atomic_store_relaxed(&pc_array_index, idx + 1);
- guards[j] = -static_cast<s32>(idx + 1);
- }
-}
-
-void CoverageData::Disable() {
- if (pc_array) {
- UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize);
- pc_array = nullptr;
- }
- if (cc_array) {
- UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize);
- cc_array = nullptr;
- }
- if (tr_event_array) {
- UnmapOrDie(tr_event_array,
- sizeof(tr_event_array[0]) * kTrEventArrayMaxSize +
- GetMmapGranularity());
- tr_event_array = nullptr;
- tr_event_pointer = nullptr;
- }
- if (pc_fd != kInvalidFd) {
- CloseFile(pc_fd);
- pc_fd = kInvalidFd;
- }
-}
-
-void CoverageData::ReinitializeGuards() {
- // Assuming single thread.
- atomic_store(&pc_array_index, 0, memory_order_relaxed);
- for (uptr i = 0; i < guard_array_vec.size(); i++)
- InitializeGuardArray(guard_array_vec[i]);
-}
-
-void CoverageData::ReInit() {
- Disable();
- if (coverage_enabled) {
- if (common_flags()->coverage_direct) {
- // In memory-mapped mode we must extend the new file to the known array
- // size.
- uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
- uptr npcs = size / sizeof(uptr);
- Enable();
- if (size) Extend(npcs);
- if (coverage_enabled) CovUpdateMapping(coverage_dir);
- } else {
- Enable();
- }
- }
- // Re-initialize the guards.
- // We are single-threaded now, no need to grab any lock.
- CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0);
- ReinitializeGuards();
-}
-
-void CoverageData::BeforeFork() {
- mu.Lock();
-}
-
-void CoverageData::AfterFork(int child_pid) {
- // We are single-threaded so it's OK to release the lock early.
- mu.Unlock();
- if (child_pid == 0) ReInit();
-}
-
-// Extend the coverage PC array to fit npcs additional elements.
-void CoverageData::Extend(uptr npcs) {
- if (!common_flags()->coverage_direct) return;
- SpinMutexLock l(&mu);
-
- uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
- size += npcs * sizeof(uptr);
-
- if (coverage_enabled && size > pc_array_mapped_size) {
- if (pc_fd == kInvalidFd) DirectOpen();
- CHECK_NE(pc_fd, kInvalidFd);
-
- uptr new_mapped_size = pc_array_mapped_size;
- while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
- CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize);
-
- // Extend the file and map the new space at the end of pc_array.
- uptr res = internal_ftruncate(pc_fd, new_mapped_size);
- int err;
- if (internal_iserror(res, &err)) {
- Printf("failed to extend raw coverage file: %d\n", err);
- Die();
- }
-
- uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size;
- void *p = MapWritableFileToMemory((void *)next_map_base,
- new_mapped_size - pc_array_mapped_size,
- pc_fd, pc_array_mapped_size);
- CHECK_EQ((uptr)p, next_map_base);
- pc_array_mapped_size = new_mapped_size;
- }
-
- atomic_store(&pc_array_size, size, memory_order_release);
-}
-
-void CoverageData::InitializeCounters(u8 *counters, uptr n) {
- if (!counters) return;
- CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0);
- n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned.
- SpinMutexLock l(&mu);
- counters_vec.push_back({counters, n});
- num_8bit_counters += n;
-}
-
-void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg,
- uptr range_end) {
- auto sym = Symbolizer::GetOrInit();
- if (!sym)
- return;
- const char *module_name = sym->GetModuleNameForPc(caller_pc);
- if (!module_name) return;
- if (module_name_vec.empty() ||
- module_name_vec.back().copied_module_name != module_name)
- module_name_vec.push_back({module_name, range_beg, range_end});
- else
- module_name_vec.back().end = range_end;
-}
-
-void CoverageData::InitializeGuards(s32 *guards, uptr n,
- const char *comp_unit_name,
- uptr caller_pc) {
- // The array 'guards' has n+1 elements, we use the element zero
- // to store 'n'.
- CHECK_LT(n, 1 << 30);
- guards[0] = static_cast<s32>(n);
- InitializeGuardArray(guards);
- SpinMutexLock l(&mu);
- uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed);
- uptr range_beg = range_end - n;
- comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end});
- guard_array_vec.push_back(guards);
- UpdateModuleNameVec(caller_pc, range_beg, range_end);
-}
-
-static const uptr kBundleCounterBits = 16;
-
-// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64
-// we insert the global counter into the top 16 bits of the PC.
-uptr BundlePcAndCounter(uptr pc, uptr counter) {
- if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
- return pc;
- static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1;
- if (counter > kMaxCounter)
- counter = kMaxCounter;
- CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits));
- return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits));
-}
-
-uptr UnbundlePc(uptr bundle) {
- if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
- return bundle;
- return (bundle << kBundleCounterBits) >> kBundleCounterBits;
-}
-
-uptr UnbundleCounter(uptr bundle) {
- if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs)
- return 0;
- return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits);
-}
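A worked example of the bundling scheme above, assuming a 64-bit target with
coverage_order_pcs=1 (kBundleCounterBits = 16, so the shift is 48):

  pc      = 0x0000000000401234
  counter = 5
  bundle  = pc | (counter << 48)  = 0x0005000000401234
  UnbundlePc(bundle)              = 0x0000000000401234  // (bundle << 16) >> 16
  UnbundleCounter(bundle)         = 5                   // bundle >> 48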
-
-// If guard is negative, atomically set it to -guard and store the PC in
-// pc_array.
-void CoverageData::Add(uptr pc, u32 *guard) {
- atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
- s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed);
- if (guard_value >= 0) return;
-
- atomic_store(atomic_guard, -guard_value, memory_order_relaxed);
- if (!pc_array) return;
-
- uptr idx = -guard_value - 1;
- if (idx >= atomic_load(&pc_array_index, memory_order_acquire))
- return; // May happen after fork when pc_array_index becomes 0.
- CHECK_LT(idx * sizeof(uptr),
- atomic_load(&pc_array_size, memory_order_acquire));
- uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed);
- pc_array[idx] = BundlePcAndCounter(pc, counter);
-}
-
-// Registers a pair caller=>callee.
-// When a given caller is seen for the first time, the callee_cache is added
-// to the global array cc_array, callee_cache[0] is set to caller and
-// callee_cache[1] is set to cache_size.
-// Then we are trying to add callee to callee_cache [2,cache_size) if it is
-// not there yet.
-// If the cache is full we drop the callee (may want to fix this later).
-void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[],
- uptr cache_size) {
- if (!cc_array) return;
- atomic_uintptr_t *atomic_callee_cache =
- reinterpret_cast<atomic_uintptr_t *>(callee_cache);
- uptr zero = 0;
- if (atomic_compare_exchange_strong(&atomic_callee_cache[0], &zero, caller,
- memory_order_seq_cst)) {
- uptr idx = atomic_fetch_add(&cc_array_index, 1, memory_order_relaxed);
- CHECK_LT(idx * sizeof(uptr),
- atomic_load(&cc_array_size, memory_order_acquire));
- callee_cache[1] = cache_size;
- cc_array[idx] = callee_cache;
- }
- CHECK_EQ(atomic_load(&atomic_callee_cache[0], memory_order_relaxed), caller);
- for (uptr i = 2; i < cache_size; i++) {
- uptr was = 0;
- if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee,
- memory_order_seq_cst)) {
- atomic_fetch_add(&caller_callee_counter, 1, memory_order_relaxed);
- return;
- }
- if (was == callee) // Already have this callee.
- return;
- }
-}
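For reference, the callee_cache layout after the first call through a given
indirect call site, per the protocol above:

  callee_cache[0]              = caller PC
  callee_cache[1]              = cache_size
  callee_cache[2..cache_size)  = distinct callee PCs, filled lazily (0 = empty)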
-
-uptr CoverageData::GetNumberOf8bitCounters() {
- return num_8bit_counters;
-}
-
-// Map every 8bit counter to a 8-bit bitset and clear the counter.
-uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) {
- uptr num_new_bits = 0;
- uptr cur = 0;
- // For better speed we map 8 counters to 8 bytes of bitset at once.
- static const uptr kBatchSize = 8;
- CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0);
- for (uptr i = 0, len = counters_vec.size(); i < len; i++) {
- u8 *c = counters_vec[i].counters;
- uptr n = counters_vec[i].n;
- CHECK_EQ(n % 16, 0);
- CHECK_EQ(cur % kBatchSize, 0);
- CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0);
- if (!bitset) {
- internal_bzero_aligned16(c, n);
- cur += n;
- continue;
- }
- for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) {
- CHECK_LT(cur, num_8bit_counters);
- u64 *pc64 = reinterpret_cast<u64*>(c + j);
- u64 *pb64 = reinterpret_cast<u64*>(bitset + cur);
- u64 c64 = *pc64;
- u64 old_bits_64 = *pb64;
- u64 new_bits_64 = old_bits_64;
- if (c64) {
- *pc64 = 0;
- for (uptr k = 0; k < kBatchSize; k++) {
- u64 x = (c64 >> (8 * k)) & 0xff;
- if (x) {
- u64 bit = 0;
- /**/ if (x >= 128) bit = 128;
- else if (x >= 32) bit = 64;
- else if (x >= 16) bit = 32;
- else if (x >= 8) bit = 16;
- else if (x >= 4) bit = 8;
- else if (x >= 3) bit = 4;
- else if (x >= 2) bit = 2;
- else if (x >= 1) bit = 1;
- u64 mask = bit << (8 * k);
- if (!(new_bits_64 & mask)) {
- num_new_bits++;
- new_bits_64 |= mask;
- }
- }
- }
- *pb64 = new_bits_64;
- }
- }
- }
- CHECK_EQ(cur, num_8bit_counters);
- return num_new_bits;
-}
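To make the threshold chain above concrete, the counter-value buckets and the
bits they map to are:

  x == 1         -> bit 1
  x == 2         -> bit 2
  x == 3         -> bit 4
  4 <= x < 8     -> bit 8
  8 <= x < 16    -> bit 16
  16 <= x < 32   -> bit 32
  32 <= x < 128  -> bit 64
  x >= 128       -> bit 128

So a block executed 9 times sets bit 16; a later run executing it 200 times
additionally sets bit 128 and is counted as one new bit.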
-
-uptr *CoverageData::data() {
- return pc_array;
-}
-
-uptr CoverageData::size() const {
- return atomic_load(&pc_array_index, memory_order_relaxed);
-}
-
-// Block layout for packed file format: header, followed by module name (no
-// trailing zero), followed by data blob.
-struct CovHeader {
- int pid;
- unsigned int module_name_length;
- unsigned int data_length;
-};
-
-static void CovWritePacked(int pid, const char *module, const void *blob,
- unsigned int blob_size) {
- if (cov_fd == kInvalidFd) return;
- unsigned module_name_length = internal_strlen(module);
- CovHeader header = {pid, module_name_length, blob_size};
-
- if (cov_max_block_size == 0) {
- // Writing to a file. Just go ahead.
- WriteToFile(cov_fd, &header, sizeof(header));
- WriteToFile(cov_fd, module, module_name_length);
- WriteToFile(cov_fd, blob, blob_size);
- } else {
- // Writing to a socket. We want to split the data into appropriately sized
- // blocks.
- InternalScopedBuffer<char> block(cov_max_block_size);
- CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
- uptr header_size_with_module = sizeof(header) + module_name_length;
- CHECK_LT(header_size_with_module, cov_max_block_size);
- unsigned int max_payload_size =
- cov_max_block_size - header_size_with_module;
- char *block_pos = block.data();
- internal_memcpy(block_pos, &header, sizeof(header));
- block_pos += sizeof(header);
- internal_memcpy(block_pos, module, module_name_length);
- block_pos += module_name_length;
- char *block_data_begin = block_pos;
- const char *blob_pos = (const char *)blob;
- while (blob_size > 0) {
- unsigned int payload_size = Min(blob_size, max_payload_size);
- blob_size -= payload_size;
- internal_memcpy(block_data_begin, blob_pos, payload_size);
- blob_pos += payload_size;
- ((CovHeader *)block.data())->data_length = payload_size;
- WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size);
- }
- }
-}
-
-// If packed = false: <name>.<pid>.<sancov> (name = module name).
-// If packed = true and name == 0: <pid>.<sancov>.<packed>.
-// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
-// user-supplied).
-static fd_t CovOpenFile(InternalScopedString *path, bool packed,
- const char *name, const char *extension = "sancov") {
- path->clear();
- if (!packed) {
- CHECK(name);
- path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(),
- extension);
- } else {
- if (!name)
- path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(),
- extension);
- else
- path->append("%s/%s.%s.packed", coverage_dir, name, extension);
- }
- error_t err;
- fd_t fd = OpenFile(path->data(), WrOnly, &err);
- if (fd == kInvalidFd)
- Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
- path->data(), err);
- return fd;
-}
-
-// Dump trace PCs and trace events into two separate files.
-void CoverageData::DumpTrace() {
- uptr max_idx = tr_event_pointer - tr_event_array;
- if (!max_idx) return;
- auto sym = Symbolizer::GetOrInit();
- if (!sym)
- return;
- InternalScopedString out(32 << 20);
- for (uptr i = 0, n = size(); i < n; i++) {
- const char *module_name = "<unknown>";
- uptr module_address = 0;
- sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name,
- &module_address);
- out.append("%s 0x%zx\n", module_name, module_address);
- }
- InternalScopedString path(kMaxPathLength);
- fd_t fd = CovOpenFile(&path, false, "trace-points");
- if (fd == kInvalidFd) return;
- WriteToFile(fd, out.data(), out.length());
- CloseFile(fd);
-
- fd = CovOpenFile(&path, false, "trace-compunits");
- if (fd == kInvalidFd) return;
- out.clear();
- for (uptr i = 0; i < comp_unit_name_vec.size(); i++)
- out.append("%s\n", comp_unit_name_vec[i].copied_module_name);
- WriteToFile(fd, out.data(), out.length());
- CloseFile(fd);
-
- fd = CovOpenFile(&path, false, "trace-events");
- if (fd == kInvalidFd) return;
- uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]);
- u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array);
- // The trace file could be huge, and may not be written with a single syscall.
- while (bytes_to_write) {
- uptr actually_written;
- if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) &&
- actually_written <= bytes_to_write) {
- bytes_to_write -= actually_written;
- event_bytes += actually_written;
- } else {
- break;
- }
- }
- CloseFile(fd);
- VReport(1, " CovDump: Trace: %zd PCs written\n", size());
- VReport(1, " CovDump: Trace: %zd Events written\n", max_idx);
-}
-
-// This function dumps the caller=>callee pairs into a file as a sequence of
-// lines like "module_name offset".
-void CoverageData::DumpCallerCalleePairs() {
- uptr max_idx = atomic_load(&cc_array_index, memory_order_relaxed);
- if (!max_idx) return;
- auto sym = Symbolizer::GetOrInit();
- if (!sym)
- return;
- InternalScopedString out(32 << 20);
- uptr total = 0;
- for (uptr i = 0; i < max_idx; i++) {
- uptr *cc_cache = cc_array[i];
- CHECK(cc_cache);
- uptr caller = cc_cache[0];
- uptr n_callees = cc_cache[1];
- const char *caller_module_name = "<unknown>";
- uptr caller_module_address = 0;
- sym->GetModuleNameAndOffsetForPC(caller, &caller_module_name,
- &caller_module_address);
- for (uptr j = 2; j < n_callees; j++) {
- uptr callee = cc_cache[j];
- if (!callee) break;
- total++;
- const char *callee_module_name = "<unknown>";
- uptr callee_module_address = 0;
- sym->GetModuleNameAndOffsetForPC(callee, &callee_module_name,
- &callee_module_address);
- out.append("%s 0x%zx\n%s 0x%zx\n", caller_module_name,
- caller_module_address, callee_module_name,
- callee_module_address);
- }
- }
- InternalScopedString path(kMaxPathLength);
- fd_t fd = CovOpenFile(&path, false, "caller-callee");
- if (fd == kInvalidFd) return;
- WriteToFile(fd, out.data(), out.length());
- CloseFile(fd);
- VReport(1, " CovDump: %zd caller-callee pairs written\n", total);
-}
-
-// Record the current PC into the event buffer.
-// Every event is a u32 value (an index into pc_array), already computed
-// by the instrumentation and passed in via 'id'.
-//
-// This function will eventually be inlined by the compiler.
-void CoverageData::TraceBasicBlock(u32 *id) {
- // Will trap here if
- // 1. coverage is not enabled at run-time.
- // 2. The array tr_event_array is full.
- *tr_event_pointer = *id - 1;
- tr_event_pointer++;
-}
-
-void CoverageData::DumpCounters() {
- if (!common_flags()->coverage_counters) return;
- uptr n = coverage_data.GetNumberOf8bitCounters();
- if (!n) return;
- InternalScopedBuffer<u8> bitset(n);
- coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data());
- InternalScopedString path(kMaxPathLength);
-
- for (uptr m = 0; m < module_name_vec.size(); m++) {
- auto r = module_name_vec[m];
- CHECK(r.copied_module_name);
- CHECK_LE(r.beg, r.end);
- CHECK_LE(r.end, size());
- const char *base_name = StripModuleName(r.copied_module_name);
- fd_t fd =
- CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov");
- if (fd == kInvalidFd) return;
- WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg);
- CloseFile(fd);
- VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg,
- base_name);
- }
-}
-
-void CoverageData::DumpAsBitSet() {
- if (!common_flags()->coverage_bitset) return;
- if (!size()) return;
- InternalScopedBuffer<char> out(size());
- InternalScopedString path(kMaxPathLength);
- for (uptr m = 0; m < module_name_vec.size(); m++) {
- uptr n_set_bits = 0;
- auto r = module_name_vec[m];
- CHECK(r.copied_module_name);
- CHECK_LE(r.beg, r.end);
- CHECK_LE(r.end, size());
- for (uptr i = r.beg; i < r.end; i++) {
- uptr pc = UnbundlePc(pc_array[i]);
- out[i] = pc ? '1' : '0';
- if (pc)
- n_set_bits++;
- }
- const char *base_name = StripModuleName(r.copied_module_name);
- fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov");
- if (fd == kInvalidFd) return;
- WriteToFile(fd, out.data() + r.beg, r.end - r.beg);
- CloseFile(fd);
- VReport(1,
- " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n",
- r.end - r.beg, base_name, n_set_bits);
- }
-}
-
-
-void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
- InternalMmapVector<uptr>* offsets) const {
- offsets->clear();
- for (uptr i = 0; i < kNumWordsForMagic; i++)
- offsets->push_back(0);
- CHECK(r.copied_module_name);
- CHECK_LE(r.beg, r.end);
- CHECK_LE(r.end, size());
- for (uptr i = r.beg; i < r.end; i++) {
- uptr pc = UnbundlePc(pc_array[i]);
- uptr counter = UnbundleCounter(pc_array[i]);
- if (!pc) continue; // Not visited.
- uptr offset = 0;
- sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
- offsets->push_back(BundlePcAndCounter(offset, counter));
- }
-
- CHECK_GE(offsets->size(), kNumWordsForMagic);
- SortArray(offsets->data(), offsets->size());
- for (uptr i = 0; i < offsets->size(); i++)
- (*offsets)[i] = UnbundlePc((*offsets)[i]);
-}
-
-static void GenerateHtmlReport(const InternalMmapVector<char *> &cov_files) {
- if (!common_flags()->html_cov_report) {
- return;
- }
- char *sancov_path = FindPathToBinary(common_flags()->sancov_path);
- if (sancov_path == nullptr) {
- return;
- }
-
- InternalMmapVector<char *> sancov_argv(cov_files.size() * 2 + 3);
- sancov_argv.push_back(sancov_path);
- sancov_argv.push_back(internal_strdup("-html-report"));
- auto argv_deleter = at_scope_exit([&] {
- for (uptr i = 0; i < sancov_argv.size(); ++i) {
- InternalFree(sancov_argv[i]);
- }
- });
-
- for (const auto &cov_file : cov_files) {
- sancov_argv.push_back(internal_strdup(cov_file));
- }
-
- {
- ListOfModules modules;
- modules.init();
- for (const LoadedModule &module : modules) {
- sancov_argv.push_back(internal_strdup(module.full_name()));
- }
- }
-
- InternalScopedString report_path(kMaxPathLength);
- fd_t report_fd =
- CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html");
- int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(),
- kInvalidFd /* stdin */, report_fd /* std_out */);
- if (pid > 0) {
- int result = WaitForProcess(pid);
- if (result == 0)
- Printf("coverage report generated to %s\n", report_path.data());
- }
-}
-
-void CoverageData::DumpOffsets() {
- auto sym = Symbolizer::GetOrInit();
- if (!common_flags()->coverage_pcs) return;
- CHECK_NE(sym, nullptr);
- InternalMmapVector<uptr> offsets(0);
- InternalScopedString path(kMaxPathLength);
-
- InternalMmapVector<char *> cov_files(module_name_vec.size());
- auto cov_files_deleter = at_scope_exit([&] {
- for (uptr i = 0; i < cov_files.size(); ++i) {
- InternalFree(cov_files[i]);
- }
- });
-
- for (uptr m = 0; m < module_name_vec.size(); m++) {
- auto r = module_name_vec[m];
- GetRangeOffsets(r, sym, &offsets);
-
- uptr num_offsets = offsets.size() - kNumWordsForMagic;
- u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
- CHECK_EQ(*magic_p, 0ULL);
-// FIXME: we may want to write 32-bit offsets even in 64-bit mode
- // if all the offsets are small enough.
- *magic_p = kMagic;
-
- const char *module_name = StripModuleName(r.copied_module_name);
- if (cov_sandboxed) {
- if (cov_fd != kInvalidFd) {
- CovWritePacked(internal_getpid(), module_name, offsets.data(),
- offsets.size() * sizeof(offsets[0]));
- VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets);
- }
- } else {
- // One file per module per process.
- fd_t fd = CovOpenFile(&path, false /* packed */, module_name);
- if (fd == kInvalidFd) continue;
- WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
- CloseFile(fd);
- cov_files.push_back(internal_strdup(path.data()));
- VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
- }
- }
- if (cov_fd != kInvalidFd)
- CloseFile(cov_fd);
-
- GenerateHtmlReport(cov_files);
-}
-
-void CoverageData::DumpAll() {
- if (!coverage_enabled || common_flags()->coverage_direct) return;
- if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
- return;
- DumpAsBitSet();
- DumpCounters();
- DumpTrace();
- DumpOffsets();
- DumpCallerCalleePairs();
-}
-
-void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
- if (!args) return;
- if (!coverage_enabled) return;
- cov_sandboxed = args->coverage_sandboxed;
- if (!cov_sandboxed) return;
- cov_max_block_size = args->coverage_max_block_size;
- if (args->coverage_fd >= 0) {
- cov_fd = (fd_t)args->coverage_fd;
- } else {
- InternalScopedString path(kMaxPathLength);
- // Pre-open the file now. The sandbox won't allow us to do it later.
- cov_fd = CovOpenFile(&path, true /* packed */, nullptr);
- }
-}
-
-fd_t MaybeOpenCovFile(const char *name) {
- CHECK(name);
- if (!coverage_enabled) return kInvalidFd;
- InternalScopedString path(kMaxPathLength);
- return CovOpenFile(&path, true /* packed */, name);
-}
-
-void CovBeforeFork() {
- coverage_data.BeforeFork();
-}
-
-void CovAfterFork(int child_pid) {
- coverage_data.AfterFork(child_pid);
-}
-
-static void MaybeDumpCoverage() {
- if (common_flags()->coverage)
- __sanitizer_cov_dump();
-}
-
-void InitializeCoverage(bool enabled, const char *dir) {
- if (coverage_enabled)
-    return;  // May happen if two sanitizers enable coverage in the same process.
- coverage_enabled = enabled;
- coverage_dir = dir;
- coverage_data.Init();
- if (enabled) coverage_data.Enable();
- if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump);
- AddDieCallback(MaybeDumpCoverage);
-}
-
-void ReInitializeCoverage(bool enabled, const char *dir) {
- coverage_enabled = enabled;
- coverage_dir = dir;
- coverage_data.ReInit();
-}
-
-void CoverageUpdateMapping() {
- if (coverage_enabled)
- CovUpdateMapping(coverage_dir);
-}
-
-} // namespace __sanitizer
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) {
- coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
- guard);
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) {
- atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard);
- if (static_cast<s32>(
- __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0)
- __sanitizer_cov(guard);
-}
-SANITIZER_INTERFACE_ATTRIBUTE void
-__sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) {
- coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()),
- callee, callee_cache16, 16);
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
- coverage_enabled = true;
- coverage_dir = common_flags()->coverage_dir;
- coverage_data.Init();
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
- coverage_data.DumpAll();
-}
-SANITIZER_INTERFACE_ATTRIBUTE void
-__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
- const char *comp_unit_name) {
- coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC());
- coverage_data.InitializeCounters(counters, npcs);
- if (!common_flags()->coverage_direct) return;
- if (SANITIZER_ANDROID && coverage_enabled) {
- // dlopen/dlclose interceptors do not work on Android, so we rely on
- // Extend() calls to update .sancov.map.
- CovUpdateMapping(coverage_dir, GET_CALLER_PC());
- }
- coverage_data.Extend(npcs);
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-sptr __sanitizer_maybe_open_cov_file(const char *name) {
- return (sptr)MaybeOpenCovFile(name);
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_total_unique_coverage() {
- return atomic_load(&coverage_counter, memory_order_relaxed);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_total_unique_caller_callee_pairs() {
- return atomic_load(&caller_callee_counter, memory_order_relaxed);
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_func_enter(u32 *id) {
- __sanitizer_cov_with_check(id);
- coverage_data.TraceBasicBlock(id);
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_basic_block(u32 *id) {
- __sanitizer_cov_with_check(id);
- coverage_data.TraceBasicBlock(id);
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_reset_coverage() {
- ResetGlobalCounters();
- coverage_data.ReinitializeGuards();
- internal_bzero_aligned16(
- coverage_data.data(),
- RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16));
-}
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_coverage_guards(uptr **data) {
- *data = coverage_data.data();
- return coverage_data.size();
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_get_number_of_counters() {
- return coverage_data.GetNumberOf8bitCounters();
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE
-uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
- return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
-}
-// Default empty implementations (weak). Users should redefine them.
-#if !SANITIZER_WINDOWS // weak does not work on Windows.
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp1() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp2() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp4() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_cmp8() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_switch() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_div4() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_div8() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_gep() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_pc_guard() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_pc_indir() {}
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __sanitizer_cov_trace_pc_guard_init() {}
-#endif // !SANITIZER_WINDOWS
-} // extern "C"
--- /dev/null
+//===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage Controller for Trace PC Guard.
+
+#include "sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+#include "sancov_flags.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+using AddressRange = LoadedModule::AddressRange;
+
+namespace __sancov {
+namespace {
+
+static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
+
+static fd_t OpenFile(const char* path) {
+ error_t err;
+ fd_t fd = OpenFile(path, WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path, err);
+ return fd;
+}
+
+static void GetCoverageFilename(char* path, const char* name,
+ const char* extension) {
+ CHECK(name);
+ internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
+ common_flags()->coverage_dir, name, internal_getpid(),
+ extension);
+}
+
+static void WriteModuleCoverage(char* file_path, const char* module_name,
+ const uptr* pcs, uptr len) {
+ GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
+ fd_t fd = OpenFile(file_path);
+ WriteToFile(fd, &Magic, sizeof(Magic));
+ WriteToFile(fd, pcs, len * sizeof(*pcs));
+ CloseFile(fd);
+ Printf("SanitizerCoverage: %s: %zd PCs written\n", file_path, len);
+}
+
+static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
+ if (!len) return;
+
+ char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
+
+ internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
+ SortArray(pcs, len);
+
+ bool module_found = false;
+ uptr last_base = 0;
+ uptr module_start_idx = 0;
+
+ for (uptr i = 0; i < len; ++i) {
+ const uptr pc = pcs[i];
+ if (!pc) continue;
+
+ if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
+      Printf("ERROR: unknown pc 0x%zx (may happen if dlclose is used)\n", pc);
+ continue;
+ }
+ uptr module_base = pc - pcs[i];
+
+ if (module_base != last_base || !module_found) {
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ i - module_start_idx);
+ }
+
+ last_base = module_base;
+ module_start_idx = i;
+ module_found = true;
+ __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
+ &pcs[i]);
+ }
+ }
+
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ len - module_start_idx);
+ }
+
+ InternalFree(file_path);
+ InternalFree(module_name);
+ InternalFree(pcs);
+}
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ void Initialize() {
+ CHECK(!initialized);
+
+ initialized = true;
+ InitializeSancovFlags();
+
+ pc_vector.Initialize(0);
+ }
+
+ void InitTracePcGuard(u32* start, u32* end) {
+ if (!initialized) Initialize();
+ CHECK(!*start);
+ CHECK_NE(start, end);
+
+ u32 i = pc_vector.size();
+ for (u32* p = start; p < end; p++) *p = ++i;
+ pc_vector.resize(i);
+ }
+
+ void TracePcGuard(u32* guard, uptr pc) {
+ u32 idx = *guard;
+ if (!idx) return;
+    // We start indices from 1.
+ atomic_uintptr_t* pc_ptr =
+ reinterpret_cast<atomic_uintptr_t*>(&pc_vector[idx - 1]);
+ if (atomic_load(pc_ptr, memory_order_relaxed) == 0)
+ atomic_store(pc_ptr, pc, memory_order_relaxed);
+ }
+
+ void Reset() {
+ internal_memset(&pc_vector[0], 0, sizeof(pc_vector[0]) * pc_vector.size());
+ }
+
+ void Dump() {
+ if (!initialized || !common_flags()->coverage) return;
+ __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
+ }
+
+ private:
+ bool initialized;
+ InternalMmapVectorNoCtor<uptr> pc_vector;
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+} // namespace __sancov
+
+namespace __sanitizer {
+void InitializeCoverage(bool enabled, const char *dir) {
+ static bool coverage_enabled = false;
+ if (coverage_enabled)
+    return;  // May happen if two sanitizers enable coverage in the same process.
+ coverage_enabled = enabled;
+ Atexit(__sanitizer_cov_dump);
+ AddDieCallback(__sanitizer_cov_dump);
+}
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr* pcs, uptr len) {
+ return __sancov::SanitizerDumpCoverage(pcs, len);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
+ if (!*guard) return;
+ __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
+ u32* start, u32* end) {
+ if (start == end || *start) return;
+ __sancov::pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ __sancov::pc_guard_controller.Dump();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+ __sanitizer_dump_trace_pc_guard_coverage();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {
+ __sancov::pc_guard_controller.Reset();
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+} // extern "C"
+// Weak definition for code instrumented with -fsanitize-coverage=stack-depth
+// and later linked with code containing a strong definition.
+// E.g., -fsanitize=fuzzer-no-link
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
+
+#endif // !SANITIZER_FUCHSIA
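A hypothetical way for application or fuzzer code to drive the entry points
above; RunOneBatch is a made-up workload, and collection itself must be
enabled via the usual coverage flags:

extern "C" void __sanitizer_cov_dump();
extern "C" void __sanitizer_cov_reset();

void RunOneBatch() { /* ... exercise instrumented code ... */ }

int main() {
  RunOneBatch();
  __sanitizer_cov_dump();   // Writes one <module>.<pid>.sancov file per module.
  __sanitizer_cov_reset();  // Zeroes the PC vector before the next batch.
}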
+++ /dev/null
-//===-- sanitizer_coverage_mapping.cc -------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Mmap-based implementation of sanitizer coverage.
-//
-// This is part of the implementation of code coverage that does not require
-// a __sanitizer_cov_dump() call. Data is stored in 2 files per process.
-//
-// $pid.sancov.map describes process memory layout in the following text-based
-// format:
-// <pointer size in bits> // 1 line, 32 or 64
-// <mapping start> <mapping end> <base address> <dso name> // repeated
-// ...
-// Mapping lines are NOT sorted. This file is updated every time memory layout
-// is changed (i.e. in dlopen() and dlclose() interceptors).
-//
-// $pid.sancov.raw is a binary dump of PC values, sizeof(uptr) each. Again, not
-// sorted. This file is extended by 64Kb at a time and mapped into memory. It
-// contains one or more 0 words at the end, up to the next 64Kb aligned offset.
-//
-// To convert these 2 files to the usual .sancov format, run sancov.py rawunpack
-// $pid.sancov.raw.
-//
-//===----------------------------------------------------------------------===//
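For illustration, a $pid.sancov.map file in the format above might read
(addresses made up):

  64
  7f2a5c200000 7f2a5c3a1000 7f2a5c200000 libfoo.so
  400000 4c7000 400000 a.out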
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_procmaps.h"
-
-namespace __sanitizer {
-
-static const uptr kMaxTextSize = 64 * 1024;
-
-struct CachedMapping {
- public:
- bool NeedsUpdate(uptr pc) {
- int new_pid = internal_getpid();
- if (last_pid == new_pid && pc && pc >= last_range_start &&
- pc < last_range_end)
- return false;
- last_pid = new_pid;
- return true;
- }
-
- void SetModuleRange(uptr start, uptr end) {
- last_range_start = start;
- last_range_end = end;
- }
-
- private:
- uptr last_range_start, last_range_end;
- int last_pid;
-};
-
-static CachedMapping cached_mapping;
-static StaticSpinMutex mapping_mu;
-
-void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
- if (!common_flags()->coverage_direct) return;
-
- SpinMutexLock l(&mapping_mu);
-
- if (!cached_mapping.NeedsUpdate(caller_pc))
- return;
-
- InternalScopedString text(kMaxTextSize);
-
- {
- text.append("%d\n", sizeof(uptr) * 8);
- ListOfModules modules;
- modules.init();
- for (const LoadedModule &module : modules) {
- const char *module_name = StripModuleName(module.full_name());
- uptr base = module.base_address();
- for (const auto &range : module.ranges()) {
- if (range.executable) {
- uptr start = range.beg;
- uptr end = range.end;
- text.append("%zx %zx %zx %s\n", start, end, base, module_name);
- if (caller_pc && caller_pc >= start && caller_pc < end)
- cached_mapping.SetModuleRange(start, end);
- }
- }
- }
- }
-
- error_t err;
- InternalScopedString tmp_path(64 + internal_strlen(coverage_dir));
- uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
- "%s/%zd.sancov.map.tmp", coverage_dir,
- internal_getpid());
- CHECK_LE(res, tmp_path.size());
- fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err);
- if (map_fd == kInvalidFd) {
- Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(),
- err);
- Die();
- }
-
- if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) {
- Printf("sancov.map write failed: %d\n", err);
- Die();
- }
- CloseFile(map_fd);
-
- InternalScopedString path(64 + internal_strlen(coverage_dir));
- res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
- coverage_dir, internal_getpid());
- CHECK_LE(res, path.size());
- if (!RenameFile(tmp_path.data(), path.data(), &err)) {
- Printf("sancov.map rename failed: %d\n", err);
- Die();
- }
-}
-
-} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_coverage_win_dll_thunk.cc -------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_dll_thunk.h"
+// Sanitizer Coverage interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DLL_THUNK
--- /dev/null
+//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cc -------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Coverage, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak aliases for all weak functions imported from sanitizer coverage.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
--- /dev/null
+//===-- sanitizer_coverage_win_sections.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines delimiters for Sanitizer Coverage's section.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include <stdint.h>
+#pragma section(".SCOV$A", read, write) // NOLINT
+#pragma section(".SCOV$Z", read, write) // NOLINT
+extern "C" {
+__declspec(allocate(".SCOV$A")) uint32_t __start___sancov_guards = 0;
+__declspec(allocate(".SCOV$Z")) uint32_t __stop___sancov_guards = 0;
+}
+#endif // SANITIZER_WINDOWS
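The delimiters above rely on MSVC grouped sections: the linker merges
".SCOV$<suffix>" contributions in alphabetical order of the suffix, so data
placed in any section sorting between $A and $Z lands between the two symbols.
A sketch, using a hypothetical ".SCOV$G" section name for the guard:

#pragma section(".SCOV$G", read, write)  // NOLINT
__declspec(allocate(".SCOV$G")) static uint32_t my_guard = 0;
// After linking: &__start___sancov_guards <= &my_guard,
// and &my_guard < &__stop___sancov_guards.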
--- /dev/null
+//===-- sanitizer_coverage_win_weak_interception.cc -----------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Sanitizer Coverage when it is implemented
+// as a shared library on Windows (DLL), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_interface_internal.h"
+#include "sancov_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC
--- /dev/null
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazy loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_WIN_H
+#define SANITIZER_SYMBOLIZER_WIN_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_WIN_H
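A minimal sketch, not the runtime's actual initialization code, of how the
pointers declared above can be filled in by lazily loading dbghelp.dll:

namespace __sanitizer {
static bool InitDbgHelp() {
  HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
  if (!dbghelp) return false;
  StackWalk64 = reinterpret_cast<decltype(::StackWalk64) *>(
      GetProcAddress(dbghelp, "StackWalk64"));
  SymInitialize = reinterpret_cast<decltype(::SymInitialize) *>(
      GetProcAddress(dbghelp, "SymInitialize"));
  // ...and likewise for the remaining entry points.
  return StackWalk64 && SymInitialize;
}
}  // namespace __sanitizer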
--- /dev/null
+//===-- sanitizer_errno.cc --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into other
+// files (e.g. interceptors are not supposed to include any system headers).
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_internal_defs.h"
+
+#include <errno.h>
+
+namespace __sanitizer {
+
+COMPILER_CHECK(errno_ENOMEM == ENOMEM);
+COMPILER_CHECK(errno_EBUSY == EBUSY);
+COMPILER_CHECK(errno_EINVAL == EINVAL);
+
+// EOWNERDEAD is not present on some older platforms.
+#if defined(EOWNERDEAD)
+extern const int errno_EOWNERDEAD = EOWNERDEAD;
+#else
+extern const int errno_EOWNERDEAD = -1;
+#endif
+
+} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into sensitive
+// files (e.g. interceptors are not supposed to include any system headers).
+// It's ok to use errno.h directly when your file already depends on other
+// system includes, though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_H
+#define SANITIZER_ERRNO_H
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+# define __errno_location __error
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD
+# define __errno_location __errno
+#elif SANITIZER_WINDOWS
+# define __errno_location _errno
+#endif
+
+extern "C" int *__errno_location();
+
+#define errno (*__errno_location())
+
+#endif // SANITIZER_ERRNO_H
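
As a usage sketch (the function below is hypothetical, not part of this
patch): any file that includes this header can touch the real thread-local
errno without pulling in errno.h or its dependencies:

#include "sanitizer_errno.h"

namespace __sanitizer {
// Hypothetical failure path in an interceptor that must not include errno.h.
static void *FailWithENOMEM() {
  errno = errno_ENOMEM;  // expands to (*__errno_location()) = 12
  return nullptr;
}
}  // namespace __sanitizer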
--- /dev/null
+//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+// Defines errno codes to avoid including errno.h and its dependencies into
+// sensitive files (e.g. interceptors are not supposed to include any system
+// headers).
+// It's ok to use errno.h directly when your file already depends on other
+// system includes, though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_CODES_H
+#define SANITIZER_ERRNO_CODES_H
+
+namespace __sanitizer {
+
+#define errno_ENOMEM 12
+#define errno_EBUSY 16
+#define errno_EINVAL 22
+
+// These might not be present, or their values may differ, on different
+// platforms.
+extern const int errno_EOWNERDEAD;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ERRNO_CODES_H
--- /dev/null
+//===-- sanitizer_file.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. It defines filesystem-related interfaces. This
+// is separate from sanitizer_common.cc so that it's simpler to disable
+// all the filesystem support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+
+namespace __sanitizer {
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ WriteToFile(kStderrFd, buffer, length);
+}
+
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
+
+void RawWrite(const char *buffer) {
+ report_file.Write(buffer, internal_strlen(buffer));
+}
+
+void ReportFile::ReopenIfNecessary() {
+ mu->CheckLocked();
+ if (fd == kStdoutFd || fd == kStderrFd) return;
+
+ uptr pid = internal_getpid();
+ // If in tracer, use the parent's file.
+ if (pid == stoptheworld_tracer_pid)
+ pid = stoptheworld_tracer_ppid;
+ if (fd != kInvalidFd) {
+ // If the report file is already opened by the current process,
+ // do nothing. Otherwise the report file was opened by the parent
+ // process, close it now.
+ if (fd_pid == pid)
+ return;
+ else
+ CloseFile(fd);
+ }
+
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
+ exe_name, pid);
+ } else {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+ }
+ fd = OpenFile(full_path, WrOnly);
+ if (fd == kInvalidFd) {
+ const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+ Die();
+ }
+ fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+ if (!path)
+ return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+
+ SpinMutexLock l(mu);
+ if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+ CloseFile(fd);
+ fd = kInvalidFd;
+ if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
+ } else if (internal_strcmp(path, "stderr") == 0) {
+ fd = kStderrFd;
+ } else {
+ internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ }
+}
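
A short usage sketch of the public entry point that funnels into
SetReportPath (the path below is hypothetical):

extern "C" void __sanitizer_set_report_path(const char *path);

static void RouteReportsToTmp() {
  // Reports now go to /tmp/sanitizer.log.<PID>, or to
  // /tmp/sanitizer.log.<exe-name>.<PID> when log_exe_name=1;
  // "stdout" and "stderr" are special-cased back to the standard streams.
  __sanitizer_set_report_path("/tmp/sanitizer.log");
}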
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ uptr PageSize = GetPageSizeCached();
+ uptr kMinFileLen = PageSize;
+ *buff = nullptr;
+ *buff_size = 0;
+ *read_len = 0;
+ // The files we usually open are not seekable, so try different buffer sizes.
+ for (uptr size = kMinFileLen; size <= max_len; size *= 2) {
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd) return false;
+ UnmapOrDie(*buff, *buff_size);
+ *buff = (char*)MmapOrDie(size, __func__);
+ *buff_size = size;
+ *read_len = 0;
+ // Read up to one page at a time.
+ bool reached_eof = false;
+ while (*read_len + PageSize <= size) {
+ uptr just_read;
+ if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) {
+ UnmapOrDie(*buff, *buff_size);
+ return false;
+ }
+ if (just_read == 0) {
+ reached_eof = true;
+ break;
+ }
+ *read_len += just_read;
+ }
+ CloseFile(fd);
+ if (reached_eof) // We've read the whole file.
+ break;
+ }
+ return true;
+}
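
Because files like /proc entries report no usable size, ReadFileToBuffer
retries with a doubled buffer until a read hits EOF before the buffer fills.
A standalone POSIX sketch of the same strategy (names and size limits are
illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <vector>

static bool ReadWholeFile(const char *path, std::vector<char> *out) {
  for (size_t cap = 4096; cap <= (1u << 26); cap *= 2) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return false;
    out->assign(cap, 0);
    size_t total = 0;
    bool error = false, eof = false;
    while (total < cap) {
      ssize_t n = read(fd, out->data() + total, cap - total);
      if (n < 0) { error = true; break; }
      if (n == 0) { eof = true; break; }
      total += static_cast<size_t>(n);
    }
    close(fd);
    if (error) return false;
    if (eof) {  // the whole file fit into this buffer
      out->resize(total);
      return true;
    }
    // Buffer filled without seeing EOF: reopen and retry with twice the size.
  }
  return false;
}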
+
+static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
+
+char *FindPathToBinary(const char *name) {
+ if (FileExists(name)) {
+ return internal_strdup(name);
+ }
+
+ const char *path = GetEnv("PATH");
+ if (!path)
+ return nullptr;
+ uptr name_len = internal_strlen(name);
+ InternalScopedBuffer<char> buffer(kMaxPathLength);
+ const char *beg = path;
+ while (true) {
+ const char *end = internal_strchrnul(beg, kPathSeparator);
+ uptr prefix_len = end - beg;
+ if (prefix_len + name_len + 2 <= kMaxPathLength) {
+ internal_memcpy(buffer.data(), beg, prefix_len);
+ buffer[prefix_len] = '/';
+ internal_memcpy(&buffer[prefix_len + 1], name, name_len);
+ buffer[prefix_len + 1 + name_len] = '\0';
+ if (FileExists(buffer.data()))
+ return internal_strdup(buffer.data());
+ }
+ if (*end == '\0') break;
+ beg = end + 1;
+ }
+ return nullptr;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ report_file.SetReportPath(path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
+ report_file.fd_pid = internal_getpid();
+}
+} // extern "C"
+
+#endif // !SANITIZER_FUCHSIA
--- /dev/null
+//===-- sanitizer_file.h ---------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+// It declares filesystem-related interfaces. This is separate from
+// sanitizer_common.h so that it's simpler to disable all the filesystem
+// support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FILE_H
+#define SANITIZER_FILE_H
+
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+struct ReportFile {
+ void Write(const char *buffer, uptr length);
+ bool SupportsColors();
+ void SetReportPath(const char *path);
+
+ // Don't use fields directly. They are only declared public to allow
+ // aggregate initialization.
+
+ // Protects fields below.
+ StaticSpinMutex *mu;
+ // Opened file descriptor. Defaults to stderr. It may be equal to
+  // kInvalidFd, in which case a new file will be opened when necessary.
+ fd_t fd;
+ // Path prefix of report file, set via __sanitizer_set_report_path.
+ char path_prefix[kMaxPathLength];
+ // Full path to report, obtained as <path_prefix>.PID
+ char full_path[kMaxPathLength];
+ // PID of the process that opened fd. If a fork() occurs,
+  // the PID of the child will differ from fd_pid.
+ uptr fd_pid;
+
+ private:
+ void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
+enum FileAccessMode {
+ RdOnly,
+ WrOnly,
+ RdWr
+};
+
+// Returns kInvalidFd on error.
+fd_t OpenFile(const char *filename, FileAccessMode mode,
+ error_t *errno_p = nullptr);
+void CloseFile(fd_t);
+
+// Return true on success, false on error.
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
+ uptr *bytes_read = nullptr, error_t *error_p = nullptr);
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
+ uptr *bytes_written = nullptr, error_t *error_p = nullptr);
+
+bool RenameFile(const char *oldpath, const char *newpath,
+ error_t *error_p = nullptr);
+
+// Scoped file handle closer.
+struct FileCloser {
+ explicit FileCloser(fd_t fd) : fd(fd) {}
+ ~FileCloser() { CloseFile(fd); }
+ fd_t fd;
+};
+
+bool SupportsColoredOutput(fd_t fd);
+
+// OS
+const char *GetPwd();
+bool FileExists(const char *filename);
+char *FindPathToBinary(const char *name);
+bool IsPathSeparator(const char c);
+bool IsAbsolutePath(const char *path);
+// Starts a subprocess and returns its pid.
+// If the *_fd parameters are not kInvalidFd, the corresponding input/output
+// streams are redirected to those files. The files are always closed
+// in the parent process, even in case of an error.
+// The child process will close all fds after STDERR_FILENO
+// before passing control to a program.
+pid_t StartSubprocess(const char *filename, const char *const argv[],
+ fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
+ fd_t stderr_fd = kInvalidFd);
+// Checks if the specified process is still running.
+bool IsProcessRunning(pid_t pid);
+// Waits for the process to finish and returns its exit code.
+// Returns -1 in case of an error.
+int WaitForProcess(pid_t pid);
+
+// Maps given file to virtual memory, and returns pointer to it
+// (or NULL if mapping fails). Stores the size of mmaped region
+// in '*buff_size'.
+void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FILE_H
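
FileCloser is a small RAII guard over the fd_t API above; a usage sketch
inside namespace __sanitizer (the file name is hypothetical):

void DumpNote() {
  fd_t fd = OpenFile("note.txt", WrOnly);
  if (fd == kInvalidFd) return;
  FileCloser closer(fd);  // CloseFile(fd) runs on every exit path
  const char msg[] = "hello\n";
  WriteToFile(fd, msg, sizeof(msg) - 1);
}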
bool Parse(const char *value) final;
};
-template <>
-inline bool FlagHandler<bool>::Parse(const char *value) {
+inline bool ParseBool(const char *value, bool *b) {
if (internal_strcmp(value, "0") == 0 ||
internal_strcmp(value, "no") == 0 ||
internal_strcmp(value, "false") == 0) {
- *t_ = false;
+ *b = false;
return true;
}
if (internal_strcmp(value, "1") == 0 ||
internal_strcmp(value, "yes") == 0 ||
internal_strcmp(value, "true") == 0) {
- *t_ = true;
+ *b = true;
return true;
}
+ return false;
+}
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+ if (ParseBool(value, t_)) return true;
Printf("ERROR: Invalid value for bool option: '%s'\n", value);
return false;
}
+template <>
+inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
+ bool b;
+ if (ParseBool(value, &b)) {
+ *t_ = b ? kHandleSignalYes : kHandleSignalNo;
+ return true;
+ }
+ if (internal_strcmp(value, "2") == 0 ||
+ internal_strcmp(value, "exclusive") == 0) {
+ *t_ = kHandleSignalExclusive;
+ return true;
+ }
+ Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
+ return false;
+}
+
template <>
inline bool FlagHandler<const char *>::Parse(const char *value) {
- *t_ = internal_strdup(value);
+ *t_ = value;
return true;
}
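
A standalone sketch mirroring the handler's accepted spellings (generic
names, plain libc; e.g. ASAN_OPTIONS=handle_segv=2 selects the exclusive
mode):

#include <cstring>

enum HandleSignalMode { kHandleSignalNo, kHandleSignalYes,
                        kHandleSignalExclusive };

static bool ParseHandleSignalMode(const char *value, HandleSignalMode *mode) {
  if (!strcmp(value, "0") || !strcmp(value, "no") || !strcmp(value, "false")) {
    *mode = kHandleSignalNo;    // bool-style false
    return true;
  }
  if (!strcmp(value, "1") || !strcmp(value, "yes") || !strcmp(value, "true")) {
    *mode = kHandleSignalYes;   // bool-style true
    return true;
  }
  if (!strcmp(value, "2") || !strcmp(value, "exclusive")) {
    *mode = kHandleSignalExclusive;  // block user handlers entirely
    return true;
  }
  return false;
}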
namespace __sanitizer {
+enum HandleSignalMode {
+ kHandleSignalNo,
+ kHandleSignalYes,
+ kHandleSignalExclusive,
+};
+
struct CommonFlags {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "sanitizer_flags.inc"
COMMON_FLAG(
int, verbosity, 0,
"Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
-COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
COMMON_FLAG(
bool, leak_check_at_exit, true,
"Invoke leak checking in an atexit handler. Has no effect if "
COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
+COMMON_FLAG(int, print_module_map, 0,
+ "OS X only (0 - don't print, 1 - print only once before process "
+ "exits, 2 - print after each report).")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
-COMMON_FLAG(bool, handle_segv, true,
- "If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
-COMMON_FLAG(bool, handle_abort, false,
- "If set, registers the tool's custom SIGABRT handler.")
-COMMON_FLAG(bool, handle_sigill, false,
- "If set, registers the tool's custom SIGILL handler.")
-COMMON_FLAG(bool, handle_sigfpe, true,
- "If set, registers the tool's custom SIGFPE handler.")
-COMMON_FLAG(bool, allow_user_segv_handler, false,
- "If set, allows user to register a SEGV handler even if the tool "
- "registers one.")
+#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
+ "Controls custom tool's " #signal " handler (0 - do not registers the " \
+ "handler, 1 - register the handler and allow user to set own, " \
+ "2 - registers the handler and block user from changing it). "
+COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
+COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))
+COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))
+COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))
+COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
+#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
+COMMON_FLAG(bool, allow_user_segv_handler, true,
+ "Deprecated. True has no effect, use handle_sigbus=1. If false, "
+ "handle_*=1 will be upgraded to handle_*=2.")
COMMON_FLAG(bool, use_sigaltstack, true,
"If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, false,
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
-COMMON_FLAG(bool, allocator_release_to_os, false,
- "Experimental. If true, try to periodically release unused"
- " memory to the OS.\n")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+ "Experimental. Only affects a 64-bit allocator. If set, tries to "
+ "release unused memory to the OS, but not more often than this "
+ "interval (in milliseconds). Negative values mean do not attempt "
+ "to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
bool, coverage, false,
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).")
-COMMON_FLAG(bool, coverage_pcs, true,
- "If set (and if 'coverage' is set too), the coverage information "
- "will be dumped as a set of PC offsets for every module.")
-COMMON_FLAG(bool, coverage_order_pcs, false,
- "If true, the PCs will be dumped in the order they've"
- " appeared during the execution.")
-COMMON_FLAG(bool, coverage_bitset, false,
- "If set (and if 'coverage' is set too), the coverage information "
- "will also be dumped as a bitset to a separate file.")
-COMMON_FLAG(bool, coverage_counters, false,
- "If set (and if 'coverage' is set too), the bitmap that corresponds"
- " to coverage counters will be dumped.")
-COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID,
- "If set, coverage information will be dumped directly to a memory "
- "mapped file. This way data is not lost even if the process is "
- "suddenly killed.")
COMMON_FLAG(const char *, coverage_dir, ".",
"Target directory for coverage dumps. Defaults to the current "
"directory.")
COMMON_FLAG(bool, intercept_strspn, true,
"If set, uses custom wrappers for strspn and strcspn function "
"to find more errors.")
+COMMON_FLAG(bool, intercept_strtok, true,
+ "If set, uses a custom wrapper for the strtok function "
+ "to find more errors.")
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
COMMON_FLAG(bool, intercept_strlen, true,
"If set, uses custom wrappers for strlen and strnlen functions "
"to find more errors.")
+COMMON_FLAG(bool, intercept_strndup, true,
+ "If set, uses custom wrappers for strndup functions "
+ "to find more errors.")
COMMON_FLAG(bool, intercept_strchr, true,
"If set, uses custom wrappers for strchr, strchrnul, and strrchr "
"functions to find more errors.")
"(asan only).")
COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.")
COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.")
+COMMON_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
+COMMON_FLAG(bool, dump_registers, true,
+ "If true, dump values of CPU registers when SEGV happens. Only "
+ "available on OS X for now.")
--- /dev/null
+//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and other sanitizer
+// run-time libraries and implements Fuchsia-specific functions from
+// sanitizer_common.h.
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_stacktrace.h"
+
+#include <limits.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <unwind.h>
+#include <zircon/errors.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace __sanitizer {
+
+void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
+
+uptr internal_sched_yield() {
+ zx_status_t status = _zx_nanosleep(0);
+ CHECK_EQ(status, ZX_OK);
+ return 0; // Why doesn't this return void?
+}
+
+static void internal_nanosleep(zx_time_t ns) {
+ zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+ CHECK_EQ(status, ZX_OK);
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ internal_nanosleep(ZX_SEC(seconds));
+ return 0;
+}
+
+u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }
+
+uptr internal_getpid() {
+ zx_info_handle_basic_t info;
+ zx_status_t status =
+ _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
+ sizeof(info), NULL, NULL);
+ CHECK_EQ(status, ZX_OK);
+ uptr pid = static_cast<uptr>(info.koid);
+ CHECK_EQ(pid, info.koid);
+ return pid;
+}
+
+uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
+
+uptr GetTid() { return GetThreadSelf(); }
+
+void Abort() { abort(); }
+
+int Atexit(void (*function)(void)) { return atexit(function); }
+
+void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+
+void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
+
+void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ void *base;
+ size_t size;
+ CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
+ CHECK_EQ(pthread_attr_destroy(&attr), 0);
+
+ *stack_bottom = reinterpret_cast<uptr>(base);
+ *stack_top = *stack_bottom + size;
+}
+
+void MaybeReexec() {}
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void DisableCoreDumperIfNecessary() {}
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
+void StartReportDeadlySignal() {}
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+void SetAlternateSignalStack() {}
+void UnsetAlternateSignalStack() {}
+void InitTlsSize() {}
+
+void PrintModuleMap() {}
+
+bool SignalContext::IsStackOverflow() const { return false; }
+void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
+const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
+
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = _Unwind_GetIP(ctx);
+ if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
+ : _URC_NO_REASON);
+}
+
+void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ CHECK_GT(size, 0);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ u32 max_depth) {
+ CHECK_NE(context, nullptr);
+ UNREACHABLE("signal context doesn't exist");
+}
+
+enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
+
+BlockingMutex::BlockingMutex() {
+ // NOTE! It's important that this use internal_memset, because plain
+ // memset might be intercepted (e.g., actually be __asan_memset).
+ // Defining this so the compiler initializes each field, e.g.:
+ // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
+ // might result in the compiler generating a call to memset, which would
+ // have the same problem.
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+ zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
+ MtxSleeping, ZX_TIME_INFINITE);
+ if (status != ZX_ERR_BAD_STATE) // Normal race.
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping) {
+ zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
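The Lock/Unlock pair above is the usual three-state futex protocol: the fast
path flips MtxUnlocked to MtxLocked, and a contended waiter publishes
MtxSleeping so the unlocker knows a _zx_futex_wake is needed. A hypothetical
caller:

__sanitizer::BlockingMutex report_mutex;

void WithReportLock() {
  report_mutex.Lock();    // may _zx_futex_wait when contended
  // ... critical section ...
  report_mutex.Unlock();  // wakes one sleeper only if state was MtxSleeping
}
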
+uptr GetPageSize() { return PAGE_SIZE; }
+
+uptr GetMmapGranularity() { return PAGE_SIZE; }
+
+sanitizer_shadow_bounds_t ShadowBounds;
+
+uptr GetMaxVirtualAddress() {
+ ShadowBounds = __sanitizer_shadow_bounds();
+ return ShadowBounds.memory_limit - 1;
+}
+
+static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
+ bool raw_report, bool die_for_nomem) {
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
+ raw_report);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+ uintptr_t addr;
+ status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
+ ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
+ raw_report);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ return DoAnonymousMmapOrDie(size, mem_type, false, false);
+}
+
+// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
+// Instead of doing exactly what they say, we make MmapNoAccess actually
+// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
+// uses that VMAR instead of the root.
+
+zx_handle_t allocator_vmar = ZX_HANDLE_INVALID;
+uintptr_t allocator_vmar_base;
+size_t allocator_vmar_size;
+
+void *MmapNoAccess(uptr size) {
+ size = RoundUpTo(size, PAGE_SIZE);
+ CHECK_EQ(allocator_vmar, ZX_HANDLE_INVALID);
+ uintptr_t base;
+ zx_status_t status =
+ _zx_vmar_allocate(_zx_vmar_root_self(), 0, size,
+ ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
+ ZX_VM_FLAG_CAN_MAP_SPECIFIC,
+ &allocator_vmar, &base);
+ if (status != ZX_OK)
+ ReportMmapFailureAndDie(size, "sanitizer allocator address space",
+ "zx_vmar_allocate", status);
+
+ allocator_vmar_base = base;
+ allocator_vmar_size = size;
+ return reinterpret_cast<void *>(base);
+}
+
+constexpr const char kAllocatorVmoName[] = "sanitizer_allocator";
+
+static void *DoMmapFixedOrDie(uptr fixed_addr, uptr size, bool die_for_nomem) {
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmo_create", status);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, kAllocatorVmoName,
+ sizeof(kAllocatorVmoName) - 1);
+
+ DCHECK_GE(fixed_addr, allocator_vmar_base);
+ uintptr_t offset = fixed_addr - allocator_vmar_base;
+ DCHECK_LE(size, allocator_vmar_size);
+ DCHECK_GE(allocator_vmar_size - offset, size);
+
+ uintptr_t addr;
+ status = _zx_vmar_map(
+ allocator_vmar, offset, vmo, 0, size,
+ ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
+ &addr);
+ _zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmar_map", status);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ return DoMmapFixedOrDie(fixed_addr, size, true);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+ return DoMmapFixedOrDie(fixed_addr, size, false);
+}
+
+// This should never be called.
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ UNIMPLEMENTED();
+}
+
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK_GE(size, PAGE_SIZE);
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+
+ // Map a larger size to get a chunk of address space big enough that
+ // it surely contains an aligned region of the requested size. Then
+ // overwrite the aligned middle portion with a mapping from the
+ // beginning of the VMO, and unmap the excess before and after.
+ size_t map_size = size + alignment;
+ uintptr_t addr;
+ status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+ ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+ if (status == ZX_OK) {
+ uintptr_t map_addr = addr;
+ uintptr_t map_end = map_addr + map_size;
+ addr = RoundUpTo(map_addr, alignment);
+ uintptr_t end = addr + size;
+ if (addr != map_addr) {
+ zx_info_vmar_t info;
+ status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
+ sizeof(info), NULL, NULL);
+ if (status == ZX_OK) {
+ uintptr_t new_addr;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
+ ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+ ZX_VM_FLAG_SPECIFIC_OVERWRITE,
+ &new_addr);
+ if (status == ZX_OK) CHECK_EQ(new_addr, addr);
+ }
+ }
+ if (status == ZX_OK && addr != map_addr)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
+ if (status == ZX_OK && end != map_end)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
+ }
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
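
The map-larger-then-trim trick above is not Zircon-specific; a minimal POSIX
sketch of the same technique, under the assumption that munmap can shrink a
mapping from either end:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

// Sketch: obtain 'size' bytes aligned to 'alignment' (both powers of two)
// by over-mapping size + alignment and unmapping the slack on both sides.
static void *MapAligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;
  void *p = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return nullptr;
  uintptr_t map_addr = reinterpret_cast<uintptr_t>(p);
  uintptr_t addr = (map_addr + alignment - 1) & ~(alignment - 1);
  if (addr != map_addr)                        // trim the unaligned head
    munmap(p, addr - map_addr);
  uintptr_t end = addr + size;
  if (end != map_addr + map_size)              // trim the tail
    munmap(reinterpret_cast<void *>(end), map_addr + map_size - end);
  return reinterpret_cast<void *>(addr);
}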
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_status_t status = _zx_vmar_unmap(_zx_vmar_root_self(),
+ reinterpret_cast<uintptr_t>(addr), size);
+ if (status != ZX_OK) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+
+ DecreaseTotalMmap(size);
+}
+
+// This is used on the shadow mapping, which cannot be changed.
+// Zircon doesn't have anything like MADV_DONTNEED.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+
+void DumpProcessMap() {
+ UNIMPLEMENTED(); // TODO(mcgrathr): write it
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ // TODO(mcgrathr): Figure out a better way.
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status == ZX_OK) {
+ while (size > 0) {
+ size_t wrote;
+ status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
+ &wrote);
+ if (status != ZX_OK) break;
+ CHECK_GT(wrote, 0);
+ CHECK_LE(wrote, size);
+ beg += wrote;
+ size -= wrote;
+ }
+ _zx_handle_close(vmo);
+ }
+ return status == ZX_OK;
+}
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ zx_handle_t vmo;
+ zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
+ if (status == ZX_OK) {
+ uint64_t vmo_size;
+ status = _zx_vmo_get_size(vmo, &vmo_size);
+ if (status == ZX_OK) {
+ if (vmo_size < max_len) max_len = vmo_size;
+ size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
+ uintptr_t addr;
+ status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+ ZX_VM_FLAG_PERM_READ, &addr);
+ if (status == ZX_OK) {
+ *buff = reinterpret_cast<char *>(addr);
+ *buff_size = map_size;
+ *read_len = max_len;
+ }
+ }
+ _zx_handle_close(vmo);
+ }
+ if (status != ZX_OK && errno_p) *errno_p = status;
+ return status == ZX_OK;
+}
+
+void RawWrite(const char *buffer) {
+ __sanitizer_log_write(buffer, internal_strlen(buffer));
+}
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ __sanitizer_log_write(buffer, length);
+}
+
+char **StoredArgv;
+char **StoredEnviron;
+
+char **GetArgv() { return StoredArgv; }
+
+const char *GetEnv(const char *name) {
+ if (StoredEnviron) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = StoredEnviron; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return nullptr;
+}
+
+uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
+ const char *argv0 = StoredArgv[0];
+ if (!argv0) argv0 = "<UNKNOWN>";
+ internal_strncpy(buf, argv0, buf_len);
+ return internal_strlen(buf);
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+uptr MainThreadStackBase, MainThreadStackSize;
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
+ size_t size;
+ CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
+ CHECK_EQ(size, length);
+ return true;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_startup_hook(int argc, char **argv, char **envp,
+ void *stack_base, size_t stack_size) {
+ __sanitizer::StoredArgv = argv;
+ __sanitizer::StoredEnviron = envp;
+ __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
+ __sanitizer::MainThreadStackSize = stack_size;
+}
+
+void __sanitizer_set_report_path(const char *path) {
+ // Handle the initialization code in each sanitizer, but no other calls.
+ // This setting is never consulted on Fuchsia.
+ DCHECK_EQ(path, common_flags()->log_path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ UNREACHABLE("not available on Fuchsia");
+}
+} // extern "C"
+
+#endif // SANITIZER_FUCHSIA
--- /dev/null
+//===-- sanitizer_fuchsia.h ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// Fuchsia-specific sanitizer support.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FUCHSIA_H
+#define SANITIZER_FUCHSIA_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+
+#include <zircon/sanitizer.h>
+
+namespace __sanitizer {
+
+extern uptr MainThreadStackBase, MainThreadStackSize;
+extern sanitizer_shadow_bounds_t ShadowBounds;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
+#endif // SANITIZER_FUCHSIA_H
void __sanitizer_report_error_summary(const char *error_summary);
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
- SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_annotate_contiguous_container(const void *beg,
SANITIZER_INTERFACE_ATTRIBUTE
const void *__sanitizer_contiguous_container_find_bad_address(
const void *beg, const void *mid, const void *end);
- } // extern "C"
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_switch();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_gep();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_indir();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
+ __sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_8bit_counters_init();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_pcs_init();
+} // extern "C"
#endif // SANITIZER_INTERFACE_INTERNAL_H
// Only use SANITIZER_*ATTRIBUTE* before the function return type!
#if SANITIZER_WINDOWS
+#if SANITIZER_IMPORT_INTERFACE
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
+#else
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
-// FIXME find out what we need on Windows, if anything.
+#endif
# define SANITIZER_WEAK_ATTRIBUTE
#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !SANITIZER_GO
+// TLS is handled differently on different platforms
+#if SANITIZER_LINUX
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
+ __attribute__((tls_model("initial-exec"))) thread_local
+#else
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
+#endif
+
+//--------------------------- WEAK FUNCTIONS ---------------------------------//
+// When working with weak functions, to simplify the code and make it more
+// portable, when possible define a default implementation using this macro:
+//
+// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
+//
+// For example:
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
+//
+#if SANITIZER_WINDOWS
+#include "sanitizer_win_defs.h"
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
+#else
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
+ ReturnType Name(__VA_ARGS__)
+#endif
+
+// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
+// will evaluate to a null pointer when not defined.
+#ifndef SANITIZER_SUPPORTS_WEAK_HOOKS
+#if (SANITIZER_LINUX || SANITIZER_MAC) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+// For weak hooks that are called very often, where we want to avoid the
+// overhead of executing a default implementation when it is not necessary,
+// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to define the default
+// implementation only for platforms that don't support weak symbols. For
+// example:
+//
+// #if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
+// return a > b;
+// }
+// #endif
+//
+// And then use it as: if (compare_hook) compare_hook(a, b);
+//----------------------------------------------------------------------------//
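
A compilable ELF-side sketch of that pattern (the hook name is hypothetical):
the runtime declares the hook weak, and any strong definition in the main
executable overrides the null:

extern "C" __attribute__((weak)) void my_tool_hook(int event);

static void MaybeCallHook(int event) {
  if (my_tool_hook)      // evaluates to null unless a strong definition exists
    my_tool_hook(event);
}

// In the user's program (another translation unit):
// extern "C" void my_tool_hook(int event) { /* react to the event */ }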
+
// We can use .preinit_array section on Linux to call sanitizer initialization
// functions very early in the process startup (unless PIC macro is defined).
#endif
typedef int pid_t;
-// WARNING: OFF_T may be different from OS type off_t, depending on the value of
-// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
-// like pread and mmap, as opposed to pread64 and mmap64.
-// FreeBSD, Mac and Linux/x86-64 are special.
-#if SANITIZER_FREEBSD || SANITIZER_MAC || \
- (SANITIZER_LINUX && defined(__x86_64__))
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC || \
+ (SANITIZER_LINUX && defined(__x86_64__))
typedef u64 OFF_T;
#else
typedef uptr OFF_T;
# endif
#endif
+#if SANITIZER_MAC
+// On Darwin, thread IDs are 64-bit even on 32-bit systems.
+typedef u64 tid_t;
+#else
+typedef uptr tid_t;
+#endif
// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
#define CHECK_IMPL(c1, op, c2) \
do { \
- __sanitizer::u64 v1 = (u64)(c1); \
- __sanitizer::u64 v2 = (u64)(c2); \
+ __sanitizer::u64 v1 = (__sanitizer::u64)(c1); \
+ __sanitizer::u64 v2 = (__sanitizer::u64)(c2); \
if (UNLIKELY(!(v1 op v2))) \
__sanitizer::CheckFailed(__FILE__, __LINE__, \
"(" #c1 ") " #op " (" #c2 ")", v1, v2); \
enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
-# if SANITIZER_S390_31
-# define GET_CALLER_PC() \
- (uptr)__builtin_extract_return_addr(__builtin_return_address(0))
-# else
-# define GET_CALLER_PC() (uptr)__builtin_return_address(0)
-# endif
-# define GET_CURRENT_FRAME() (uptr)__builtin_frame_address(0)
+#if SANITIZER_S390_31
+#define GET_CALLER_PC() \
+ (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
+#endif
+#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
inline void Trap() {
__builtin_trap();
}
extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
# pragma intrinsic(_AddressOfReturnAddress)
-# define GET_CALLER_PC() (uptr)_ReturnAddress()
+#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
// CaptureStackBackTrace doesn't need to know BP on Windows.
-# define GET_CURRENT_FRAME() (((uptr)_AddressOfReturnAddress()) + sizeof(uptr))
+#define GET_CURRENT_FRAME() \
+ (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
extern "C" void __ud2(void);
# pragma intrinsic(__ud2)
}
// Forces the compiler to generate a frame pointer in the function.
-#define ENABLE_FRAME_POINTER \
- do { \
- volatile uptr enable_fp; \
- enable_fp = GET_CURRENT_FRAME(); \
- (void)enable_fp; \
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile __sanitizer::uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ (void)enable_fp; \
} while (0)
} // namespace __sanitizer
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_NETBSD
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
}
// Scan suppressions list and find newly loaded and unloaded libraries.
- MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- InternalScopedString module(kMaxPathLength);
+ ListOfModules modules;
+ modules.init();
for (uptr i = 0; i < count_; i++) {
Lib *lib = &libs_[i];
bool loaded = false;
- proc_maps.Reset();
- uptr b, e, off, prot;
- while (proc_maps.Next(&b, &e, &off, module.data(), module.size(), &prot)) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- if (TemplateMatch(lib->templ, module.data()) ||
- (lib->real_name &&
- internal_strcmp(lib->real_name, module.data()) == 0)) {
+ for (const auto &mod : modules) {
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (!TemplateMatch(lib->templ, mod.full_name()) &&
+ !(lib->real_name &&
+ internal_strcmp(lib->real_name, mod.full_name()) == 0))
+ continue;
if (loaded) {
Report("%s: called_from_lib suppression '%s' is matched against"
" 2 libraries: '%s' and '%s'\n",
- SanitizerToolName, lib->templ, lib->name, module.data());
+ SanitizerToolName, lib->templ, lib->name, mod.full_name());
Die();
}
loaded = true;
VReport(1,
"Matched called_from_lib suppression '%s' against library"
" '%s'\n",
- lib->templ, module.data());
+ lib->templ, mod.full_name());
lib->loaded = true;
- lib->name = internal_strdup(module.data());
- const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
- code_ranges_[idx].begin = b;
- code_ranges_[idx].end = e;
- atomic_store(&loaded_count_, idx + 1, memory_order_release);
+ lib->name = internal_strdup(mod.full_name());
+ const uptr idx =
+ atomic_load(&ignored_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, kMaxLibs);
+ ignored_code_ranges_[idx].begin = range.beg;
+ ignored_code_ranges_[idx].end = range.end;
+ atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
+ break;
}
}
if (lib->loaded && !loaded) {
Die();
}
}
+
+ // Track instrumented ranges.
+ if (track_instrumented_libs_) {
+ for (const auto &mod : modules) {
+ if (!mod.instrumented())
+ continue;
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
+ continue;
+ VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+ range.beg, range.end, mod.full_name());
+ const uptr idx =
+ atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, kMaxLibs);
+ instrumented_code_ranges_[idx].begin = range.beg;
+ instrumented_code_ranges_[idx].end = range.end;
+ atomic_store(&instrumented_ranges_count_, idx + 1,
+ memory_order_release);
+ }
+ }
+ }
}
void LibIgnore::OnLibraryUnloaded() {
} // namespace __sanitizer
-#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
+ // SANITIZER_NETBSD
// Must be called during initialization.
void AddIgnoredLibrary(const char *name_templ);
+ void IgnoreNoninstrumentedModules(bool enable) {
+ track_instrumented_libs_ = enable;
+ }
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);
// Must be called after a dynamic library is unloaded.
void OnLibraryUnloaded();
- // Checks whether the provided PC belongs to one of the ignored libraries.
- bool IsIgnored(uptr pc) const;
+  // Checks whether the provided PC belongs to one of the ignored libraries,
+  // or whether the PC should be ignored because it belongs to a
+  // non-instrumented module (when ignore_noninstrumented_modules=1). Also
+  // sets *pc_in_ignored_lib to true if the PC is in an ignored library,
+  // false otherwise.
+ bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
+
+ // Checks whether the provided PC belongs to an instrumented module.
+ bool IsPcInstrumented(uptr pc) const;
private:
struct Lib {
uptr end;
};
+ inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
+ return (pc >= range.begin && pc < range.end);
+ }
+
static const uptr kMaxLibs = 128;
// Hot part:
- atomic_uintptr_t loaded_count_;
- LibCodeRange code_ranges_[kMaxLibs];
+ atomic_uintptr_t ignored_ranges_count_;
+ LibCodeRange ignored_code_ranges_[kMaxLibs];
+
+ atomic_uintptr_t instrumented_ranges_count_;
+ LibCodeRange instrumented_code_ranges_[kMaxLibs];
// Cold part:
BlockingMutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
+ bool track_instrumented_libs_;
// Disallow copying of LibIgnore objects.
LibIgnore(const LibIgnore&); // not implemented
void operator = (const LibIgnore&); // not implemented
};
-inline bool LibIgnore::IsIgnored(uptr pc) const {
- const uptr n = atomic_load(&loaded_count_, memory_order_acquire);
+inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+ const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, ignored_code_ranges_[i])) {
+ *pc_in_ignored_lib = true;
+ return true;
+ }
+ }
+ *pc_in_ignored_lib = false;
+ if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+ return true;
+ return false;
+}
+
+inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
+ const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
for (uptr i = 0; i < n; i++) {
- if (pc >= code_ranges_[i].begin && pc < code_ranges_[i].end)
+ if (IsInRange(pc, instrumented_code_ranges_[i]))
return true;
}
return false;
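
The lock-free readers above rely on the standard publish-with-release /
read-with-acquire pattern: range slots are written first, then the count is
bumped with a release store, so an acquire load of the count guarantees the
slots below it are visible. A standalone sketch with std::atomic (generic
names; the writer is assumed to be serialized by a mutex, as OnLibraryLoaded
is):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct Range { uintptr_t begin, end; };

static Range g_ranges[128];
static std::atomic<size_t> g_count{0};

// Writer (under an external lock): fill the slot, then publish it.
static void PublishRange(uintptr_t b, uintptr_t e) {
  size_t idx = g_count.load(std::memory_order_relaxed);
  g_ranges[idx] = {b, e};
  g_count.store(idx + 1, std::memory_order_release);
}

// Reader (lock-free): acquire makes the first 'n' slots fully visible.
static bool Contains(uintptr_t pc) {
  size_t n = g_count.load(std::memory_order_acquire);
  for (size_t i = 0; i < n; i++)
    if (pc >= g_ranges[i].begin && pc < g_ranges[i].end) return true;
  return false;
}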
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
-#if !SANITIZER_FREEBSD
+#if SANITIZER_LINUX
#include <asm/param.h>
#endif
+#if SANITIZER_NETBSD
+#include <lwp.h>
+#endif
+
// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
// access stat from asm/stat.h, without conflicting with definition in
#include <ucontext.h>
#include <unistd.h>
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
#if SANITIZER_FREEBSD
#include <sys/exec.h>
#include <sys/sysctl.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
#include <machine/atomic.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
extern char **environ; // provided by crt1
#endif // SANITIZER_FREEBSD
+#if SANITIZER_NETBSD
+#include <limits.h> // For NAME_MAX
+#include <sys/sysctl.h>
+extern char **environ; // provided by crt1
+#endif // SANITIZER_NETBSD
+
#if !SANITIZER_ANDROID
#include <sys/signal.h>
#endif
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+#if SANITIZER_LINUX && __GLIBC_PREREQ(2, 16)
+# define SANITIZER_USE_GETAUXVAL 1
+#else
+# define SANITIZER_USE_GETAUXVAL 0
+#endif
+
+#if SANITIZER_USE_GETAUXVAL
+#include <sys/auxv.h>
+#endif
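
When SANITIZER_USE_GETAUXVAL is 1, auxiliary-vector queries become a single
call instead of parsing /proc/self/auxv; an illustrative glibc-only sketch
(requires glibc >= 2.16):

#include <sys/auxv.h>

// Two values a runtime can obtain directly from the aux vector.
static unsigned long PageSizeFromAuxv() { return getauxval(AT_PAGESZ); }
static unsigned long ProgramHeadersAddr() { return getauxval(AT_PHDR); }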
+
#if SANITIZER_LINUX
// <linux/time.h>
struct kernel_timeval {
}
#endif
+#if SANITIZER_LINUX && defined(__NR_getrandom)
+# if !defined(GRND_NONBLOCK)
+# define GRND_NONBLOCK 1
+# endif
+# define SANITIZER_USE_GETRANDOM 1
+#else
+# define SANITIZER_USE_GETRANDOM 0
+#endif // SANITIZER_LINUX && defined(__NR_getrandom)
+
namespace __sanitizer {
#if SANITIZER_LINUX && defined(__x86_64__)
#if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
OFF_T offset) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if SANITIZER_NETBSD
+ return internal_syscall_ptr(SYSCALL(mmap), addr, length, prot, flags, fd,
+ (long)0, offset);
+#elif SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
offset);
#else
uptr internal_read(fd_t fd, void *buf, uptr count) {
sptr res;
+#if SANITIZER_NETBSD
+ HANDLE_EINTR(res, internal_syscall_ptr(SYSCALL(read), fd, buf, count));
+#else
HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
count));
+#endif
return res;
}
uptr internal_write(fd_t fd, const void *buf, uptr count) {
sptr res;
+#if SANITIZER_NETBSD
+ HANDLE_EINTR(res, internal_syscall_ptr(SYSCALL(write), fd, buf, count));
+#else
HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
count));
+#endif
return res;
}
uptr internal_ftruncate(fd_t fd, uptr size) {
sptr res;
+#if SANITIZER_NETBSD
+ HANDLE_EINTR(res, internal_syscall(SYSCALL(ftruncate), fd, 0, (s64)size));
+#else
HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
(OFF_T)size));
+#endif
return res;
}
-#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
out->st_atime = in->st_atime;
out->st_mtime = in->st_mtime;
out->st_ctime = in->st_ctime;
- out->st_ino = in->st_ino;
}
#endif
#if defined(__mips64)
+// Undefine compatibility macros from <sys/stat.h>
+// so that they would not clash with the kernel_stat
+// st_[a|m|c]time fields
+#undef st_atime
+#undef st_mtime
+#undef st_ctime
+#if defined(SANITIZER_ANDROID)
+// Bionic sys/stat.h defines additional macros
+// for compatibility with the old NDKs and
+// they clash with the kernel_stat structure
+// st_[a|m|c]time_nsec fields.
+#undef st_atime_nsec
+#undef st_mtime_nsec
+#undef st_ctime_nsec
+#endif
static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
out->st_dev = in->st_dev;
out->st_size = in->st_size;
out->st_blksize = in->st_blksize;
out->st_blocks = in->st_blocks;
- out->st_atime = in->st_atime_nsec;
- out->st_mtime = in->st_mtime_nsec;
- out->st_ctime = in->st_ctime_nsec;
- out->st_ino = in->st_ino;
+#if defined(__USE_MISC) || \
+ defined(__USE_XOPEN2K8) || \
+ defined(SANITIZER_ANDROID)
+ out->st_atim.tv_sec = in->st_atime;
+ out->st_atim.tv_nsec = in->st_atime_nsec;
+ out->st_mtim.tv_sec = in->st_mtime;
+ out->st_mtim.tv_nsec = in->st_mtime_nsec;
+ out->st_ctim.tv_sec = in->st_ctime;
+ out->st_ctim.tv_nsec = in->st_ctime_nsec;
+#else
+ out->st_atime = in->st_atime;
+ out->st_atimensec = in->st_atime_nsec;
+ out->st_mtime = in->st_mtime;
+ out->st_mtimensec = in->st_mtime_nsec;
+ out->st_ctime = in->st_ctime;
+  out->st_ctimensec = in->st_ctime_nsec;
+#endif
}
#endif
uptr internal_stat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
- return internal_syscall(SYSCALL(stat), path, buf);
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, 0);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, 0);
}
uptr internal_lstat(const char *path, void *buf) {
-#if SANITIZER_FREEBSD
+#if SANITIZER_NETBSD
return internal_syscall(SYSCALL(lstat), path, buf);
+#elif SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path,
+ (uptr)buf, AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, AT_SYMLINK_NOFOLLOW);
}
uptr internal_fstat(fd_t fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS || SANITIZER_NETBSD
# if SANITIZER_MIPS64
// For mips64, fstat syscall fills buffer in the format of kernel_stat
struct kernel_stat kbuf;
}
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+#if SANITIZER_NETBSD
+ return internal_syscall_ptr(SYSCALL(readlink), path, buf, bufsize);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
(uptr)path, (uptr)buf, bufsize);
#else
}
void internal__exit(int exitcode) {
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
internal_syscall(SYSCALL(exit), exitcode);
#else
internal_syscall(SYSCALL(exit_group), exitcode);
return S_ISREG(st.st_mode);
}
-uptr GetTid() {
+tid_t GetTid() {
#if SANITIZER_FREEBSD
return (uptr)pthread_self();
+#elif SANITIZER_NETBSD
+ return _lwp_self();
#else
return internal_syscall(SYSCALL(gettid));
#endif
}
u64 NanoTime() {
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
timeval tv;
#else
kernel_timeval tv;
#endif
internal_memset(&tv, 0, sizeof(tv));
+#if SANITIZER_NETBSD
+ internal_syscall_ptr(SYSCALL(gettimeofday), &tv, NULL);
+#else
internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
+#endif
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
// 'environ' array (on FreeBSD) and does not use libc. This function should be
// called first inside __asan_init.
const char *GetEnv(const char *name) {
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
if (::environ != 0) {
uptr NameLen = internal_strlen(name);
for (char **Env = ::environ; *Env != 0; Env++) {
while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#elif SANITIZER_NETBSD
+    sched_yield();   /* No userspace futex-like synchronization */
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
#endif
void BlockingMutex::Unlock() {
atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
- u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
CHECK_NE(v, MtxUnlocked);
if (v == MtxSleeping) {
#if SANITIZER_FREEBSD
_umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#elif SANITIZER_NETBSD
+    /* No userspace futex-like synchronization */
#else
internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
#endif
// The actual size of this structure is specified by d_reclen.
// Note that getdents64 uses a different structure format. We only provide the
// 32-bit syscall here.
+#if SANITIZER_NETBSD
+// NetBSD's struct dirent differs from Linux's. At the moment we use only
+// d_fileno (which Linux calls d_ino), d_reclen, and d_name.
+struct linux_dirent {
+ u64 d_ino; // d_fileno
+ u16 d_reclen;
+ u16 d_namlen; // not used
+ u8 d_type; // not used
+ char d_name[NAME_MAX + 1];
+};
+#else
struct linux_dirent {
#if SANITIZER_X32 || defined(__aarch64__)
u64 d_ino;
#endif
char d_name[256];
};
+#endif
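
Either layout is consumed the same way: callers step through the returned
buffer using d_reclen, which is the only reliable stride. A minimal sketch in
the style of this file, assuming an open directory descriptor fd; error
checking via internal_iserror() is omitted for brevity:

  char buf[4096];
  uptr nread = internal_getdents(fd, (struct linux_dirent *)buf, sizeof(buf));
  for (uptr off = 0; off < nread;) {
    struct linux_dirent *d = (struct linux_dirent *)(buf + off);
    // d->d_ino (d_fileno on NetBSD) and d->d_name are usable here.
    off += d->d_reclen;  // d_reclen covers the whole, possibly padded, entry
  }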
// Syscall wrappers.
uptr internal_ptrace(int request, int pid, void *addr, void *data) {
+#if SANITIZER_NETBSD
+ // XXX We need additional work for ptrace:
+ // - for request, we use PT_FOO whereas Linux uses PTRACE_FOO
+ // - data is int for us, but void * for Linux
+ // - Linux sometimes uses data in the case where we use addr instead
+ // At this moment, this function is used only within
+ // "#if SANITIZER_LINUX && defined(__x86_64__)" block in
+ // sanitizer_stoptheworld_linux_libcdep.cc.
+ return internal_syscall_ptr(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
+#else
return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
(uptr)data);
+#endif
}
uptr internal_waitpid(int pid, int *status, int options) {
+#if SANITIZER_NETBSD
+ return internal_syscall(SYSCALL(wait4), pid, status, options,
+ NULL /* rusage */);
+#else
return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
0 /* rusage */);
+#endif
}
uptr internal_getpid() {
}
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+#if SANITIZER_NETBSD
+ return internal_syscall(SYSCALL(getdents), fd, dirp, (uptr)count);
+#elif SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
#else
return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
}
uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+#if SANITIZER_NETBSD
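+ // NetBSD's lseek(2) syscall takes a pad word before the 64-bit offset,
+ // hence the extra 0 argument.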
+ return internal_syscall64(SYSCALL(lseek), fd, 0, offset, whence);
+#else
return internal_syscall(SYSCALL(lseek), fd, offset, whence);
+#endif
}
#if SANITIZER_LINUX
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
#else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
return true;
}
+#if SANITIZER_WORDSIZE == 32
+// Take care of unusable kernel area in top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX && !SANITIZER_X32
+ const uptr gbyte = 1UL << 30;
+
+ // Firstly check if there are writable segments
+ // mapped to top gigabyte (e.g. stack).
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
+ }
+
+#if !SANITIZER_ANDROID
+ // Even if nothing is mapped, top Gb may still be accessible
+ // if we are running on 64-bit kernel.
+ // Uname may report misleading results if personality type
+ // is modified (e.g. under schroot) so check this as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK)
+ && uname(&uname_info) == 0
+ && internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX && !SANITIZER_X32
+}
+#endif // SANITIZER_WORDSIZE == 32
+
+uptr GetMaxVirtualAddress() {
+#if SANITIZER_NETBSD && defined(__x86_64__)
+ return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
+#elif SANITIZER_WORDSIZE == 64
+# if defined(__powerpc64__) || defined(__aarch64__)
+ // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
+ // We somehow need to figure out which one we are using now and choose
+ // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
+ // Note that with 'ulimit -s unlimited' the stack is moved away from the top
+ // of the address space, so simply checking the stack address is not enough.
+ // This should (does) work for both PowerPC64 Endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+# elif defined(__mips64)
+ return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
+# elif defined(__s390x__)
+ return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
+# else
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# endif
+#else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
+ return (1ULL << 31) - 1; // 0x7fffffff;
+# else
+ uptr res = (1ULL << 32) - 1; // 0xffffffff;
+ if (!common_flags()->full_address_space)
+ res -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&res), res);
+ return res;
+# endif
+#endif // SANITIZER_WORDSIZE
+}
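
The PowerPC64/AArch64 branch infers the layout from wherever the stack
currently sits. A worked instance of the bit trick, with an illustrative
address:

  // With GET_CURRENT_FRAME() == 0x00003fffd0a1b2c0 (a 46-bit layout),
  // MostSignificantSetBitIndex(...) == 45, so
  //   (1ULL << (45 + 1)) - 1 == 0x00003fffffffffff,
  // exactly the all-ones mask for that address space.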
+
uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
#if SANITIZER_ANDROID
return 4096;
#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
+#elif SANITIZER_USE_GETAUXVAL
+ return getauxval(AT_PAGESZ);
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
#endif
}
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
#if SANITIZER_FREEBSD
- const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
+#else
+ const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
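+ // The third MIB element is the target pid; -1 selects the calling process.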
+#endif
const char *default_module_name = "kern.proc.pathname";
size_t Size = buf_len;
bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
long long res;
-/* Stack frame offsets. */
-#if _CALL_ELF != 2
-#define FRAME_MIN_SIZE 112
-#define FRAME_TOC_SAVE 40
+// Stack frame structure.
+#if SANITIZER_PPC64V1
+// Back chain == 0 (SP + 112)
+// Frame (112 bytes):
+// Parameter save area (SP + 48), 8 doublewords
+// TOC save area (SP + 40)
+// Link editor doubleword (SP + 32)
+// Compiler doubleword (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+#elif SANITIZER_PPC64V2
+// Back chain == 0 (SP + 32)
+// Frame (32 bytes):
+// TOC save area (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
#else
-#define FRAME_MIN_SIZE 32
-#define FRAME_TOC_SAVE 24
+# error "Unsupported PPC64 ABI"
#endif
if (!fn || !child_stack)
return -EINVAL;
CHECK_EQ(0, (uptr)child_stack % 16);
- child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
- ((unsigned long long *)child_stack)[0] = (uptr)fn;
- ((unsigned long long *)child_stack)[1] = (uptr)arg;
register int (*__fn)(void *) __asm__("r3") = fn;
register void *__cstack __asm__("r4") = child_stack;
register int __flags __asm__("r5") = flags;
- register void * __arg __asm__("r6") = arg;
- register int * __ptidptr __asm__("r7") = parent_tidptr;
- register void * __newtls __asm__("r8") = newtls;
- register int * __ctidptr __asm__("r9") = child_tidptr;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
__asm__ __volatile__(
- /* fn, arg, child_stack are saved acrVoss the syscall */
+ /* fn and arg are saved across the syscall */
"mr 28, %5\n\t"
- "mr 29, %6\n\t"
"mr 27, %8\n\t"
/* syscall
+ r0 == __NR_clone
r3 == flags
r4 == child_stack
r5 == parent_tidptr
"crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
"bne- cr1, 1f\n\t"
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
/* Do the function call */
"std 2, %13(1)\n\t"
-#if _CALL_ELF != 2
+#if SANITIZER_PPC64V1
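+ /* ELFv1: r28 holds a function descriptor: entry point at +0, TOC at +8. */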
"ld 0, 0(28)\n\t"
"ld 2, 8(28)\n\t"
"mtctr 0\n\t"
-#else
+#elif SANITIZER_PPC64V2
"mr 12, 28\n\t"
"mtctr 12\n\t"
+#else
+# error "Unsupported PPC64 ABI"
#endif
"mr 3, 27\n\t"
"bctrl\n\t"
"1:\n\t"
"mr %0, 3\n\t"
: "=r" (res)
- : "0" (-1), "i" (EINVAL),
- "i" (__NR_clone), "i" (__NR_exit),
- "r" (__fn), "r" (__cstack), "r" (__flags),
- "r" (__arg), "r" (__ptidptr), "r" (__newtls),
- "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
- : "cr0", "cr1", "memory", "ctr",
- "r0", "r29", "r27", "r28");
+ : "0" (-1),
+ "i" (EINVAL),
+ "i" (__NR_clone),
+ "i" (__NR_exit),
+ "r" (__fn),
+ "r" (__cstack),
+ "r" (__flags),
+ "r" (__arg),
+ "r" (__ptidptr),
+ "r" (__newtls),
+ "r" (__ctidptr),
+ "i" (FRAME_SIZE),
+ "i" (FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ return res;
+}
+#elif defined(__i386__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 7 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)flags;
+ ((unsigned int *)child_stack)[1] = (uptr)0;
+ ((unsigned int *)child_stack)[2] = (uptr)fn;
+ ((unsigned int *)child_stack)[3] = (uptr)arg;
+ __asm__ __volatile__(
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+#ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+#endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "c"(child_stack),
+ "d"(parent_tidptr),
+ "S"(newtls),
+ "D"(child_tidptr)
+ : "memory");
+ return res;
+}
+#elif defined(__arm__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ unsigned int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)fn;
+ ((unsigned int *)child_stack)[1] = (uptr)arg;
+ register int r0 __asm__("r0") = flags;
+ register void *r1 __asm__("r1") = child_stack;
+ register int *r2 __asm__("r2") = parent_tidptr;
+ register void *r3 __asm__("r3") = newtls;
+ register int *r4 __asm__("r4") = child_tidptr;
+ register int r7 __asm__("r7") = __NR_clone;
+
+#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+#endif
+#if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+#endif
+
+#ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+#else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+#endif
+
+ __asm__ __volatile__(
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n"
+ BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
+ "i"(__NR_exit)
+ : "memory");
return res;
}
#endif // defined(__x86_64__) && SANITIZER_LINUX
#endif
-bool IsHandledDeadlySignal(int signum) {
- if (common_flags()->handle_abort && signum == SIGABRT)
- return true;
- if (common_flags()->handle_sigill && signum == SIGILL)
- return true;
- if (common_flags()->handle_sigfpe && signum == SIGFPE)
- return true;
- return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
}
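
A hedged sketch of how a caller might act on the three modes;
InstallDeadlySignalHandler() is a hypothetical helper, not defined here:

  static void MaybeInstallHandler(int signum) {
    HandleSignalMode mode = GetHandleSignalMode(signum);
    if (mode == kHandleSignalNo)
      return;                            // leave the signal untouched
    InstallDeadlySignalHandler(signum);  // hypothetical installer
    if (mode == kHandleSignalExclusive) {
      // A user handler installed later for signum is ignored, not chained.
    }
  }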
#if !SANITIZER_GO
}
#endif
-SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
ucontext_t *ucontext = (ucontext_t *)context;
#if defined(__x86_64__) || defined(__i386__)
static const uptr PF_WRITE = 1U << 1;
#if SANITIZER_FREEBSD
uptr err = ucontext->uc_mcontext.mc_err;
+#elif SANITIZER_NETBSD
+ uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
#else
uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
#endif
#endif
}
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.arm_pc;
*pc = ucontext->uc_mcontext.mc_rip;
*bp = ucontext->uc_mcontext.mc_rbp;
*sp = ucontext->uc_mcontext.mc_rsp;
+#elif SANITIZER_NETBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.__gregs[_REG_RIP];
+ *bp = ucontext->uc_mcontext.__gregs[_REG_RBP];
+ *sp = ucontext->uc_mcontext.__gregs[_REG_RSP];
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_RIP];
*pc = ucontext->uc_mcontext.mc_eip;
*bp = ucontext->uc_mcontext.mc_ebp;
*sp = ucontext->uc_mcontext.mc_esp;
+#elif SANITIZER_NETBSD
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.__gregs[_REG_EIP];
+ *bp = ucontext->uc_mcontext.__gregs[_REG_EBP];
+ *sp = ucontext->uc_mcontext.__gregs[_REG_ESP];
# else
ucontext_t *ucontext = (ucontext_t*)context;
*pc = ucontext->uc_mcontext.gregs[REG_EIP];
#endif
}
+void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
+
void MaybeReexec() {
// No need to re-exec on Linux.
}
-uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+void PrintModuleMap() { }
+
+void CheckNoDeepBind(const char *filename, int flag) {
+#ifdef RTLD_DEEPBIND
+ if (flag & RTLD_DEEPBIND) {
+ Report(
+ "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
+ " which is incompatibe with sanitizer runtime "
+ "(see https://github.com/google/sanitizers/issues/611 for details"
+ "). If you want to run %s library under sanitizers please remove "
+ "RTLD_DEEPBIND from dlopen flags.\n",
+ filename, filename);
+ Die();
+ }
+#endif
+}
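
For illustration, this is the pattern being rejected (libfoo.so is a made-up
name; the check is reached through the sanitizers' dlopen interception):

  void *h = dlopen("libfoo.so", RTLD_NOW | RTLD_DEEPBIND);
  // CheckNoDeepBind("libfoo.so", flags) prints the report above and calls
  // Die() before the load can proceed.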
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found) {
UNREACHABLE("FindAvailableMemoryRange is not available");
return 0;
}
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ if (!buffer || !length || length > 256)
+ return false;
+#if SANITIZER_USE_GETRANDOM
+ static atomic_uint8_t skip_getrandom_syscall;
+ if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
+ // Up to 256 bytes, getrandom will not be interrupted.
+ uptr res = internal_syscall(SYSCALL(getrandom), buffer, length,
+ blocking ? 0 : GRND_NONBLOCK);
+ int rverrno = 0;
+ if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
+ atomic_store_relaxed(&skip_getrandom_syscall, 1);
+ else if (res == length)
+ return true;
+ }
+#endif // SANITIZER_USE_GETRANDOM
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // 'blocking' is moot here: O_NONBLOCK has no effect when opening /dev/urandom.
+ uptr fd = internal_open("/dev/urandom", O_RDONLY);
+ if (internal_iserror(fd))
+ return false;
+ uptr res = internal_read(fd, buffer, length);
+ if (internal_iserror(res)) {
+ internal_close(fd); // don't leak the descriptor on a failed read
+ return false;
+ }
+ internal_close(fd);
+ return true;
+}
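
A small usage sketch, using only functions defined in this file:

  u64 seed;
  if (!GetRandom(&seed, sizeof(seed), /*blocking=*/false))
    seed = NanoTime();  // weak but always-available fallback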
+
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#define SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
-#include "sanitizer_posix.h"
+#include "sanitizer_platform_limits_netbsd.h"
#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_posix.h"
struct link_map; // Opaque type returned by dlopen().
// the one in <dirent.h>, which is used by readdir().
struct linux_dirent;
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+
+struct MemoryMappingLayoutData {
+ ProcSelfMapsBuff proc_self_maps;
+ const char *current;
+};
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const void* ss, void* oss);
#endif
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
- || defined(__powerpc64__) || defined(__s390__)
+ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
+ || defined(__arm__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
// Call cb for each region mapped by map.
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
+
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
+#elif defined(__arm__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
+#elif defined(__mips__)
+// On mips32r1, this goes via a kernel illegal instruction trap that's
+// optimized for v1.
+# define __get_tls() \
+ ({ register void** __v asm("v1"); \
+ __asm__(".set push\n" \
+ ".set mips32r2\n" \
+ "rdhwr %0,$29\n" \
+ ".set pop\n" : "=r"(__v)); \
+ __v; })
+#elif defined(__i386__)
+# define __get_tls() \
+ ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for TSan starting with N,
+// given that Android currently doesn't support ELF TLS. It is used to store
+// the sanitizers' thread-specific data.
+static const int TLS_SLOT_TSAN = 8;
+
+ALWAYS_INLINE uptr *get_android_tls_ptr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_TSAN]);
+}
+
+#endif // SANITIZER_ANDROID
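
A hedged usage sketch; ThreadState stands in for whatever per-thread object
the tool keeps:

  struct ThreadState;  // hypothetical per-thread state of the tool
  void StashThreadState(ThreadState *ts) {  // once, at thread start
    *get_android_tls_ptr() = reinterpret_cast<uptr>(ts);
  }
  ThreadState *CurrentThreadState() {       // on hot paths
    return reinterpret_cast<ThreadState *>(*get_android_tls_ptr());
  }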
+
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#endif // SANITIZER_LINUX_H
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_linux.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
-#if SANITIZER_ANDROID || SANITIZER_FREEBSD
#include <dlfcn.h> // for dlsym()
-#endif
-
#include <link.h>
#include <pthread.h>
#include <signal.h>
// Find the mapping that contains a stack variable.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr start, end, offset;
+ MemoryMappedSegment segment;
uptr prev_end = 0;
- while (proc_maps.Next(&start, &end, &offset, nullptr, 0,
- /* protection */nullptr)) {
- if ((uptr)&rl < end)
- break;
- prev_end = end;
+ while (proc_maps.Next(&segment)) {
+ if ((uptr)&rl < segment.end) break;
+ prev_end = segment.end;
}
- CHECK((uptr)&rl >= start && (uptr)&rl < end);
+ CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
// Get stacksize from rlimit, but clip it so that it does not overlap
// with other mappings.
uptr stacksize = rl.rlim_cur;
- if (stacksize > end - prev_end)
- stacksize = end - prev_end;
+ if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
// When running with unlimited stack size, we still want to set some limit.
// The unlimited stack size is caused by 'ulimit -s unlimited'.
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
if (stacksize > kMaxThreadStackSize)
stacksize = kMaxThreadStackSize;
- *stack_top = end;
- *stack_bottom = end - stacksize;
+ *stack_top = segment.end;
+ *stack_bottom = segment.end - stacksize;
return;
}
pthread_attr_t attr;
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
- CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
+ !SANITIZER_NETBSD
static uptr g_tls_size;
#ifdef __i386__
}
#else
void InitTlsSize() { }
-#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
+#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
+ // !SANITIZER_NETBSD
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \
- || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__)) \
- && SANITIZER_LINUX && !SANITIZER_ANDROID
+ || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) \
+ || defined(__arm__)) && SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
#ifdef _CS_GNU_LIBC_VERSION
char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
char *end;
int minor = internal_simple_strtoll(buf + 8, &end, 10);
- if (end != buf + 8 && (*end == '\0' || *end == '.')) {
+ if (end != buf + 8 && (*end == '\0' || *end == '.' || *end == '-')) {
int patch = 0;
if (*end == '.')
// strtoll will return 0 if no valid conversion could be performed
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
val = 1728; // Assume only one particular version for x32.
+ // For ARM sizeof(struct pthread) changed in Glibc 2.23.
+ else if (SANITIZER_ARM)
+ val = minor <= 22 ? 1120 : 1216;
else if (minor <= 3)
val = FIRST_32_SECOND_64(1104, 1696);
else if (minor == 4)
# endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
- (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
- InitTlsSize();
- g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
+ RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
return kTlsPreTcbSize;
}
#endif
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__)
+# elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
# elif defined(__s390__)
uptr ThreadSelf() {
return (uptr)ThreadSelfSegbase()[2];
}
-#endif // SANITIZER_FREEBSD
+#elif SANITIZER_NETBSD
+uptr ThreadSelf() { return (uptr)pthread_self(); }
+#endif // SANITIZER_NETBSD
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
-# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__)
+# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
+ || defined(__arm__)
*addr = ThreadSelf();
*size = GetTlsSize();
# else
*addr = (uptr) dtv[2];
*size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
}
-#elif SANITIZER_ANDROID
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD
*addr = 0;
*size = 0;
#else
#if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_ANDROID
+#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD
uptr addr, size;
GetTls(&addr, &size);
return size;
+#elif defined(__mips__) || defined(__powerpc64__)
+ return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
return g_tls_size;
#endif
# endif
struct DlIteratePhdrData {
- InternalMmapVector<LoadedModule> *modules;
+ InternalMmapVectorNoCtor<LoadedModule> *modules;
bool first;
};
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
- cur_module.addAddressRange(cur_beg, cur_end, executable);
+ bool writable = phdr->p_flags & PF_W;
+ cur_module.addAddressRange(cur_beg, cur_end, executable,
+ writable);
}
}
data->modules->push_back(cur_module);
int (*)(struct dl_phdr_info *, size_t, void *), void *);
#endif
-void ListOfModules::init() {
- clear();
+static bool requiresProcmaps() {
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
- u32 api_level = AndroidGetApiLevel();
// Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
// The runtime check allows the same library to work with
// both K and L (and future) Android releases.
- if (api_level <= ANDROID_LOLLIPOP_MR1) { // L or earlier
- MemoryMappingLayout memory_mapping(false);
- memory_mapping.DumpListOfModules(&modules_);
- return;
- }
+ return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
+#else
+ return false;
#endif
- DlIteratePhdrData data = {&modules_, true};
- dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+}
+
+static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
+ memory_mapping.DumpListOfModules(modules);
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ if (requiresProcmaps()) {
+ procmapsInit(&modules_);
+ } else {
+ DlIteratePhdrData data = {&modules_, true};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+ }
+}
+
+// When a custom loader is used, dl_iterate_phdr may not contain the full
+// list of modules. Allow callers to fall back to using procmaps.
+void ListOfModules::fallbackInit() {
+ if (!requiresProcmaps()) {
+ clearOrInit();
+ procmapsInit(&modules_);
+ } else {
+ clear();
+ }
}
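
A sketch of the intended call pattern; the emptiness heuristic is illustrative
only, real callers use their own signal that dl_iterate_phdr came up short:

  ListOfModules modules;
  modules.init();            // dl_iterate_phdr, or procmaps on old Android
  if (modules.size() == 0)
    modules.fallbackInit();  // retry via /proc/self/maps where allowed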
// getrusage does not give us the current RSS, only the max RSS.
WriteToSyslog(str);
}
+#if SANITIZER_ANDROID
+extern "C" __attribute__((weak)) void android_set_abort_message(const char *);
+void SetAbortMessage(const char *str) {
+ if (&android_set_abort_message) android_set_abort_message(str);
+}
+#else
+void SetAbortMessage(const char *str) {}
+#endif
+
#endif // SANITIZER_LINUX
} // namespace __sanitizer
// 4.4.6+ is OK.
if (minor == 4 && patch >= 6)
return true;
+ if (minor == 4 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(buf.version, "Ubuntu")) {
+ // Check Ubuntu 16.04
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 13) // 4.4.0-13 or later
+ return true;
+ }
// Otherwise, OK if 4.5+.
return minor >= 5;
} else {
size_--;
}
+ void extract(Item *prev, Item *x) {
+ CHECK(!empty());
+ CHECK_NE(prev, nullptr);
+ CHECK_NE(x, nullptr);
+ CHECK_EQ(prev->next, x);
+ prev->next = x->next;
+ if (last_ == x)
+ last_ = prev;
+ size_--;
+ }
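
A minimal usage sketch for extract(); matches() stands in for any predicate:

  // Unlink the first element past the head that satisfies matches().
  Item *prev = list.front();
  for (Item *x = prev ? prev->next : nullptr; x; prev = x, x = x->next) {
    if (matches(x)) {
      list.extract(prev, x);  // O(1) unlink; x itself is not freed
      break;
    }
  }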
+
Item *front() { return first_; }
const Item *front() const { return first_; }
Item *back() { return last_; }
#include <stdio.h>
#include "sanitizer_common.h"
+#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
uptr internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL);
- if (__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
return (uptr)mmap(addr, length, prot, flags, fd, offset);
}
uptr internal_munmap(void *addr, uptr length) {
- if (__munmap) return __munmap(addr, length);
+ if (&__munmap) return __munmap(addr, length);
return munmap(addr, length);
}
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset) {
- return sigprocmask(how, set, oldset);
+ // Don't use sigprocmask here, because it affects all threads.
+ return pthread_sigmask(how, set, oldset);
}
// Doesn't call pthread_atfork() handlers (but not available on 10.6).
extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;
int internal_fork() {
- if (__fork)
+ if (&__fork)
return __fork();
return fork();
}
return S_ISREG(st.st_mode);
}
-uptr GetTid() {
- // FIXME: This can potentially get truncated on 32-bit, where uptr is 4 bytes.
- uint64_t tid;
+tid_t GetTid() {
+ tid_t tid;
pthread_threadid_np(nullptr, &tid);
return tid;
}
void BlockingMutex::Lock() {
CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
CHECK_EQ(OS_SPINLOCK_INIT, 0);
- CHECK_NE(owner_, (uptr)pthread_self());
+ CHECK_EQ(owner_, 0);
OSSpinLockLock((OSSpinLock*)&opaque_storage_);
- CHECK(!owner_);
- owner_ = (uptr)pthread_self();
}
void BlockingMutex::Unlock() {
- CHECK(owner_ == (uptr)pthread_self());
- owner_ = 0;
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
void BlockingMutex::CheckLocked() {
- CHECK_EQ((uptr)pthread_self(), owner_);
+ CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
}
u64 NanoTime() {
void InitTlsSize() {
}
+uptr TlsBaseAddr() {
+ uptr segbase = 0;
+#if defined(__x86_64__)
+ asm("movq %%gs:0,%0" : "=r"(segbase));
+#elif defined(__i386__)
+ asm("movl %%gs:0,%0" : "=r"(segbase));
+#endif
+ return segbase;
+}
+
+// The size of the TLS on Darwin does not appear to be well documented;
+// however, the VM memory map suggests that it is 1024 uptrs in size,
+// i.e. 0x2000 bytes on x86_64 and 0x1000 bytes on i386.
+uptr TlsSize() {
+#if defined(__x86_64__) || defined(__i386__)
+ return 1024 * sizeof(uptr);
+#else
+ return 0;
+#endif
+}
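
In other words, the reported TLS block is [TlsBaseAddr(), TlsBaseAddr() +
TlsSize()). A sketch of what GetThreadStackAndTls() below hands back on
x86_64 Darwin:

  uptr tls_begin = TlsBaseAddr();        // %gs-relative segment base
  uptr tls_end = tls_begin + TlsSize();  // + 1024 * sizeof(uptr)
  // Root-scanning code (e.g. LSan) can treat [tls_begin, tls_end) as roots.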
+
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
#if !SANITIZER_GO
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
*stk_size = stack_top - stack_bottom;
- *tls_addr = 0;
- *tls_size = 0;
+ *tls_addr = TlsBaseAddr();
+ *tls_size = TlsSize();
#else
*stk_addr = 0;
*stk_size = 0;
}
void ListOfModules::init() {
- clear();
+ clearOrInit();
MemoryMappingLayout memory_mapping(false);
memory_mapping.DumpListOfModules(&modules_);
}
-bool IsHandledDeadlySignal(int signum) {
+void ListOfModules::fallbackInit() { clear(); }
+
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ // Handling fatal signals on watchOS and tvOS devices is disallowed.
if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
- // Handling fatal signals on watchOS and tvOS devices is disallowed.
- return false;
- if (common_flags()->handle_abort && signum == SIGABRT)
- return true;
- return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+ return kHandleSignalNo;
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
return result;
}
+bool PlatformHasDifferentMemcpyAndMemmove() {
+ // On OS X 10.7 memcpy() and memmove() are both resolved
+ // into memmove$VARIANT$sse42.
+ // See also https://github.com/google/sanitizers/issues/34.
+ // TODO(glider): need to check dynamically that memcpy() and memmove() are
+ // actually the same function.
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
+}
+
uptr GetRSS() {
struct task_basic_info info;
unsigned count = TASK_BASIC_INFO_COUNT;
#endif
}
-SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
ucontext_t *ucontext = static_cast<ucontext_t*>(context);
return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
#endif
}
-void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
*pc = ucontext->uc_mcontext->__ss.__pc;
# endif
}
+void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
+
#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
return *_NSGetArgv();
}
+#if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
+// The task_vm_info struct is normally provided by the macOS SDK, but we need
+// fields only available in 10.12+. Declare the struct manually to be able to
+// build against older SDKs.
+struct __sanitizer_task_vm_info {
+ mach_vm_size_t virtual_size;
+ integer_t region_count;
+ integer_t page_size;
+ mach_vm_size_t resident_size;
+ mach_vm_size_t resident_size_peak;
+ mach_vm_size_t device;
+ mach_vm_size_t device_peak;
+ mach_vm_size_t internal;
+ mach_vm_size_t internal_peak;
+ mach_vm_size_t external;
+ mach_vm_size_t external_peak;
+ mach_vm_size_t reusable;
+ mach_vm_size_t reusable_peak;
+ mach_vm_size_t purgeable_volatile_pmap;
+ mach_vm_size_t purgeable_volatile_resident;
+ mach_vm_size_t purgeable_volatile_virtual;
+ mach_vm_size_t compressed;
+ mach_vm_size_t compressed_peak;
+ mach_vm_size_t compressed_lifetime;
+ mach_vm_size_t phys_footprint;
+ mach_vm_address_t min_address;
+ mach_vm_address_t max_address;
+};
+#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
+ (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))
+
+uptr GetTaskInfoMaxAddress() {
+ __sanitizer_task_vm_info vm_info = {};
+ mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
+ int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
+ if (err == 0) {
+ return vm_info.max_address - 1;
+ } else {
+ // xnu cannot provide vm address limit
+ return 0x200000000 - 1;
+ }
+}
+#endif
+
+uptr GetMaxVirtualAddress() {
+#if SANITIZER_WORDSIZE == 64
+# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
+ // Get the maximum VM address
+ static uptr max_vm = GetTaskInfoMaxAddress();
+ CHECK(max_vm);
+ return max_vm;
+# else
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# endif
+#else // SANITIZER_WORDSIZE == 32
+ return (1ULL << 32) - 1; // 0xffffffff;
+#endif // SANITIZER_WORDSIZE
+}
+
uptr FindAvailableMemoryRange(uptr shadow_size,
uptr alignment,
- uptr left_padding) {
+ uptr left_padding,
+ uptr *largest_gap_found) {
typedef vm_region_submap_short_info_data_64_t RegionInfo;
enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
// Start searching for available memory region past PAGEZERO, which is
mach_vm_address_t address = start_address;
mach_vm_address_t free_begin = start_address;
kern_return_t kr = KERN_SUCCESS;
+ if (largest_gap_found) *largest_gap_found = 0;
while (kr == KERN_SUCCESS) {
mach_vm_size_t vmsize = 0;
natural_t depth = 0;
(vm_region_info_t)&vminfo, &count);
if (free_begin != address) {
// We found a free region [free_begin..address-1].
- uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding,
- alignment);
- if (shadow_address + shadow_size < (uptr)address) {
- return shadow_address;
+ uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
+ uptr gap_end = RoundDownTo((uptr)address, alignment);
+ uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
+ if (shadow_size < gap_size) {
+ return gap_start;
+ }
+
+ if (largest_gap_found && *largest_gap_found < gap_size) {
+ *largest_gap_found = gap_size;
}
}
// Move to the next region.
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void SignalContext::DumpAllRegisters(void *context) {
+ Report("Register values:\n");
+
+ ucontext_t *ucontext = (ucontext_t*)context;
+# define DUMPREG64(r) \
+ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG32(r) \
+ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG_(r) Printf(" "); DUMPREG(r);
+# define DUMPREG__(r) Printf(" "); DUMPREG(r);
+# define DUMPREG___(r) Printf(" "); DUMPREG(r);
+
+# if defined(__x86_64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
+ DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
+ DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
+ DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
+# elif defined(__i386__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
+ DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
+# elif defined(__aarch64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
+ DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
+ DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
+ DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
+ DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
+ DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
+ DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
+ DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n");
+# elif defined(__arm__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
+ DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
+ DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
+ DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
+# else
+# error "Unknown architecture"
+# endif
+
+# undef DUMPREG64
+# undef DUMPREG32
+# undef DUMPREG_
+# undef DUMPREG__
+# undef DUMPREG___
+# undef DUMPREG
+}
+
+static inline bool CompareBaseAddress(const LoadedModule &a,
+ const LoadedModule &b) {
+ return a.base_address() < b.base_address();
+}
+
+void FormatUUID(char *out, uptr size, const u8 *uuid) {
+ internal_snprintf(out, size,
+ "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X>",
+ uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
+ uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+}
+
+void PrintModuleMap() {
+ Printf("Process module map:\n");
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ memory_mapping.DumpListOfModules(&modules);
+ InternalSort(&modules, modules.size(), CompareBaseAddress);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ char uuid_str[128];
+ FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
+ Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
+ modules[i].max_executable_address(), modules[i].full_name(),
+ ModuleArchToString(modules[i].arch()), uuid_str);
+ }
+ Printf("End of module map.\n");
+}
+
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ UNIMPLEMENTED();
+}
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
namespace __sanitizer {
+struct MemoryMappingLayoutData {
+ int current_image;
+ u32 current_magic;
+ u32 current_filetype;
+ ModuleArch current_arch;
+ u8 current_uuid[kModuleUUIDSize];
+ int current_load_cmd_count;
+ char *current_load_cmd_addr;
+ bool current_instrumented;
+};
+
enum MacosVersion {
MACOS_VERSION_UNINITIALIZED = 0,
MACOS_VERSION_UNKNOWN,
char **GetEnviron();
+void RestrictMemoryToMaxAddress(uptr max_address);
+
} // namespace __sanitizer
extern "C" {
--- /dev/null
+//===-- sanitizer_mac_libcdep.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_mac.h"
+
+#include <sys/mman.h>
+
+namespace __sanitizer {
+
+void RestrictMemoryToMaxAddress(uptr max_address) {
+ uptr size_to_mmap = GetMaxVirtualAddress() + 1 - max_address;
+ void *res = MmapFixedNoAccess(max_address, size_to_mmap, "high gap");
+ CHECK(res != MAP_FAILED);
+}
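
Usage sketch; the 8 GB cap mirrors the iOS fallback value above and is
illustrative only:

  // Everything from 0x200000000 up to GetMaxVirtualAddress() becomes
  // inaccessible, so subsequent mappings land below the cap.
  RestrictMemoryToMaxAddress(0x200000000);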
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
// This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
mprotect(new_zone, allocated_size, PROT_READ);
}
+ // We're explicitly *NOT* registering the zone.
return new_zone;
}
+INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
+ COMMON_MALLOC_ENTER();
+ // We don't need to do anything here. We're not registering new zones, so we
+ // don't need to unregister. Just un-mprotect and free() the zone.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
+ }
+ if (zone->zone_name) {
+ COMMON_MALLOC_FREE((void *)zone->zone_name);
+ }
+ COMMON_MALLOC_FREE(zone);
+}
+
+extern unsigned malloc_num_zones;
+extern malloc_zone_t **malloc_zones;
+
+// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
+// libmalloc tries to set up a different zone as malloc_zones[0], it will call
+// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
+// make sure we are still the first (default) zone.
+INTERCEPTOR(int, mprotect, void *addr, size_t len, int prot) {
+ if (addr == malloc_zones && prot == PROT_READ) {
+ if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
+ for (unsigned i = 1; i < malloc_num_zones; i++) {
+ if (malloc_zones[i] == &sanitizer_zone) {
+ // Swap malloc_zones[0] and malloc_zones[i].
+ malloc_zones[i] = malloc_zones[0];
+ malloc_zones[0] = &sanitizer_zone;
+ break;
+ }
+ }
+ }
+ }
+ return REAL(mprotect)(addr, len, prot);
+}
+
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
COMMON_MALLOC_ENTER();
return &sanitizer_zone;
BlockingMutex();
void Lock();
void Unlock();
+
+ // This function does not guarantee an explicit check that the calling thread
+ // is the thread which owns the mutex. Such a check, while more strictly
+ // correct, causes problems in cases like StopTheWorld, where a parent thread
+ // owns the mutex but a child checks that it is locked. Rather than
+ // maintaining complex state to work around those situations, this only
+ // checks that the mutex is locked at all, and assumes callers to be
+ // generally well-behaved.
void CheckLocked();
private:
uptr opaque_storage_[10];
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
-#if !defined(__linux__) && !defined(__FreeBSD__) && \
- !defined(__APPLE__) && !defined(_WIN32)
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
+ !defined(__APPLE__) && !defined(_WIN32) && !defined(__Fuchsia__)
# error "This operating system is not supported"
#endif
# define SANITIZER_FREEBSD 0
#endif
+#if defined(__NetBSD__)
+# define SANITIZER_NETBSD 1
+#else
+# define SANITIZER_NETBSD 0
+#endif
+
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
# define SANITIZER_ANDROID 0
#endif
-#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
+#if defined(__Fuchsia__)
+# define SANITIZER_FUCHSIA 1
+#else
+# define SANITIZER_FUCHSIA 0
+#endif
+
+#define SANITIZER_POSIX \
+ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_NETBSD)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64
# define SANITIZER_PPC64V2 0
#endif
+#if defined(__arm__)
+# define SANITIZER_ARM 1
+#else
+# define SANITIZER_ARM 0
+#endif
+
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if SANITIZER_ANDROID && defined(__aarch64__)
+# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
# define SANITIZER_CAN_USE_ALLOCATOR64 1
# elif defined(__mips64) || defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# define SANITIZER_GO 0
#endif
+// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
+// pthread_exit() performs unwinding that leads to dlopen'ing libgcc_s.so.
+// dlopen mallocs a "libgcc_s.so" string, which confuses LSan: it fails to
+// realize that this allocation happens in the dynamic linker and should be
+// ignored.
+#if SANITIZER_PPC || defined(__thumb__)
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+#else
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+#endif
+
#endif // SANITIZER_PLATFORM_H
#include "sanitizer_internal_defs.h"
+#if SANITIZER_POSIX
+# define SI_POSIX 1
+#else
+# define SI_POSIX 0
+#endif
+
#if !SANITIZER_WINDOWS
-# define SI_NOT_WINDOWS 1
-# include "sanitizer_platform_limits_posix.h"
+# define SI_WINDOWS 0
#else
-# define SI_NOT_WINDOWS 0
+# define SI_WINDOWS 1
+#endif
+
+#if (SI_POSIX != 0) == (SI_WINDOWS != 0) && !SANITIZER_FUCHSIA
+# error "Windows is not POSIX!"
+#endif
+
+#if SI_POSIX
+# include "sanitizer_platform_limits_netbsd.h"
+# include "sanitizer_platform_limits_posix.h"
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define SI_FREEBSD 0
#endif
+#if SANITIZER_NETBSD
+# define SI_NETBSD 1
+#else
+# define SI_NETBSD 0
+#endif
+
#if SANITIZER_LINUX
# define SI_LINUX 1
#else
# define SI_IOS 0
#endif
-#if !SANITIZER_WINDOWS && !SANITIZER_MAC
-# define SI_UNIX_NOT_MAC 1
+#if SANITIZER_FUCHSIA
+# define SI_NOT_FUCHSIA 0
+#else
+# define SI_NOT_FUCHSIA 1
+#endif
+
+#if SANITIZER_POSIX && !SANITIZER_MAC
+# define SI_POSIX_NOT_MAC 1
#else
-# define SI_UNIX_NOT_MAC 0
+# define SI_POSIX_NOT_MAC 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_FREEBSD
+# define SI_LINUX_NOT_FREEBSD 1
+#else
+# define SI_LINUX_NOT_FREEBSD 0
#endif
-#define SANITIZER_INTERCEPT_STRLEN 1
-#define SANITIZER_INTERCEPT_STRNLEN SI_NOT_MAC
-#define SANITIZER_INTERCEPT_STRCMP 1
-#define SANITIZER_INTERCEPT_STRSTR 1
-#define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_STRCHR 1
-#define SANITIZER_INTERCEPT_STRCHRNUL SI_UNIX_NOT_MAC
-#define SANITIZER_INTERCEPT_STRRCHR 1
-#define SANITIZER_INTERCEPT_STRSPN 1
-#define SANITIZER_INTERCEPT_STRPBRK 1
+#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX
+#define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC
+#define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX
#define SANITIZER_INTERCEPT_MEMSET 1
#define SANITIZER_INTERCEPT_MEMMOVE 1
#define SANITIZER_INTERCEPT_MEMCPY 1
-#define SANITIZER_INTERCEPT_MEMCMP 1
+#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
+#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
# define SI_MAC_DEPLOYMENT_BELOW_10_7 1
#endif
// memmem on Darwin doesn't exist on 10.6
// FIXME: enable memmem on Windows.
-#define SANITIZER_INTERCEPT_MEMMEM \
- SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7
-#define SANITIZER_INTERCEPT_MEMCHR 1
-#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7)
+#define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD)
-#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_WRITE SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PWRITE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READ SI_POSIX
+#define SANITIZER_INTERCEPT_PREAD SI_POSIX
+#define SANITIZER_INTERCEPT_WRITE SI_POSIX
+#define SANITIZER_INTERCEPT_PWRITE SI_POSIX
+
+#define SANITIZER_INTERCEPT_FREAD SI_POSIX
+#define SANITIZER_INTERCEPT_FWRITE SI_POSIX
#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READV SI_POSIX
+#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
-#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
-#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_POSIX
+#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
-#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SCANF SI_POSIX
#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
#ifndef SANITIZER_INTERCEPT_PRINTF
-# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD
+# define SANITIZER_INTERCEPT_PRINTF SI_POSIX
+# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
#endif
-#define SANITIZER_INTERCEPT_FREXP 1
-#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
-#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETPWENT \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX
-#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWENT_R \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SETPWENT (SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME (SI_FREEBSD || SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
+#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_WAIT SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_INET SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETADDRINFO SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX
-#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_WAIT SI_POSIX
+#define SANITIZER_INTERCEPT_INET SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM SI_POSIX
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R (SI_FREEBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
+#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_SENDMSG SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MODF SI_POSIX
+#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
+#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX
+#define SANITIZER_INTERCEPT_IOCTL SI_POSIX
+#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
-#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
#else
#define SANITIZER_INTERCEPT_PTRACE 0
#endif
-#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SETLOCALE SI_POSIX
+#define SANITIZER_INTERCEPT_GETCWD SI_POSIX
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_POSIX
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_POSIX
+#define SANITIZER_INTERCEPT_MBSNRTOWCS (SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_POSIX
#define SANITIZER_INTERCEPT_WCSNRTOMBS \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_WCRTOMB \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_REALPATH SI_POSIX
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
+#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_POLL SI_POSIX
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WORDEXP \
- SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
+ (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGSETOPS \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_BACKTRACE SI_FREEBSD || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
+#define SANITIZER_INTERCEPT_BACKTRACE \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS \
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATFS64 \
- (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID
+ ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATVFS \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_POSIX
#define SANITIZER_INTERCEPT_ETHER_HOST \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SHMCTL \
- ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64)
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SHMCTL \
+ (SI_NETBSD || ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \
+ SANITIZER_WORDSIZE == 64)) // NOLINT
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
- SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
- SI_MAC || SI_LINUX_NOT_ANDROID
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TTYNAME_R SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
+#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
-#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX
+#define SANITIZER_INTERCEPT_REMQUO SI_POSIX
+#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX
+#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R \
- SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_ICONV SI_FREEBSD || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_ICONV \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_TIMES SI_POSIX
// FIXME: getline seems to be available on OSX 10.7
-#define SANITIZER_INTERCEPT_GETLINE SI_FREEBSD || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETLINE \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
-#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD || SI_MAC
+#define SANITIZER_INTERCEPT__EXIT \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC)
-#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
#define SANITIZER_INTERCEPT_GETIFADDRS \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
#if SI_LINUX && defined(__arm__)
#define SANITIZER_INTERCEPT_AEABI_MEM 1
#define SANITIZER_INTERCEPT_AEABI_MEM 0
#endif
#define SANITIZER_INTERCEPT___BZERO SI_MAC
-#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_POSIX)
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_TSEARCH \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FOPEN SI_POSIX
#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM (SI_LINUX_NOT_ANDROID || SI_NETBSD)
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
+#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
- SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#endif
-#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_GETPASS \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)
#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MLOCKX SI_POSIX
#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SEM SI_LINUX || SI_FREEBSD
-#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_MINCORE SI_LINUX
+#define SANITIZER_INTERCEPT_SEM (SI_LINUX || SI_FREEBSD || SI_NETBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
+#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
-#define SANITIZER_INTERCEPT_CTERMID SI_LINUX || SI_MAC || SI_FREEBSD
-#define SANITIZER_INTERCEPT_CTERMID_R SI_MAC || SI_FREEBSD
+#define SANITIZER_INTERCEPT_CTERMID \
+ (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD)
+#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD)
-#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX
-#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_SEND_SENDTO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPTOR_HOOKS (SI_LINUX || SI_MAC || SI_WINDOWS)
+#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
+#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
-#define SANITIZER_INTERCEPT_STAT (SI_FREEBSD || SI_MAC || SI_ANDROID)
-#define SANITIZER_INTERCEPT___XSTAT !SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD)
+#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_UTMP (SI_POSIX && !SI_MAC && !SI_FREEBSD)
+#define SANITIZER_INTERCEPT_UTMPX (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD)
+
+#define SANITIZER_INTERCEPT_GETLOADAVG \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
+
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_PVALLOC \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_CFREE \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC)
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID
+
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
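
Two mechanical changes run through the interceptor list above: SI_NOT_WINDOWS is replaced by SI_POSIX, and every multi-term value gains enclosing parentheses. The parentheses are not cosmetic, because these macros are substituted into larger preprocessor expressions (the FTIME line above negates one). A minimal sketch, with hypothetical macro names, of the precedence bug the parentheses prevent:

  #define SI_A 0
  #define SI_B 1
  #define INTERCEPT_OLD SI_A || SI_B    // expands bare:    0 || 1
  #define INTERCEPT_NEW (SI_A || SI_B)  // expands guarded: (0 || 1)

  #if !INTERCEPT_OLD  // becomes !0 || 1 == 1: taken although enabled
  #error "negation bound only to the first term"
  #endif
  #if !INTERCEPT_NEW  // becomes !(0 || 1) == 0: correctly skipped
  #error "never reached"
  #endif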
--- /dev/null
+//===-- sanitizer_platform_limits_netbsd.cc -------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link_elf.h>
+#include <net/if.h>
+#include <net/if_ether.h>
+#include <net/ppp_defs.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/mqueue.h>
+#include <sys/msg.h>
+#include <sys/mtio.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/soundcard.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/timespec.h>
+#include <sys/timex.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+#include <sys/utsname.h>
+#include <term.h>
+#include <termios.h>
+#include <time.h>
+#include <utime.h>
+#include <utmp.h>
+#include <utmpx.h>
+#include <wchar.h>
+#include <wordexp.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_netbsd.h"
+
+namespace __sanitizer {
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sigevent_sz = sizeof(struct sigevent);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_timex_sz = sizeof(struct timex);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+
+uptr sig_ign = (uptr)SIG_IGN;
+uptr sig_dfl = (uptr)SIG_DFL;
+uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+
+unsigned struct_utmp_sz = sizeof(struct utmp);
+unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+// ioctl arguments
+unsigned struct_ifreq_sz = sizeof(struct ifreq);
+unsigned struct_termios_sz = sizeof(struct termios);
+unsigned struct_winsize_sz = sizeof(struct winsize);
+unsigned struct_mtget_sz = sizeof(struct mtget);
+unsigned struct_mtop_sz = sizeof(struct mtop);
+unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+
+const unsigned IOCTL_NOT_PRESENT = 0;
+
+unsigned IOCTL_FIOASYNC = FIOASYNC;
+unsigned IOCTL_FIOCLEX = FIOCLEX;
+unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+unsigned IOCTL_FIONBIO = FIONBIO;
+unsigned IOCTL_FIONCLEX = FIONCLEX;
+unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+unsigned IOCTL_TIOCCONS = TIOCCONS;
+unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+unsigned IOCTL_TIOCGETD = TIOCGETD;
+unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+unsigned IOCTL_TIOCMGET = TIOCMGET;
+unsigned IOCTL_TIOCMSET = TIOCMSET;
+unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+unsigned IOCTL_TIOCPKT = TIOCPKT;
+unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+unsigned IOCTL_TIOCSETD = TIOCSETD;
+unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+unsigned IOCTL_TIOCSTI = TIOCSTI;
+unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#endif // SANITIZER_NETBSD
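
This new file records NetBSD struct sizes as runtime globals rather than header constants because the generic interceptors are compiled without NetBSD's system headers. A simplified sketch (not the actual interceptor, which also validates its inputs) of how such an exported size is consumed:

  INTERCEPTOR(void *, getpwnam, const char *name) {
    void *ctx;
    COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
    void *res = REAL(getpwnam)(name);
    if (res)  // sizeof(struct passwd) is known only through the global
      COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, struct_passwd_sz);
    return res;
  }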
--- /dev/null
+//===-- sanitizer_platform_limits_netbsd.h --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_NETBSD_H
+#define SANITIZER_PLATFORM_LIMITS_NETBSD_H
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \
+ ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift))))
+
+#if defined(__x86_64__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 608)
+#elif defined(__i386__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 324)
+#endif
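
On NetBSD the dlopen handle is a private rtld object, and the embedded link_map is only reachable by adding an ABI-specific byte offset (608 on x86_64, 324 on i386), hence the hard-coded shifts above. A hedged usage sketch with a hypothetical helper:

  // Recover a module's load base from its dlopen handle.
  static uptr GetModuleLoadBase(void *handle) {
    link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
    return map ? (uptr)map->l_addr : 0;
  }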
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+
+struct __sanitizer_iocb {
+ u64 aio_offset;
+ uptr aio_buf;
+ long aio_nbytes;
+ u32 aio_fildes;
+ u32 aio_lio_opcode;
+ long aio_reqprio;
+#if SANITIZER_WORDSIZE == 64
+ u8 aio_sigevent[32];
+#else
+ u8 aio_sigevent[20];
+#endif
+ u32 _state;
+ u32 _errno;
+ long _retval;
+};
+
+struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+};
+
+struct __sanitizer_sem_t {
+ uptr data[5];
+};
+
+struct __sanitizer_ipc_perm {
+ u32 uid;
+ u32 gid;
+ u32 cuid;
+ u32 cgid;
+ u32 mode;
+ unsigned short _seq;
+ long _key;
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ u32 shm_lpid;
+ u32 shm_cpid;
+ unsigned int shm_nattch;
+ u64 shm_atime;
+ u64 shm_dtime;
+ u64 shm_ctime;
+ void *_shm_internal;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ unsigned int ifa_addrflags;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+typedef long long __sanitizer_time_t;
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+ u64 d_fileno;
+ u16 d_reclen;
+ // more fields that we don't care about
+};
+
+typedef int __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+typedef u32 __sanitizer___kernel_uid_t;
+typedef u32 __sanitizer___kernel_gid_t;
+typedef u64 __sanitizer___kernel_off_t;
+typedef struct {
+ u32 fds_bits[8];
+} __sanitizer___kernel_fd_set;
+
+typedef struct {
+ unsigned int pta_magic;
+ int pta_flags;
+ void *pta_private;
+} __sanitizer_pthread_attr_t;
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+struct __sanitizer_sigaction {
+ union {
+ void (*handler)(int sig);
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern uptr sig_ign;
+extern uptr sig_dfl;
+extern uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char *we_strings;
+ uptr we_nbytes;
+};
+
+typedef void __sanitizer_FILE;
+#define SANITIZER_HAS_STRUCT_FILE 0
+
+extern int shmctl_ipc_stat;
+
+// This simplifies generic code
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#undef IOC_DIRMASK
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
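
These decode macros let the generic ioctl interceptor take a request number apart without including any system header: the direction says whether the kernel reads and/or writes the user argument, and the size gives the byte count to check. An illustrative decode (the concrete field values depend on how the platform encoded the request):

  unsigned req  = IOCTL_TIOCGWINSZ;
  unsigned dir  = IOC_DIR(req);   // does the kernel read or write the arg?
  unsigned size = IOC_SIZE(req);  // bytes to check, here the winsize struct
                                  // the kernel fills in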
+
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_winsize_sz;
+
+extern unsigned struct_arpreq_sz;
+
+extern unsigned struct_mtget_sz;
+extern unsigned struct_mtop_sz;
+extern unsigned struct_rtentry_sz;
+extern unsigned struct_sbi_instrument_sz;
+extern unsigned struct_seq_event_rec_sz;
+extern unsigned struct_synth_info_sz;
+extern unsigned struct_vt_mode_sz;
+extern unsigned struct_audio_buf_info_sz;
+extern unsigned struct_ppp_stats_sz;
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+
+// ioctl request identifiers
+
+// A special value to mark ioctls that are not present on the target platform,
+// when it cannot be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_TIOCCONS;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_SIOCGETSGCNT;
+extern unsigned IOCTL_SIOCGETVIFCNT;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCTOP;
+extern unsigned IOCTL_SIOCADDRT;
+extern unsigned IOCTL_SIOCDELRT;
+extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+extern unsigned IOCTL_SNDCTL_DSP_POST;
+extern unsigned IOCTL_SNDCTL_DSP_RESET;
+extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;
+extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;
+extern unsigned IOCTL_SNDCTL_MIDI_INFO;
+extern unsigned IOCTL_SNDCTL_MIDI_PRETIME;
+extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;
+extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;
+extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;
+extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;
+extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;
+extern unsigned IOCTL_SNDCTL_SEQ_PANIC;
+extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE;
+extern unsigned IOCTL_SNDCTL_SEQ_RESET;
+extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;
+extern unsigned IOCTL_SNDCTL_SEQ_SYNC;
+extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;
+extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;
+extern unsigned IOCTL_SNDCTL_SYNTH_INFO;
+extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;
+extern unsigned IOCTL_SNDCTL_TMR_CONTINUE;
+extern unsigned IOCTL_SNDCTL_TMR_METRONOME;
+extern unsigned IOCTL_SNDCTL_TMR_SELECT;
+extern unsigned IOCTL_SNDCTL_TMR_SOURCE;
+extern unsigned IOCTL_SNDCTL_TMR_START;
+extern unsigned IOCTL_SNDCTL_TMR_STOP;
+extern unsigned IOCTL_SNDCTL_TMR_TEMPO;
+extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;
+extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_BASS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CAPS;
+extern unsigned IOCTL_SOUND_MIXER_READ_CD;
+extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_READ_LINE;
+extern unsigned IOCTL_SOUND_MIXER_READ_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_READ_MIC;
+extern unsigned IOCTL_SOUND_MIXER_READ_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_READ_PCM;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;
+extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;
+extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_CD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;
+extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;
+extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+extern unsigned IOCTL_VT_ACTIVATE;
+extern unsigned IOCTL_VT_GETMODE;
+extern unsigned IOCTL_VT_OPENQRY;
+extern unsigned IOCTL_VT_RELDISP;
+extern unsigned IOCTL_VT_SETMODE;
+extern unsigned IOCTL_VT_WAITACTIVE;
+extern unsigned IOCTL_KDDISABIO;
+extern unsigned IOCTL_KDENABIO;
+extern unsigned IOCTL_KDGETLED;
+extern unsigned IOCTL_KDGKBMODE;
+extern unsigned IOCTL_KDGKBTYPE;
+extern unsigned IOCTL_KDMKTONE;
+extern unsigned IOCTL_KDSETLED;
+extern unsigned IOCTL_KDSETMODE;
+extern unsigned IOCTL_KDSKBMODE;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and a struct at the same time,
+// and thus requires an explicit "struct" in the sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#endif // SANITIZER_NETBSD
+
+#endif
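
The two CHECK macros above are, in effect, static assertions over sizeof and offsetof. A stand-alone sketch of what one CHECK_SIZE_AND_OFFSET expansion verifies, using hypothetical timeb_real/timeb_mine mirror structs:

  #include <cstddef>

  struct timeb_real { long long time; unsigned short millitm;
                      short timezone; short dstflag; };
  struct timeb_mine { long long time; unsigned short millitm;
                      short timezone; short dstflag; };

  static_assert(sizeof(((timeb_mine *)nullptr)->time) ==
                sizeof(((timeb_real *)nullptr)->time), "member size");
  static_assert(offsetof(timeb_mine, time) ==
                offsetof(timeb_real, time), "member offset");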
#endif
#include <arpa/inet.h>
#include <dirent.h>
-#include <errno.h>
#include <grp.h>
#include <limits.h>
#include <net/if.h>
#include <termios.h>
#include <time.h>
#include <wchar.h>
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+#include <utmp.h>
+#endif
#if !SANITIZER_IOS
#include <net/route.h>
#if !SANITIZER_ANDROID
#include <sys/mount.h>
#include <sys/timeb.h>
+#include <utmpx.h>
#endif
#if SANITIZER_LINUX
int shmctl_shm_stat = (int)SHM_STAT;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+#endif
+#if !SANITIZER_ANDROID
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+#endif
+
int map_fixed = MAP_FIXED;
int af_inet = (int)AF_INET;
unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
- const int errno_EINVAL = EINVAL;
-// EOWNERDEAD is not present in some older platforms.
-#if defined(EOWNERDEAD)
- const int errno_EOWNERDEAD = EOWNERDEAD;
-#else
- const int errno_EOWNERDEAD = -1;
-#endif
-
const int si_SEGV_MAPERR = SEGV_MAPERR;
const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
#define SANITIZER_PLATFORM_LIMITS_POSIX_H
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC
+
#include "sanitizer_internal_defs.h"
#include "sanitizer_platform.h"
// incorporates the map structure.
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
+// Get sys/_types.h, because that tells us whether 64-bit inodes are
+// used in struct dirent below.
+#include <sys/_types.h>
#else
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
#endif // !SANITIZER_FREEBSD
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz =
SANITIZER_ANDROID ? FIRST_32_SECOND_64(104, 128) :
- FIRST_32_SECOND_64(144, 216);
+ FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
unsigned __seq;
u64 __unused1;
u64 __unused2;
-#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
- unsigned int mode;
- unsigned short __seq;
- unsigned short __pad1;
- unsigned long __unused1;
- unsigned long __unused2;
#elif defined(__sparc__)
-# if defined(__arch64__)
+#if defined(__arch64__)
unsigned mode;
unsigned short __pad1;
-# else
+#else
unsigned short __pad1;
unsigned short mode;
unsigned short __pad2;
-# endif
+#endif
unsigned short __seq;
unsigned long long __unused1;
unsigned long long __unused2;
+#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
#else
unsigned short mode;
unsigned short __pad1;
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
#if defined(__sparc__)
- # if !defined(__arch64__)
+ #if !defined(__arch64__)
u32 __pad1;
- # endif
+ #endif
long shm_atime;
- # if !defined(__arch64__)
+ #if !defined(__arch64__)
u32 __pad2;
- # endif
+ #endif
long shm_dtime;
- # if !defined(__arch64__)
+ #if !defined(__arch64__)
u32 __pad3;
- # endif
+ #endif
long shm_ctime;
uptr shm_segsz;
int shm_cpid;
};
#elif SANITIZER_FREEBSD
struct __sanitizer_dirent {
+#if defined(__INO64)
+ unsigned long long d_fileno;
+ unsigned long long d_off;
+#else
unsigned int d_fileno;
+#endif
unsigned short d_reclen;
// more fields that we don't care about
};
extern int shmctl_shm_stat;
#endif
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ extern unsigned struct_utmp_sz;
+#endif
+#if !SANITIZER_ANDROID
+ extern unsigned struct_utmpx_sz;
+#endif
+
extern int map_fixed;
// ioctl arguments
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__)
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
// On sparc the 14-bit SIZE field overlaps with the
// least significant bit of DIR, so either IOC_READ or
// IOC_WRITE must be 1 in order to get a non-zero SIZE.
-# define IOC_SIZE(nr) \
- ((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? \
- 0 : (((nr) >> 16) & 0x3fff))
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
#else
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
#endif
extern unsigned IOCTL_PIO_SCRNMAP;
#endif
- extern const int errno_EINVAL;
- extern const int errno_EOWNERDEAD;
-
extern const int si_SEGV_MAPERR;
extern const int si_SEGV_ACCERR;
} // namespace __sanitizer
COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
offsetof(struct CLASS, MEMBER))
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC
+
#endif
#if SANITIZER_POSIX
#include "sanitizer_common.h"
+#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
+#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
-#if SANITIZER_LINUX
-#include <sys/utsname.h>
-#endif
-
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-#include <sys/personality.h>
-#endif
-
#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
return GetPageSize();
}
-#if SANITIZER_WORDSIZE == 32
-// Take care of unusable kernel area in top gigabyte.
-static uptr GetKernelAreaSize() {
-#if SANITIZER_LINUX && !SANITIZER_X32
- const uptr gbyte = 1UL << 30;
-
- // Firstly check if there are writable segments
- // mapped to top gigabyte (e.g. stack).
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr end, prot;
- while (proc_maps.Next(/*start*/nullptr, &end,
- /*offset*/nullptr, /*filename*/nullptr,
- /*filename_size*/0, &prot)) {
- if ((end >= 3 * gbyte)
- && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
- return 0;
- }
-
-#if !SANITIZER_ANDROID
- // Even if nothing is mapped, top Gb may still be accessible
- // if we are running on 64-bit kernel.
- // Uname may report misleading results if personality type
- // is modified (e.g. under schroot) so check this as well.
- struct utsname uname_info;
- int pers = personality(0xffffffffUL);
- if (!(pers & PER_MASK)
- && uname(&uname_info) == 0
- && internal_strstr(uname_info.machine, "64"))
- return 0;
-#endif // SANITIZER_ANDROID
-
- // Top gigabyte is reserved for kernel.
- return gbyte;
-#else
- return 0;
-#endif // SANITIZER_LINUX && !SANITIZER_X32
-}
-#endif // SANITIZER_WORDSIZE == 32
-
-uptr GetMaxVirtualAddress() {
-#if SANITIZER_WORDSIZE == 64
-# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
- // Ideally, we would derive the upper bound from MACH_VM_MAX_ADDRESS. The
- // upper bound can change depending on the device.
- return 0x200000000 - 1;
-# elif defined(__powerpc64__) || defined(__aarch64__)
- // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
- // We somehow need to figure out which one we are using now and choose
- // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
- // Note that with 'ulimit -s unlimited' the stack is moved away from the top
- // of the address space, so simply checking the stack address is not enough.
- // This should (does) work for both PowerPC64 Endian modes.
- // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
- return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
-# elif defined(__mips64)
- return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
-# elif defined(__s390x__)
- return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
-# else
- return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
-# endif
-#else // SANITIZER_WORDSIZE == 32
-# if defined(__s390__)
- return (1ULL << 31) - 1; // 0x7fffffff;
-# else
- uptr res = (1ULL << 32) - 1; // 0xffffffff;
- if (!common_flags()->full_address_space)
- res -= GetKernelAreaSize();
- CHECK_LT(reinterpret_cast<uptr>(&res), res);
- return res;
-# endif
-#endif // SANITIZER_WORDSIZE
-}
-
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = internal_mmap(nullptr, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
int reserrno;
- if (internal_iserror(res, &reserrno))
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
IncreaseTotalMmap(size);
return (void *)res;
void UnmapOrDie(void *addr, uptr size) {
if (!addr || !size) return;
uptr res = internal_munmap(addr, size);
- if (internal_iserror(res)) {
+ if (UNLIKELY(internal_iserror(res))) {
Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
SanitizerToolName, size, size, addr);
CHECK("unable to unmap" && 0);
DecreaseTotalMmap(size);
}
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr res = internal_mmap(nullptr, size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno))) {
+ if (reserrno == ENOMEM)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)res;
+}
+
// We want to map a chunk of address space aligned to 'alignment'.
-// We do it by maping a bit more and then unmaping redundant pieces.
+// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
- uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+ if (UNLIKELY(!map_res))
+ return nullptr;
uptr map_end = map_res + map_size;
uptr res = map_res;
- if (res & (alignment - 1)) // Not aligned.
- res = (map_res + alignment) & ~(alignment - 1);
- uptr end = res + size;
- if (res != map_res)
+ if (!IsAligned(res, alignment)) {
+ res = (map_res + alignment - 1) & ~(alignment - 1);
UnmapOrDie((void*)map_res, res - map_res);
+ }
+ uptr end = res + size;
if (end != map_end)
UnmapOrDie((void*)end, map_end - end);
return (void*)res;
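
A worked pass through the map-then-trim arithmetic above, with hypothetical numbers:

  // size = 0x4000, alignment = 0x4000          => map_size = 0x8000
  // internal_mmap returns map_res = 0x7f0000003000 (misaligned)
  // res  = (map_res + alignment - 1) & ~(alignment - 1) = 0x7f0000004000
  // head trimmed: [0x7f0000003000, 0x7f0000004000)            -- 0x1000 bytes
  // end  = res + size = 0x7f0000008000
  // tail trimmed: [0x7f0000008000, map_end = 0x7f000000b000)  -- 0x3000 bytes
  // result: a size-0x4000 block aligned to 0x4000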
MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-1, 0);
int reserrno;
- if (internal_iserror(p, &reserrno))
+ if (UNLIKELY(internal_iserror(p, &reserrno)))
ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
IncreaseTotalMmap(size);
return (void *)p;
}
-void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem) {
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
RoundUpTo(size, PageSize),
MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-1, 0);
int reserrno;
- if (internal_iserror(p, &reserrno)) {
- char mem_type[30];
+ if (UNLIKELY(internal_iserror(p, &reserrno))) {
+ if (tolerate_enomem && reserrno == ENOMEM)
+ return nullptr;
+ char mem_type[40];
internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
fixed_addr);
ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
return (void *)p;
}
+void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
+ return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+ return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
+}
+
bool MprotectNoAccess(uptr addr, uptr size) {
return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr start, end;
- while (proc_maps.Next(&start, &end,
- /*offset*/nullptr, /*filename*/nullptr,
- /*filename_size*/0, /*protection*/nullptr)) {
- if (start == end) continue; // Empty range.
- CHECK_NE(0, end);
- if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (segment.start == segment.end) continue; // Empty range.
+ CHECK_NE(0, segment.end);
+ if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
+ range_end))
return false;
}
return true;
void DumpProcessMap() {
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr start, end;
const sptr kBufSize = 4095;
char *filename = (char*)MmapOrDie(kBufSize, __func__);
+ MemoryMappedSegment segment(filename, kBufSize);
Report("Process memory map follows:\n");
- while (proc_maps.Next(&start, &end, /* file_offset */nullptr,
- filename, kBufSize, /* protection */nullptr)) {
- Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
+ while (proc_maps.Next(&segment)) {
+ Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
+ segment.filename);
}
Report("End of process memory map.\n");
UnmapOrDie(filename, kBufSize);
}
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
- uptr s, e, off, prot;
- InternalScopedString buff(kMaxPathLength);
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
- while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
- && internal_strcmp(module, buff.data()) == 0) {
- *start = s;
- *end = e;
+ InternalScopedString buff(kMaxPathLength);
+ MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+ while (proc_maps.Next(&segment)) {
+ if (segment.IsExecutable() &&
+ internal_strcmp(module, segment.filename) == 0) {
+ *start = segment.start;
+ *end = segment.end;
return true;
}
}
return false;
}
-SignalContext SignalContext::Create(void *siginfo, void *context) {
- auto si = (siginfo_t *)siginfo;
- uptr addr = (uptr)si->si_addr;
- uptr pc, sp, bp;
- GetPcSpBp(context, &pc, &sp, &bp);
- WriteFlag write_flag = GetWriteFlag(context);
- bool is_memory_access = si->si_signo == SIGSEGV;
- return SignalContext(context, addr, pc, sp, bp, is_memory_access, write_flag);
+uptr SignalContext::GetAddress() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return (uptr)si->si_addr;
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return si->si_signo == SIGSEGV;
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const siginfo_t *>(siginfo)->si_signo;
+}
+
+const char *SignalContext::Describe() const {
+ switch (GetType()) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ case SIGSEGV:
+ return "SEGV";
+ case SIGBUS:
+ return "BUS";
+ }
+ return "UNKNOWN SIGNAL";
}
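
The refactor above drops the eager SignalContext::Create in favor of accessors that read the stored siginfo on demand. A hedged call-site sketch (hypothetical report function):

  void ReportDeadlySignalSketch(const SignalContext &sig) {
    Report("ERROR: %s on address %p (pc %p)\n", sig.Describe(),
           (void *)sig.GetAddress(), (void *)sig.pc);
  }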
} // namespace __sanitizer
// ----------- ATTENTION -------------
// This header should NOT include any other headers from sanitizer runtime.
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_netbsd.h"
#include "sanitizer_platform_limits_posix.h"
#if !SANITIZER_POSIX
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]);
+
+bool IsStateDetached(int state);
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX_H
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_platform_limits_netbsd.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
return (uptr)pthread_self();
}
-void ReleaseMemoryToOS(uptr addr, uptr size) {
- madvise((void*)addr, size, MADV_DONTNEED);
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr page_size = GetPageSizeCached();
+ uptr beg_aligned = RoundUpTo(beg, page_size);
+ uptr end_aligned = RoundDownTo(end, page_size);
+ if (beg_aligned < end_aligned)
+ madvise((void*)beg_aligned, end_aligned - beg_aligned, MADV_DONTNEED);
}
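
The rewritten ReleaseMemoryPagesToOS rounds the range inward before calling madvise, so pages only partially covered by [beg, end) are never discarded. A worked example with page_size = 0x1000:

  // beg = 0x10234  ->  beg_aligned = 0x11000 (rounded up)
  // end = 0x15678  ->  end_aligned = 0x15000 (rounded down)
  // MADV_DONTNEED applies to [0x11000, 0x15000) only; live data on the
  // partially covered pages at 0x10000 and 0x15000 stays intact.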
void NoHugePagesInRegion(uptr addr, uptr size) {
void Abort() {
#if !SANITIZER_GO
// If we are handling SIGABRT, unhandle it first.
- if (IsHandledDeadlySignal(SIGABRT)) {
+ // TODO(vitalybuka): Check if handler belongs to sanitizer.
+ if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
- if (!IsHandledDeadlySignal(signum))
- return;
+ if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
+
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = (sa_sigaction_t)handler;
MaybeInstallSigaction(SIGFPE, handler);
MaybeInstallSigaction(SIGILL, handler);
}
+
+bool SignalContext::IsStackOverflow() const {
+ // Access at a reasonable offset above SP, or slightly below it (to account
+ // for the x86_64 or PowerPC redzone, an ARM push of multiple registers,
+ // etc.) is probably a stack overflow.
+#ifdef __s390__
+ // On s390, the fault address in siginfo points to the start of the page,
+ // not to the precise word that was accessed. Mask off the low bits of sp
+ // to take this into account.
+ bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF;
+#else
+ bool IsStackAccess = addr + 512 > sp && addr < sp + 0xFFFF;
+#endif
+
+#if __powerpc__
+ // Large stack frames can be allocated with e.g.
+ // lis r0,-10000
+ // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
+ // If the store faults then sp will not have been updated, so test above
+ // will not work, because the fault address will be more than just "slightly"
+ // below sp.
+ if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
+ u32 inst = *(unsigned *)pc;
+ u32 ra = (inst >> 16) & 0x1F;
+ u32 opcd = inst >> 26;
+ u32 xo = (inst >> 1) & 0x3FF;
+ // Check for store-with-update to sp. The instructions we accept are:
+ // stbu rs,d(ra) stbux rs,ra,rb
+ // sthu rs,d(ra) sthux rs,ra,rb
+ // stwu rs,d(ra) stwux rs,ra,rb
+ // stdu rs,ds(ra) stdux rs,ra,rb
+ // where ra is r1 (the stack pointer).
+ if (ra == 1 &&
+ (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
+ (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
+ IsStackAccess = true;
+ }
+#endif // __powerpc__
+
+ // We also check si_code to filter out a SEGV caused by something other
+ // than hitting the guard page or unmapped memory, for example an
+ // unaligned memory access.
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return IsStackAccess &&
+ (si->si_code == si_SEGV_MAPERR || si->si_code == si_SEGV_ACCERR);
+}
+
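For the PowerPC branch above, a worked example may help: stdux r1,r1,r0 assembles to 0x7C21016A, and extracting the same bit fields the heuristic uses yields opcd == 31, ra == 1 and xo == 181, so the faulting access is classified as a stack-pointer update. A standalone check:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t inst = 0x7C21016A;          // stdux r1,r1,r0
  const uint32_t ra = (inst >> 16) & 0x1F;   // 1   -> updates r1, the sp
  const uint32_t opcd = inst >> 26;          // 31  -> X-form store-with-update
  const uint32_t xo = (inst >> 1) & 0x3FF;   // 181 -> stdux
  std::printf("opcd=%u ra=%u xo=%u\n", opcd, ra, xo);
  return 0;
}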
#endif // SANITIZER_GO
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
// Same for /proc/self/exe in the symbolizer.
#if !SANITIZER_GO
Symbolizer::GetOrInit()->PrepareForSandboxing();
- CovPrepareForSandboxing(args);
#endif
}
return process_status;
}
+bool IsStateDetached(int state) {
+ return state == PTHREAD_CREATE_DETACHED;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX
namespace __sanitizer {
-StaticSpinMutex CommonSanitizerReportMutex;
-
static int AppendChar(char **buff, const char *buff_end, char c) {
if (*buff < buff_end) {
**buff = c;
// on the value of |pad_with_zero|.
static int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,
u8 base, u8 minimal_num_length, bool pad_with_zero,
- bool negative) {
+ bool negative, bool uppercase) {
uptr const kMaxLen = 30;
RAW_CHECK(base == 10 || base == 16);
RAW_CHECK(base == 10 || !negative);
if (negative && !pad_with_zero) result += AppendChar(buff, buff_end, '-');
for (; pos >= 0; pos--) {
char digit = static_cast<char>(num_buffer[pos]);
- result += AppendChar(buff, buff_end, (digit < 10) ? '0' + digit
- : 'a' + digit - 10);
+ digit = (digit < 10) ? '0' + digit : (uppercase ? 'A' : 'a') + digit - 10;
+ result += AppendChar(buff, buff_end, digit);
}
return result;
}
static int AppendUnsigned(char **buff, const char *buff_end, u64 num, u8 base,
- u8 minimal_num_length, bool pad_with_zero) {
+ u8 minimal_num_length, bool pad_with_zero,
+ bool uppercase) {
return AppendNumber(buff, buff_end, num, base, minimal_num_length,
- pad_with_zero, false /* negative */);
+ pad_with_zero, false /* negative */, uppercase);
}
static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
u8 minimal_num_length, bool pad_with_zero) {
bool negative = (num < 0);
return AppendNumber(buff, buff_end, (u64)(negative ? -num : num), 10,
- minimal_num_length, pad_with_zero, negative);
+ minimal_num_length, pad_with_zero, negative,
+ false /* uppercase */);
}
static int AppendString(char **buff, const char *buff_end, int precision,
int result = 0;
result += AppendString(buff, buff_end, -1, "0x");
result += AppendUnsigned(buff, buff_end, ptr_value, 16,
- SANITIZER_POINTER_FORMAT_LENGTH, true);
+ SANITIZER_POINTER_FORMAT_LENGTH,
+ true /* pad_with_zero */, false /* uppercase */);
return result;
}
int VSNPrintf(char *buff, int buff_length,
const char *format, va_list args) {
static const char *kPrintfFormatsHelp =
- "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x}; %p; %(\\.\\*)?s; %c\n";
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; %(\\.\\*)?s; "
+ "%c\n";
RAW_CHECK(format);
RAW_CHECK(buff_length > 0);
const char *buff_end = &buff[buff_length - 1];
break;
}
case 'u':
- case 'x': {
+ case 'x':
+ case 'X': {
uval = have_ll ? va_arg(args, u64)
: have_z ? va_arg(args, uptr)
: va_arg(args, unsigned);
- result += AppendUnsigned(&buff, buff_end, uval,
- (*cur == 'u') ? 10 : 16, width, pad_with_zero);
+ bool uppercase = (*cur == 'X');
+ result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
+ width, pad_with_zero, uppercase);
break;
}
case 'p': {
}
// Can be overridden in the frontend.
-#if SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void OnPrint(const char *str) {
- (void)str;
-}
-#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
-void OnPrint(const char *str);
+#if SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
+// Implementation must be defined in frontend.
+extern "C" void OnPrint(const char *str);
#else
-void OnPrint(const char *str) {
+SANITIZER_INTERFACE_WEAK_DEF(void, OnPrint, const char *str) {
(void)str;
}
#endif
PrintfAndReportCallback(str);
}
-static void SharedPrintfCode(bool append_pid, const char *format,
- va_list args) {
+static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
+ char *local_buffer,
+ int buffer_size,
+ const char *format,
+ va_list args) {
va_list args2;
va_copy(args2, args);
const int kLen = 16 * 1024;
- // |local_buffer| is small enough not to overflow the stack and/or violate
- // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
- // hand, the bigger the buffer is, the more the chance the error report will
- // fit into it.
- char local_buffer[400];
int needed_length;
char *buffer = local_buffer;
- int buffer_size = ARRAY_SIZE(local_buffer);
// First try to print a message using a local buffer, and then fall back to
// mmaped buffer.
for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
RAW_CHECK_MSG(needed_length < kLen, \
"Buffer in Report is too short!\n"); \
}
- if (append_pid) {
+ // Fuchsia's logging infrastructure always keeps track of the logging
+ // process, thread, and timestamp, so never prepend such information.
+ if (!SANITIZER_FUCHSIA && append_pid) {
int pid = internal_getpid();
const char *exe_name = GetProcessName();
if (common_flags()->log_exe_name && exe_name) {
"==%s", exe_name);
CHECK_NEEDED_LENGTH
}
- needed_length += internal_snprintf(buffer + needed_length,
- buffer_size - needed_length,
- "==%d==", pid);
+ needed_length += internal_snprintf(
+ buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
CHECK_NEEDED_LENGTH
}
needed_length += VSNPrintf(buffer + needed_length,
va_end(args2);
}
+static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
+ va_list args) {
+ // |local_buffer| is small enough not to overflow the stack and/or violate
+ // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
+ // hand, the bigger the buffer is, the more the chance the error report will
+ // fit into it.
+ char local_buffer[400];
+ SharedPrintfCodeNoBuffer(append_pid, local_buffer, ARRAY_SIZE(local_buffer),
+ format, args);
+}
+
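The VSNPrintf changes above add %X by threading an |uppercase| flag down to AppendNumber; the only difference from %x is the base character used for digits ten and above. A standalone sketch of that digit logic (HexDigit is a hypothetical helper, not runtime code):

#include <cstdio>

static char HexDigit(unsigned d, bool uppercase) {
  return d < 10 ? static_cast<char>('0' + d)
                : static_cast<char>((uppercase ? 'A' : 'a') + d - 10);
}

int main() {
  const unsigned value = 0xBEEF;
  for (int shift = 12; shift >= 0; shift -= 4)
    std::putchar(HexDigit((value >> shift) & 0xF, /*uppercase=*/true));
  std::putchar('\n');  // prints BEEF; with uppercase=false it prints beef
  return 0;
}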
FORMAT(1, 2)
void Printf(const char *format, ...) {
va_list args;
#ifndef SANITIZER_PROCMAPS_H
#define SANITIZER_PROCMAPS_H
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_MAC
+
#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mac.h"
#include "sanitizer_mutex.h"
namespace __sanitizer {
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
-struct ProcSelfMapsBuff {
- char *data;
- uptr mmaped_size;
- uptr len;
-};
-// Reads process memory map in an OS-specific way.
-void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+// Memory protection masks.
+static const uptr kProtectionRead = 1;
+static const uptr kProtectionWrite = 2;
+static const uptr kProtectionExecute = 4;
+static const uptr kProtectionShared = 8;
+
+struct MemoryMappedSegmentData;
+
+class MemoryMappedSegment {
+ public:
+ MemoryMappedSegment(char *buff = nullptr, uptr size = 0)
+ : filename(buff), filename_size(size), data_(nullptr) {}
+ ~MemoryMappedSegment() {}
+
+ bool IsReadable() const { return protection & kProtectionRead; }
+ bool IsWritable() const { return protection & kProtectionWrite; }
+ bool IsExecutable() const { return protection & kProtectionExecute; }
+ bool IsShared() const { return protection & kProtectionShared; }
+
+ void AddAddressRanges(LoadedModule *module);
+
+ uptr start;
+ uptr end;
+ uptr offset;
+ char *filename; // owned by caller
+ uptr filename_size;
+ uptr protection;
+ ModuleArch arch;
+ u8 uuid[kModuleUUIDSize];
+
+ private:
+ friend class MemoryMappingLayout;
+
+ // This field is assigned and owned by MemoryMappingLayout if needed
+ MemoryMappedSegmentData *data_;
+};
class MemoryMappingLayout {
public:
explicit MemoryMappingLayout(bool cache_enabled);
~MemoryMappingLayout();
- bool Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection);
+ bool Next(MemoryMappedSegment *segment);
void Reset();
// In some cases, e.g. when running under a sandbox on Linux, ASan is unable
// to obtain the memory mappings. It should fall back to pre-cached data
static void CacheMemoryMappings();
// Adds all mapped objects into a vector.
- void DumpListOfModules(InternalMmapVector<LoadedModule> *modules);
-
- // Memory protection masks.
- static const uptr kProtectionRead = 1;
- static const uptr kProtectionWrite = 2;
- static const uptr kProtectionExecute = 4;
- static const uptr kProtectionShared = 8;
+ void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
private:
void LoadFromCache();
- // FIXME: Hide implementation details for different platforms in
- // platform-specific files.
-# if SANITIZER_FREEBSD || SANITIZER_LINUX
- ProcSelfMapsBuff proc_self_maps_;
- const char *current_;
-
- // Static mappings cache.
- static ProcSelfMapsBuff cached_proc_self_maps_;
- static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
-# elif SANITIZER_MAC
- template<u32 kLCSegment, typename SegmentCommand>
- bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection);
- int current_image_;
- u32 current_magic_;
- u32 current_filetype_;
- int current_load_cmd_count_;
- char *current_load_cmd_addr_;
-# endif
+ MemoryMappingLayoutData data_;
};
-typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
- /*out*/uptr *stats, uptr stats_size);
-
-// Parse the contents of /proc/self/smaps and generate a memory profile.
-// |cb| is a tool-specific callback that fills the |stats| array containing
-// |stats_size| elements.
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
-
// Returns code range for the specified module.
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
} // namespace __sanitizer
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_MAC
#endif // SANITIZER_PROCMAPS_H
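With the header above, callers no longer pass six out-parameters to Next(); they construct one MemoryMappedSegment backed by a caller-owned filename buffer and let Next() refill it, as GetCodeRangeForFile does earlier in this patch. A sketch of the pattern, assuming it is compiled inside the sanitizer runtime (ListExecutableMappings is a hypothetical helper, shown for illustration only):

#include "sanitizer_common.h"
#include "sanitizer_procmaps.h"

namespace __sanitizer {
void ListExecutableMappings() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ false);
  InternalScopedString buff(kMaxPathLength);
  MemoryMappedSegment segment(buff.data(), kMaxPathLength);
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable())
      Printf("%zx-%zx %s\n", segment.start, segment.end, segment.filename);
  }
}
}  // namespace __sanitizer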
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "sanitizer_common.h"
#include "sanitizer_placement_new.h"
namespace __sanitizer {
-// Linker initialized.
-ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
-StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+static ProcSelfMapsBuff cached_proc_self_maps;
+static StaticSpinMutex cache_lock;
static int TranslateDigit(char c) {
if (c >= '0' && c <= '9')
return ParseNumber(p, 16);
}
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // data_ should be unused on this platform
+ CHECK(!data_);
+ module->addAddressRange(start, end, IsExecutable(), IsWritable());
+}
+
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- ReadProcMaps(&proc_self_maps_);
+ ReadProcMaps(&data_.proc_self_maps);
if (cache_enabled) {
- if (proc_self_maps_.mmaped_size == 0) {
+ if (data_.proc_self_maps.mmaped_size == 0) {
LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
+ CHECK_GT(data_.proc_self_maps.len, 0);
}
} else {
- CHECK_GT(proc_self_maps_.mmaped_size, 0);
+ CHECK_GT(data_.proc_self_maps.mmaped_size, 0);
}
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
MemoryMappingLayout::~MemoryMappingLayout() {
// Only unmap the buffer if it is different from the cached one. Otherwise
// it will be unmapped when the cache is refreshed.
- if (proc_self_maps_.data != cached_proc_self_maps_.data) {
- UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ if (data_.proc_self_maps.data != cached_proc_self_maps.data) {
+ UnmapOrDie(data_.proc_self_maps.data, data_.proc_self_maps.mmaped_size);
}
}
-void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_.data;
-}
+void MemoryMappingLayout::Reset() { data_.current = data_.proc_self_maps.data; }
// static
void MemoryMappingLayout::CacheMemoryMappings() {
- SpinMutexLock l(&cache_lock_);
+ SpinMutexLock l(&cache_lock);
// Don't invalidate the cache if the mappings are unavailable.
ProcSelfMapsBuff old_proc_self_maps;
- old_proc_self_maps = cached_proc_self_maps_;
- ReadProcMaps(&cached_proc_self_maps_);
- if (cached_proc_self_maps_.mmaped_size == 0) {
- cached_proc_self_maps_ = old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps;
+ ReadProcMaps(&cached_proc_self_maps);
+ if (cached_proc_self_maps.mmaped_size == 0) {
+ cached_proc_self_maps = old_proc_self_maps;
} else {
if (old_proc_self_maps.mmaped_size) {
UnmapOrDie(old_proc_self_maps.data,
}
void MemoryMappingLayout::LoadFromCache() {
- SpinMutexLock l(&cache_lock_);
- if (cached_proc_self_maps_.data) {
- proc_self_maps_ = cached_proc_self_maps_;
+ SpinMutexLock l(&cache_lock);
+ if (cached_proc_self_maps.data) {
+ data_.proc_self_maps = cached_proc_self_maps;
}
}
void MemoryMappingLayout::DumpListOfModules(
- InternalMmapVector<LoadedModule> *modules) {
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- uptr cur_beg, cur_end, cur_offset, prot;
InternalScopedString module_name(kMaxPathLength);
- for (uptr i = 0; Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
- module_name.size(), &prot);
- i++) {
- const char *cur_name = module_name.data();
+ MemoryMappedSegment segment(module_name.data(), module_name.size());
+ for (uptr i = 0; Next(&segment); i++) {
+ const char *cur_name = segment.filename;
if (cur_name[0] == '\0')
continue;
// Don't subtract 'cur_beg' from the first entry:
// mapped high at address space (in particular, higher than
// shadow memory of the tool), so the module can't be the
// first entry.
- uptr base_address = (i ? cur_beg : 0) - cur_offset;
+ uptr base_address = (i ? segment.start : 0) - segment.offset;
LoadedModule cur_module;
cur_module.set(cur_name, base_address);
- cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ segment.AddAddressRanges(&cur_module);
modules->push_back(cur_module);
}
}
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
//
//===----------------------------------------------------------------------===//
//
-// Information about the process mappings (FreeBSD-specific parts).
+// Information about the process mappings (FreeBSD and NetBSD-specific parts).
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
#include "sanitizer_common.h"
+#if SANITIZER_FREEBSD
#include "sanitizer_freebsd.h"
+#endif
#include "sanitizer_procmaps.h"
#include <unistd.h>
#include <sys/sysctl.h>
+#if SANITIZER_FREEBSD
#include <sys/user.h>
+#endif
// Fix 'kinfo_vmentry' definition on FreeBSD prior to v9.2 in 32-bit mode.
#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
namespace __sanitizer {
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
- const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+ const int Mib[] = {
+#if SANITIZER_FREEBSD
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_VMMAP,
+ getpid()
+#else
+ CTL_VM,
+ VM_PROC,
+ VM_PROC_MAP,
+ getpid(),
+ sizeof(struct kinfo_vmentry)
+#endif
+ };
+
size_t Size = 0;
- int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+ int Err = sysctl(Mib, ARRAY_SIZE(Mib), NULL, &Size, NULL, 0);
CHECK_EQ(Err, 0);
CHECK_GT(Size, 0);
size_t MmapedSize = Size * 4 / 3;
void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
Size = MmapedSize;
- Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+ Err = sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0);
CHECK_EQ(Err, 0);
proc_maps->data = (char*)VmMap;
proc_maps->len = Size;
}
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- char *last = proc_self_maps_.data + proc_self_maps_.len;
- if (current_ >= last) return false;
- uptr dummy;
- if (!start) start = &dummy;
- if (!end) end = &dummy;
- if (!offset) offset = &dummy;
- if (!protection) protection = &dummy;
- struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
-
- *start = (uptr)VmEntry->kve_start;
- *end = (uptr)VmEntry->kve_end;
- *offset = (uptr)VmEntry->kve_offset;
-
- *protection = 0;
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+ struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry *)data_.current;
+
+ segment->start = (uptr)VmEntry->kve_start;
+ segment->end = (uptr)VmEntry->kve_end;
+ segment->offset = (uptr)VmEntry->kve_offset;
+
+ segment->protection = 0;
if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
- *protection |= kProtectionRead;
+ segment->protection |= kProtectionRead;
if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
- *protection |= kProtectionWrite;
+ segment->protection |= kProtectionWrite;
if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
- *protection |= kProtectionExecute;
+ segment->protection |= kProtectionExecute;
- if (filename != NULL && filename_size > 0) {
- internal_snprintf(filename,
- Min(filename_size, (uptr)PATH_MAX),
- "%s", VmEntry->kve_path);
+ if (segment->filename != NULL && segment->filename_size > 0) {
+ internal_snprintf(segment->filename,
+ Min(segment->filename_size, (uptr)PATH_MAX), "%s",
+ VmEntry->kve_path);
}
- current_ += VmEntry->kve_structsize;
+#if SANITIZER_FREEBSD
+ data_.current += VmEntry->kve_structsize;
+#else
+ data_.current += sizeof(*VmEntry);
+#endif
return true;
}
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD
+#endif // SANITIZER_FREEBSD || SANITIZER_NETBSD
namespace __sanitizer {
void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
- CHECK(ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
- &proc_maps->mmaped_size, &proc_maps->len));
+ ReadFileToBuffer("/proc/self/maps", &proc_maps->data, &proc_maps->mmaped_size,
+ &proc_maps->len);
}
static bool IsOneOf(char c, char c1, char c2) {
return c == c1 || c == c2;
}
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- char *last = proc_self_maps_.data + proc_self_maps_.len;
- if (current_ >= last) return false;
- uptr dummy;
- if (!start) start = &dummy;
- if (!end) end = &dummy;
- if (!offset) offset = &dummy;
- if (!protection) protection = &dummy;
- char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+ char *next_line =
+ (char *)internal_memchr(data_.current, '\n', last - data_.current);
if (next_line == 0)
next_line = last;
// Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
- *start = ParseHex(&current_);
- CHECK_EQ(*current_++, '-');
- *end = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- CHECK(IsOneOf(*current_, '-', 'r'));
- *protection = 0;
- if (*current_++ == 'r')
- *protection |= kProtectionRead;
- CHECK(IsOneOf(*current_, '-', 'w'));
- if (*current_++ == 'w')
- *protection |= kProtectionWrite;
- CHECK(IsOneOf(*current_, '-', 'x'));
- if (*current_++ == 'x')
- *protection |= kProtectionExecute;
- CHECK(IsOneOf(*current_, 's', 'p'));
- if (*current_++ == 's')
- *protection |= kProtectionShared;
- CHECK_EQ(*current_++, ' ');
- *offset = ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ':');
- ParseHex(&current_);
- CHECK_EQ(*current_++, ' ');
- while (IsDecimal(*current_))
- current_++;
+ segment->start = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, '-');
+ segment->end = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ CHECK(IsOneOf(*data_.current, '-', 'r'));
+ segment->protection = 0;
+ if (*data_.current++ == 'r') segment->protection |= kProtectionRead;
+ CHECK(IsOneOf(*data_.current, '-', 'w'));
+ if (*data_.current++ == 'w') segment->protection |= kProtectionWrite;
+ CHECK(IsOneOf(*data_.current, '-', 'x'));
+ if (*data_.current++ == 'x') segment->protection |= kProtectionExecute;
+ CHECK(IsOneOf(*data_.current, 's', 'p'));
+ if (*data_.current++ == 's') segment->protection |= kProtectionShared;
+ CHECK_EQ(*data_.current++, ' ');
+ segment->offset = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ':');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ while (IsDecimal(*data_.current)) data_.current++;
// Qemu may lack the trailing space.
// https://github.com/google/sanitizers/issues/160
- // CHECK_EQ(*current_++, ' ');
+ // CHECK_EQ(*data_.current++, ' ');
// Skip spaces.
- while (current_ < next_line && *current_ == ' ')
- current_++;
+ while (data_.current < next_line && *data_.current == ' ') data_.current++;
// Fill in the filename.
- uptr i = 0;
- while (current_ < next_line) {
- if (filename && i < filename_size - 1)
- filename[i++] = *current_;
- current_++;
+ if (segment->filename) {
+ uptr len =
+ Min((uptr)(next_line - data_.current), segment->filename_size - 1);
+ internal_strncpy(segment->filename, data_.current, len);
+ segment->filename[len] = 0;
}
- if (filename && i < filename_size)
- filename[i] = 0;
- current_ = next_line + 1;
+
+ data_.current = next_line + 1;
return true;
}
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
+#include <mach/mach.h>
+
+// These are not available in older macOS SDKs.
+#ifndef CPU_SUBTYPE_X86_64_H
+#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7S
+#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7K
+#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
namespace __sanitizer {
+// Contains information used to iterate through sections.
+struct MemoryMappedSegmentData {
+ char name[kMaxSegName];
+ uptr nsects;
+ char *current_load_cmd_addr;
+ u32 lc_type;
+ uptr base_virt_addr;
+ uptr addr_mask;
+};
+
+template <typename Section>
+static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
+ bool isWritable) {
+ const Section *sc = (const Section *)data->current_load_cmd_addr;
+ data->current_load_cmd_addr += sizeof(Section);
+
+ uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+ uptr sec_end = sec_start + sc->size;
+ module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
+ sc->sectname);
+}
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // Don't iterate over sections when the caller hasn't set up the
+ // data pointer, when there are no sections, or when the segment
+ // is executable. Avoid iterating over executable sections because
+ // it will confuse libignore, and because the extra granularity
+ // of information is not needed by any sanitizers.
+ if (!data_ || !data_->nsects || IsExecutable()) {
+ module->addAddressRange(start, end, IsExecutable(), IsWritable(),
+ data_ ? data_->name : nullptr);
+ return;
+ }
+
+ do {
+ if (data_->lc_type == LC_SEGMENT) {
+ NextSectionLoad<struct section>(module, data_, IsWritable());
+#ifdef MH_MAGIC_64
+ } else if (data_->lc_type == LC_SEGMENT_64) {
+ NextSectionLoad<struct section_64>(module, data_, IsWritable());
+#endif
+ }
+ } while (--data_->nsects);
+}
+
MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
// _dyld_image_count is thread-unsafe. We need to register callbacks for
// adding and removing images which will invalidate the MemoryMappingLayout
// state.
- current_image_ = _dyld_image_count();
- current_load_cmd_count_ = -1;
- current_load_cmd_addr_ = 0;
- current_magic_ = 0;
- current_filetype_ = 0;
+ data_.current_image = _dyld_image_count();
+ data_.current_load_cmd_count = -1;
+ data_.current_load_cmd_addr = 0;
+ data_.current_magic = 0;
+ data_.current_filetype = 0;
+ data_.current_arch = kModuleArchUnknown;
+ internal_memset(data_.current_uuid, 0, kModuleUUIDSize);
}
+// The dyld load address should be unchanged throughout process execution,
+// and it is expensive to compute once many libraries have been loaded,
+// so cache it here and do not reset.
+static mach_header *dyld_hdr = 0;
+static const char kDyldPath[] = "/usr/lib/dyld";
+static const int kDyldImageIdx = -1;
+
// static
void MemoryMappingLayout::CacheMemoryMappings() {
// No-op on Mac for now.
// No-op on Mac for now.
}
+// _dyld_get_image_header() and related APIs don't report dyld itself.
+// We work around this by manually recursing through the memory map
+// until we hit a Mach header matching dyld instead. These recurse
+// calls are expensive, but the first memory map generation occurs
+// early in the process, when dyld is one of the only images loaded,
+// so it will be hit after only a few iterations.
+static mach_header *get_dyld_image_header() {
+ mach_port_name_t port;
+ if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
+ KERN_SUCCESS) {
+ return nullptr;
+ }
+
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ while (true) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(port, &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+ if (err != KERN_SUCCESS) return nullptr;
+
+ if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
+ mach_header *hdr = (mach_header *)address;
+ if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
+ hdr->filetype == MH_DYLINKER) {
+ return hdr;
+ }
+ }
+ address += size;
+ }
+}
+
+const mach_header *get_dyld_hdr() {
+ if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
+
+ return dyld_hdr;
+}
+
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, https://github.com/gperftools/gperftools.
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
-template<u32 kLCSegment, typename SegmentCommand>
-bool MemoryMappingLayout::NextSegmentLoad(
- uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size, uptr *protection) {
- const char* lc = current_load_cmd_addr_;
- current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
+template <u32 kLCSegment, typename SegmentCommand>
+static bool NextSegmentLoad(MemoryMappedSegment *segment,
+ MemoryMappedSegmentData *seg_data, MemoryMappingLayoutData &layout_data) {
+ const char *lc = layout_data.current_load_cmd_addr;
+ layout_data.current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
- const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
const SegmentCommand* sc = (const SegmentCommand *)lc;
- if (start) *start = sc->vmaddr + dlloff;
- if (protection) {
- // Return the initial protection.
- *protection = sc->initprot;
+ uptr base_virt_addr, addr_mask;
+ if (layout_data.current_image == kDyldImageIdx) {
+ base_virt_addr = (uptr)get_dyld_hdr();
+ // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
+ // it contains an absolute address rather than an offset for dyld.
+ // To make matters even more complicated, this absolute address
+ // isn't actually the absolute segment address, but the offset portion
+ // of the address is accurate when combined with the dyld base address,
+ // and the mask will give just this offset.
+ addr_mask = 0xfffff;
+ } else {
+ base_virt_addr =
+ (uptr)_dyld_get_image_vmaddr_slide(layout_data.current_image);
+ addr_mask = ~0;
}
- if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
- if (offset) {
- if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
- *offset = sc->vmaddr;
- } else {
- *offset = sc->fileoff;
- }
+
+ segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+ segment->end = segment->start + sc->vmsize;
+ // Most callers don't need section information, so only fill this struct
+ // when required.
+ if (seg_data) {
+ seg_data->nsects = sc->nsects;
+ seg_data->current_load_cmd_addr =
+ (char *)lc + sizeof(SegmentCommand);
+ seg_data->lc_type = kLCSegment;
+ seg_data->base_virt_addr = base_virt_addr;
+ seg_data->addr_mask = addr_mask;
+ internal_strncpy(seg_data->name, sc->segname,
+ ARRAY_SIZE(seg_data->name));
}
- if (filename) {
- internal_strncpy(filename, _dyld_get_image_name(current_image_),
- filename_size);
+
+ // Return the initial protection.
+ segment->protection = sc->initprot;
+ segment->offset = (layout_data.current_filetype == /*MH_EXECUTE*/ 0x2)
+ ? sc->vmaddr : sc->fileoff;
+ if (segment->filename) {
+ const char *src = (layout_data.current_image == kDyldImageIdx)
+ ? kDyldPath
+ : _dyld_get_image_name(layout_data.current_image);
+ internal_strncpy(segment->filename, src, segment->filename_size);
}
+ segment->arch = layout_data.current_arch;
+ internal_memcpy(segment->uuid, layout_data.current_uuid, kModuleUUIDSize);
return true;
}
return false;
}
-bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
- char filename[], uptr filename_size,
- uptr *protection) {
- for (; current_image_ >= 0; current_image_--) {
- const mach_header* hdr = _dyld_get_image_header(current_image_);
+ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
+ cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ switch (cputype) {
+ case CPU_TYPE_I386:
+ return kModuleArchI386;
+ case CPU_TYPE_X86_64:
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ CHECK(0 && "Invalid subtype of x86_64");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM:
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ CHECK(0 && "Invalid subtype of ARM");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM64:
+ return kModuleArchARM64;
+ default:
+ CHECK(0 && "Invalid CPU type");
+ return kModuleArchUnknown;
+ }
+}
+
+static const load_command *NextCommand(const load_command *lc) {
+ return (const load_command *)((char *)lc + lc->cmdsize);
+}
+
+static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_UUID) continue;
+
+ const uuid_command *uuid_lc = (const uuid_command *)lc;
+ const uint8_t *uuid = &uuid_lc->uuid[0];
+ internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
+ return;
+ }
+}
+
+static bool IsModuleInstrumented(const load_command *first_lc) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_LOAD_DYLIB) continue;
+
+ const dylib_command *dylib_lc = (const dylib_command *)lc;
+ uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;
+ const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset;
+ dylib_name = StripModuleName(dylib_name);
+ if (dylib_name != 0 && (internal_strstr(dylib_name, "libclang_rt."))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
if (!hdr) continue;
- if (current_load_cmd_count_ < 0) {
+ if (data_.current_load_cmd_count < 0) {
// Set up for this image.
- current_load_cmd_count_ = hdr->ncmds;
- current_magic_ = hdr->magic;
- current_filetype_ = hdr->filetype;
- switch (current_magic_) {
+ data_.current_load_cmd_count = hdr->ncmds;
+ data_.current_magic = hdr->magic;
+ data_.current_filetype = hdr->filetype;
+ data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
+ switch (data_.current_magic) {
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
+ data_.current_load_cmd_addr = (char *)hdr + sizeof(mach_header_64);
break;
}
#endif
case MH_MAGIC: {
- current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
+ data_.current_load_cmd_addr = (char *)hdr + sizeof(mach_header);
break;
}
default: {
continue;
}
}
+ FindUUID((const load_command *)data_.current_load_cmd_addr,
+ data_.current_uuid);
+ data_.current_instrumented = IsModuleInstrumented(
+ (const load_command *)data_.current_load_cmd_addr);
}
- for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
- switch (current_magic_) {
- // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
+ for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ switch (data_.current_magic) {
+ // data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
case MH_MAGIC_64: {
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
- start, end, offset, filename, filename_size, protection))
+ segment, segment->data_, data_))
return true;
break;
}
#endif
case MH_MAGIC: {
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
- start, end, offset, filename, filename_size, protection))
+ segment, segment->data_, data_))
return true;
break;
}
}
void MemoryMappingLayout::DumpListOfModules(
- InternalMmapVector<LoadedModule> *modules) {
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
Reset();
- uptr cur_beg, cur_end, prot;
InternalScopedString module_name(kMaxPathLength);
- for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
- module_name.size(), &prot);
- i++) {
- const char *cur_name = module_name.data();
- if (cur_name[0] == '\0')
- continue;
+ MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+ MemoryMappedSegmentData data;
+ segment.data_ = &data;
+ while (Next(&segment)) {
+ if (segment.filename[0] == '\0') continue;
LoadedModule *cur_module = nullptr;
if (!modules->empty() &&
- 0 == internal_strcmp(cur_name, modules->back().full_name())) {
+ 0 == internal_strcmp(segment.filename, modules->back().full_name())) {
cur_module = &modules->back();
} else {
modules->push_back(LoadedModule());
cur_module = &modules->back();
- cur_module->set(cur_name, cur_beg);
+ cur_module->set(segment.filename, segment.start, segment.arch,
+ segment.uuid, data_.current_instrumented);
}
- cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ segment.AddAddressRanges(cur_module);
}
}
uptr size;
uptr count;
void *batch[kSize];
+
+ void init(void *ptr, uptr size) {
+ count = 1;
+ batch[0] = ptr;
+ this->size = size + sizeof(QuarantineBatch); // Account for the batch size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr quarantined_size() const {
+ return size - sizeof(QuarantineBatch);
+ }
+
+ void push_back(void *ptr, uptr size) {
+ CHECK_LT(count, kSize);
+ batch[count++] = ptr;
+ this->size += size;
+ }
+
+ bool can_merge(const QuarantineBatch* const from) const {
+ return count + from->count <= kSize;
+ }
+
+ void merge(QuarantineBatch* const from) {
+ CHECK_LE(count + from->count, kSize);
+ CHECK_GE(size, sizeof(QuarantineBatch));
+
+ for (uptr i = 0; i < from->count; ++i)
+ batch[count + i] = from->batch[i];
+ count += from->count;
+ size += from->quarantined_size();
+
+ from->count = 0;
+ from->size = sizeof(QuarantineBatch);
+ }
};
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8Kb.
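In the new QuarantineBatch helpers, |size| deliberately includes sizeof(QuarantineBatch) itself, which is why merge() transfers only quarantined_size() and resets the donor back to its bare header size. A standalone model of that accounting (Batch and kHeader are stand-ins, not the real types):

#include <cstddef>
#include <cstdio>

struct Batch {
  static const size_t kHeader = 64;  // stand-in for sizeof(QuarantineBatch)
  size_t count = 0, size = kHeader;
  size_t quarantined() const { return size - kHeader; }
  void merge(Batch *from) {
    count += from->count;
    size += from->quarantined();  // move the payload, not the donor's header
    from->count = 0;
    from->size = kHeader;
  }
};

int main() {
  Batch a, b;
  a.count = 3; a.size = Batch::kHeader + 300;
  b.count = 2; b.size = Batch::kHeader + 200;
  a.merge(&b);
  std::printf("a: count=%zu payload=%zu; b: payload=%zu\n",
              a.count, a.quarantined(), b.quarantined());  // 5, 500, 0
  return 0;
}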
}
void Init(uptr size, uptr cache_size) {
- atomic_store(&max_size_, size, memory_order_release);
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per Put() call).
+ CHECK((size == 0 && cache_size == 0) || cache_size != 0);
+
+ atomic_store(&max_size_, size, memory_order_relaxed);
atomic_store(&min_size_, size / 10 * 9,
- memory_order_release); // 90% of max size.
- max_cache_size_ = cache_size;
+ memory_order_relaxed); // 90% of max size.
+ atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
}
- uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
+ uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
+ uptr GetCacheSize() const {
+ return atomic_load(&max_cache_size_, memory_order_relaxed);
+ }
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
- c->Enqueue(cb, ptr, size);
- if (c->Size() > max_cache_size_)
+ uptr cache_size = GetCacheSize();
+ if (cache_size) {
+ c->Enqueue(cb, ptr, size);
+ } else {
+ // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
+ cb.Recycle(ptr);
+ }
+ // Check cache size anyway to accommodate a runtime cache_size change.
+ if (c->Size() > cache_size)
Drain(c, cb);
}
Recycle(cb);
}
+ void PrintStats() const {
+ // Assumes the world is stopped, just like the allocator's PrintStats does.
+ Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
+ GetSize() >> 20, GetCacheSize() >> 10);
+ cache_.PrintStats();
+ }
+
private:
// Read-only data.
char pad0_[kCacheLineSize];
atomic_uintptr_t max_size_;
atomic_uintptr_t min_size_;
- uptr max_cache_size_;
+ atomic_uintptr_t max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
SpinMutex recycle_mutex_;
void NOINLINE Recycle(Callback cb) {
Cache tmp;
- uptr min_size = atomic_load(&min_size_, memory_order_acquire);
+ uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
{
SpinMutexLock l(&cache_mutex_);
+ // Go over the batches and merge partially filled ones to
+ // save some memory; otherwise the batches themselves (since the memory
+ // they use is counted against the quarantine limit) can outweigh the
+ // user's actual quarantined chunks, which defeats the purpose of the
+ // quarantine.
+ uptr cache_size = cache_.Size();
+ uptr overhead_size = cache_.OverheadSize();
+ CHECK_GE(cache_size, overhead_size);
+ // Do the merge only when the overhead exceeds this predefined limit
+ // (which might require some tuning). With the default threshold of 100%,
+ // merging starts once the batch headers alone outweigh the user data they
+ // track; this spares a merge attempt when the list is unlikely to contain
+ // batches suitable for merging.
+ const uptr kOverheadThresholdPercents = 100;
+ if (cache_size > overhead_size &&
+ overhead_size * (100 + kOverheadThresholdPercents) >
+ cache_size * kOverheadThresholdPercents) {
+ cache_.MergeBatches(&tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
while (cache_.Size() > min_size) {
- QuarantineBatch *b = cache_.DequeueBatch();
- tmp.EnqueueBatch(b);
+ tmp.EnqueueBatch(cache_.DequeueBatch());
}
}
recycle_mutex_.Unlock();
list_.clear();
}
+ // Total memory used, including internal accounting.
uptr Size() const {
return atomic_load(&size_, memory_order_relaxed);
}
+ // Memory used for internal accounting.
+ uptr OverheadSize() const {
+ return list_.size() * sizeof(QuarantineBatch);
+ }
+
void Enqueue(Callback cb, void *ptr, uptr size) {
if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
- AllocBatch(cb);
- size += sizeof(QuarantineBatch); // Count the batch in Quarantine size.
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
+ b->init(ptr, size);
+ EnqueueBatch(b);
+ } else {
+ list_.back()->push_back(ptr, size);
+ SizeAdd(size);
}
- QuarantineBatch *b = list_.back();
- CHECK(b);
- b->batch[b->count++] = ptr;
- b->size += size;
- SizeAdd(size);
}
- void Transfer(QuarantineCache *c) {
- list_.append_back(&c->list_);
- SizeAdd(c->Size());
- atomic_store(&c->size_, 0, memory_order_relaxed);
+ void Transfer(QuarantineCache *from_cache) {
+ list_.append_back(&from_cache->list_);
+ SizeAdd(from_cache->Size());
+
+ atomic_store(&from_cache->size_, 0, memory_order_relaxed);
}
void EnqueueBatch(QuarantineBatch *b) {
return b;
}
+ void MergeBatches(QuarantineCache *to_deallocate) {
+ uptr extracted_size = 0;
+ QuarantineBatch *current = list_.front();
+ while (current && current->next) {
+ if (current->can_merge(current->next)) {
+ QuarantineBatch *extracted = current->next;
+ // Move all the chunks into the current batch.
+ current->merge(extracted);
+ CHECK_EQ(extracted->count, 0);
+ CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
+ // Remove the next batch from the list and account for its size.
+ list_.extract(current, extracted);
+ extracted_size += extracted->size;
+ // Add it to deallocation list.
+ to_deallocate->EnqueueBatch(extracted);
+ } else {
+ current = current->next;
+ }
+ }
+ SizeSub(extracted_size);
+ }
+
+ void PrintStats() const {
+ uptr batch_count = 0;
+ uptr total_overhead_bytes = 0;
+ uptr total_bytes = 0;
+ uptr total_quarantine_chunks = 0;
+ for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
+ batch_count++;
+ total_bytes += (*it).size;
+ total_overhead_bytes += (*it).size - (*it).quarantined_size();
+ total_quarantine_chunks += (*it).count;
+ }
+ uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
+ int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
+ 0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
+ uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
+ int memory_overhead_percent = total_quarantined_bytes == 0 ?
+ 0 : total_overhead_bytes * 100 / total_quarantined_bytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
+ "\n",
+ batch_count, total_bytes, total_quarantined_bytes,
+ total_quarantine_chunks, quarantine_chunks_capacity,
+ chunks_usage_percent, memory_overhead_percent);
+ }
+
private:
- IntrusiveList<QuarantineBatch> list_;
+ typedef IntrusiveList<QuarantineBatch> List;
+
+ List list_;
atomic_uintptr_t size_;
void SizeAdd(uptr add) {
void SizeSub(uptr sub) {
atomic_store(&size_, Size() - sub, memory_order_relaxed);
}
-
- NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
- QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
- CHECK(b);
- b->count = 0;
- b->size = 0;
- list_.push_back(b);
- return b;
- }
};
+
} // namespace __sanitizer
#endif // SANITIZER_QUARANTINE_H
SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
+ const char *Warning() const { return Red(); }
+ const char *MemoryByte() { return Magenta(); }
+
protected:
const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
--- /dev/null
+//===-- sanitizer_signal_interceptors.inc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Signal interceptors for sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_interceptors.h"
+
+using namespace __sanitizer;
+
+#if SANITIZER_INTERCEPT_BSD_SIGNAL
+INTERCEPTOR(void *, bsd_signal, int signum, void *handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ return REAL(bsd_signal)(signum, handler);
+}
+#define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal)
+#else // SANITIZER_INTERCEPT_BSD_SIGNAL
+#define INIT_BSD_SIGNAL
+#endif // SANITIZER_INTERCEPT_BSD_SIGNAL
+
+#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+INTERCEPTOR(void *, signal, int signum, void *handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return nullptr;
+ return REAL(signal)(signum, handler);
+}
+#define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal)
+
+INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
+ struct sigaction *oldact) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ return REAL(sigaction)(signum, act, oldact);
+}
+#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction)
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction)(signum, (const struct sigaction *)act,
+ (struct sigaction *)oldact);
+}
+} // namespace __sanitizer
+#else // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define INIT_SIGNAL
+#define INIT_SIGACTION
+// We still need REAL(sigaction) defined on the other systems.
+DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
+ struct sigaction *oldact)
+#endif // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+
+static void InitializeSignalInterceptors() {
+ static bool was_called_once;
+ CHECK(!was_called_once);
+ was_called_once = true;
+
+ INIT_BSD_SIGNAL;
+ INIT_SIGNAL;
+ INIT_SIGACTION;
+}
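The common thread in these interceptors is the GetHandleSignalMode() gate: in exclusive mode the call is swallowed but still reports success, so the sanitizer keeps ownership of the signal without breaking callers that check the return value. A standalone model of that policy (the enum values here are stand-ins; only kHandleSignalNo and kHandleSignalExclusive appear in this patch, kHandleSignalYes is assumed for the forwarding case):

#include <cstdio>

enum HandleSignalMode { kHandleSignalNo, kHandleSignalYes, kHandleSignalExclusive };

static int forwarded = 0;

static int intercepted_sigaction(HandleSignalMode mode) {
  if (mode == kHandleSignalExclusive) return 0;  // swallowed, reports success
  ++forwarded;                                   // would call REAL(sigaction)
  return 0;
}

int main() {
  intercepted_sigaction(kHandleSignalExclusive);
  intercepted_sigaction(kHandleSignalYes);
  std::printf("forwarded %d of 2 calls\n", forwarded);  // forwarded 1 of 2
  return 0;
}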
if (!map_.size())
return StackTrace();
IdDescPair pair = {id, nullptr};
- uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
- IdDescPair::IdComparator);
- if (idx > map_.size())
+ uptr idx =
+ InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ if (idx > map_.size() || map_[idx].id != id)
return StackTrace();
return map_[idx].desc->load();
}
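The switch from InternalBinarySearch to InternalLowerBound is why the extra map_[idx].id != id test appears: a lower bound returns the insertion point, which can name a different id (or be one past the end) when the key is absent. The standard-library equivalent shows the same shape:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<unsigned> ids = {3, 7, 9};  // sorted, as the id/desc map is
  const unsigned wanted = 8;
  auto it = std::lower_bound(ids.begin(), ids.end(), wanted);
  // Mirrors the runtime check: out of range *or* a different id means the
  // stack id was never stored.
  if (it == ids.end() || *it != wanted)
    std::puts("miss: 8 absent, lower_bound landed on 9");
  return 0;
}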
}
}
-static bool MatchPc(uptr cur_pc, uptr trace_pc, uptr threshold) {
- return cur_pc - trace_pc <= threshold || trace_pc - cur_pc <= threshold;
-}
-
void BufferedStackTrace::PopStackFrames(uptr count) {
CHECK_LT(count, size);
size -= count;
}
}
+static uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }
+
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
- // Use threshold to find PC in stack trace, as PC we want to unwind from may
- // slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 350;
- for (uptr i = 0; i < size; ++i) {
- if (MatchPc(pc, trace[i], kPcThreshold))
- return i;
+ uptr best = 0;
+ for (uptr i = 1; i < size; ++i) {
+ if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
}
- return 0;
+ return best;
}
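The rewritten LocatePcInTrace drops the fixed 350-byte MatchPc window and instead selects the frame whose recorded PC is nearest the requested one, which cannot mis-fire when two frames both fall inside the old threshold. A standalone sketch with made-up trace values:

#include <cstddef>
#include <cstdint>
#include <cstdio>

static uint64_t Distance(uint64_t a, uint64_t b) { return a < b ? b - a : a - b; }

int main() {
  const uint64_t trace[] = {0x1000, 0x2040, 0x2010, 0x9000};
  const uint64_t pc = 0x2000;
  size_t best = 0;
  for (size_t i = 1; i < sizeof(trace) / sizeof(trace[0]); ++i)
    if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
  // Both 0x2040 and 0x2010 sit within the old 350-byte window; the nearest
  // one (index 2) now wins deterministically.
  std::printf("best frame: %zu\n", best);
  return 0;
}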
} // namespace __sanitizer
void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
uptr stack_bottom, bool request_fast_unwind);
+ void Reset() {
+ *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
+ top_frame_bp = 0;
+ }
+
private:
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
u32 max_depth);
void PopStackFrames(uptr count);
uptr LocatePcInTrace(uptr pc);
- BufferedStackTrace(const BufferedStackTrace &);
- void operator=(const BufferedStackTrace &);
+ BufferedStackTrace(const BufferedStackTrace &) = delete;
+ void operator=(const BufferedStackTrace &) = delete;
};
// Check if given pointer points into allocated stack area.
if (dedup_frames-- > 0) {
if (dedup_token.length())
dedup_token.append("--");
- dedup_token.append(cur->info.function);
+ if (cur->info.function != nullptr)
+ dedup_token.append(cur->info.function);
}
}
frames->ClearAll();
}
}
+static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
+ uptr module_name_len, uptr *pc_offset) {
+ const char *found_module_name = nullptr;
+ bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
+ pc, &found_module_name, pc_offset);
+
+ if (!ok) return false;
+
+ if (module_name && module_name_len) {
+ internal_strncpy(module_name, found_module_name, module_name_len);
+ module_name[module_name_len - 1] = '\x00';
+ }
+ return true;
+}
+
} // namespace __sanitizer
using namespace __sanitizer;
internal_strncpy(out_buf, data_desc.data(), out_buf_size);
out_buf[out_buf_size - 1] = 0;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc( // NOLINT
+ uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
+ pc_offset);
+}
} // extern "C"
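A usage sketch for the new entry point; the prototype is declared here by hand to match the diff (uptr approximated as uintptr_t, which is an assumption; prefer the sanitizer interface header where available):

#include <cstdint>
#include <cstdio>

extern "C" int __sanitizer_get_module_and_offset_for_pc(
    uintptr_t pc, char *module_name, uintptr_t module_name_len,
    uintptr_t *pc_offset);

static void Anchor() {}  // any code address in the running module will do

int main() {
  char module[256];
  uintptr_t offset = 0;
  if (__sanitizer_get_module_and_offset_for_pc(
          (uintptr_t)(void *)&Anchor, module, sizeof(module), &offset))
    std::printf("%s+0x%lx\n", module, (unsigned long)offset);
  return 0;
}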
//===----------------------------------------------------------------------===//
#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_file.h"
+#include "sanitizer_fuchsia.h"
namespace __sanitizer {
+// sanitizer_symbolizer_fuchsia.cc implements these differently for Fuchsia.
+#if !SANITIZER_FUCHSIA
+
static const char *StripFunctionName(const char *function, const char *prefix) {
if (!function) return nullptr;
if (!prefix) return function;
vs_style, strip_path_prefix);
} else if (info.module) {
RenderModuleLocation(buffer, info.module, info.module_offset,
- strip_path_prefix);
+ info.module_arch, strip_path_prefix);
} else {
buffer->append("(<unknown module>)");
}
if (info.address & kExternalPCBit)
{} // These PCs are not meaningful.
else if (info.module)
- buffer->append("(%s+%p)", StripModuleName(info.module),
- (void *)info.module_offset);
+ // Always strip the module name for %M.
+ RenderModuleLocation(buffer, StripModuleName(info.module),
+ info.module_offset, info.module_arch, "");
else
buffer->append("(%p)", (void *)info.address);
break;
}
}
+#endif // !SANITIZER_FUCHSIA
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix) {
}
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, const char *strip_path_prefix) {
- buffer->append("(%s+0x%zx)", StripPathPrefix(module, strip_path_prefix),
- offset);
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix) {
+ buffer->append("(%s", StripPathPrefix(module, strip_path_prefix));
+ if (arch != kModuleArchUnknown) {
+ buffer->append(":%s", ModuleArchToString(arch));
+ }
+ buffer->append("+0x%zx)", offset);
}
} // namespace __sanitizer
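With the arch parameter threaded through RenderModuleLocation, a frame that used to render as (libfoo.dylib+0x1234) can now render as (libfoo.dylib:x86_64+0x1234) when the architecture is known. A standalone sketch of the output shape (not the runtime's append logic):

#include <cstdio>

int main() {
  const char *module = "libfoo.dylib";
  const char *arch = "x86_64";  // empty when the arch is kModuleArchUnknown
  const unsigned long offset = 0x1234;
  char buf[128];
  if (*arch)
    std::snprintf(buf, sizeof(buf), "(%s:%s+0x%lx)", module, arch, offset);
  else
    std::snprintf(buf, sizeof(buf), "(%s+0x%lx)", module, offset);
  std::puts(buf);  // (libfoo.dylib:x86_64+0x1234)
  return 0;
}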
const char *strip_path_prefix);
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
- uptr offset, const char *strip_path_prefix);
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix);
// Same as RenderFrame, but for data section (global variables).
// Accepts %s, %l from above.
#include "sanitizer_common.h"
namespace __sanitizer {
-typedef int SuspendedThreadID;
+
+enum PtraceRegistersStatus {
+ REGISTERS_UNAVAILABLE_FATAL = -1,
+ REGISTERS_UNAVAILABLE = 0,
+ REGISTERS_AVAILABLE = 1
+};
// Holds the list of suspended threads and provides an interface to dump their
// register contexts.
class SuspendedThreadsList {
public:
- SuspendedThreadsList()
- : thread_ids_(1024) {}
- SuspendedThreadID GetThreadID(uptr index) const {
- CHECK_LT(index, thread_ids_.size());
- return thread_ids_[index];
+ SuspendedThreadsList() = default;
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
+ virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const {
+ UNIMPLEMENTED();
}
- int GetRegistersAndSP(uptr index, uptr *buffer, uptr *sp) const;
+
// The buffer in GetRegistersAndSP should be at least this big.
- static uptr RegisterCount();
- uptr thread_count() const { return thread_ids_.size(); }
- bool Contains(SuspendedThreadID thread_id) const {
- for (uptr i = 0; i < thread_ids_.size(); i++) {
- if (thread_ids_[i] == thread_id)
- return true;
- }
- return false;
- }
- void Append(SuspendedThreadID thread_id) {
- thread_ids_.push_back(thread_id);
- }
+ virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
+ virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
+ virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
private:
- InternalMmapVector<SuspendedThreadID> thread_ids_;
-
// Prohibit copy and assign.
SuspendedThreadsList(const SuspendedThreadsList&);
void operator=(const SuspendedThreadsList&);
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__s390__))
+ defined(__s390__) || defined(__i386__) || \
+ defined(__arm__))
#include "sanitizer_stoptheworld.h"
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
-#if SANITIZER_ANDROID && defined(__arm__)
-# include <linux/user.h> // for pt_regs
-#else
-# ifdef __aarch64__
+#if defined(__aarch64__) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
-# include <asm/ptrace.h>
-# endif
-# include <sys/user.h> // for user_regs_struct
-# if SANITIZER_ANDROID && SANITIZER_MIPS
-# include <asm/reg.h> // for mips SP register in sys/user.h
-# endif
+# include <asm/ptrace.h>
+#endif
+#include <sys/user.h> // for user_regs_struct
+#if SANITIZER_ANDROID && SANITIZER_MIPS
+# include <asm/reg.h> // for mips SP register in sys/user.h
#endif
#include <sys/wait.h> // for signal-related stuff
namespace __sanitizer {
-COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+class SuspendedThreadsListLinux : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListLinux() : thread_ids_(1024) {}
+
+ tid_t GetThreadID(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsTid(tid_t thread_id) const;
+ void Append(tid_t tid);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<tid_t> thread_ids_;
+};
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
bool SuspendAllThreads();
void ResumeAllThreads();
void KillAllThreads();
- SuspendedThreadsList &suspended_threads_list() {
+ SuspendedThreadsListLinux &suspended_threads_list() {
return suspended_threads_list_;
}
TracerThreadArgument *arg;
private:
- SuspendedThreadsList suspended_threads_list_;
+ SuspendedThreadsListLinux suspended_threads_list_;
pid_t pid_;
- bool SuspendThread(SuspendedThreadID thread_id);
+ bool SuspendThread(tid_t thread_id);
};
-bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) {
+bool ThreadSuspender::SuspendThread(tid_t tid) {
// Are we already attached to this thread?
// Currently this check takes linear time, however the number of threads is
// usually small.
- if (suspended_threads_list_.Contains(tid))
- return false;
+ if (suspended_threads_list_.ContainsTid(tid)) return false;
int pterrno;
if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
&pterrno)) {
// Either the thread is dead, or something prevented us from attaching.
// Log this event and move on.
- VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno);
+ VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
+ pterrno);
return false;
} else {
- VReport(2, "Attached to thread %d.\n", tid);
+ VReport(2, "Attached to thread %zu.\n", (uptr)tid);
// The thread is not guaranteed to stop before ptrace returns, so we must
// wait on it. Note: if the thread receives a signal concurrently,
// we can get the notification about the signal before the one about the stop.
if (internal_iserror(waitpid_status, &wperrno)) {
// Got an ECHILD error. I don't think this situation is possible, but it
// doesn't hurt to report it.
- VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
- tid, wperrno);
+ VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
+ (uptr)tid, wperrno);
internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
return false;
}
}
void ThreadSuspender::ResumeAllThreads() {
- for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
+ for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
pid_t tid = suspended_threads_list_.GetThreadID(i);
int pterrno;
if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
}
void ThreadSuspender::KillAllThreads() {
- for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
+ for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
nullptr, nullptr);
}
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
- SignalContext ctx = SignalContext::Create(siginfo, uctx);
+ SignalContext ctx(siginfo, uctx);
Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
ctx.addr, ctx.pc, ctx.sp);
ThreadSuspender *inst = thread_suspender_instance;
}
// Size of alternative stack for signal handlers in the tracer thread.
-static const int kHandlerStackSize = 4096;
+static const int kHandlerStackSize = 8192;
// This function will be run as a cloned task.
static int TracerThread(void* argument) {
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
-int SuspendedThreadsList::GetRegistersAndSP(uptr index,
- uptr *buffer,
- uptr *sp) const {
+tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
+ CHECK_LT(index, thread_ids_.size());
+ return thread_ids_[index];
+}
+
+uptr SuspendedThreadsListLinux::ThreadCount() const {
+ return thread_ids_.size();
+}
+
+bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
+ for (uptr i = 0; i < thread_ids_.size(); i++) {
+ if (thread_ids_[i] == thread_id) return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListLinux::Append(tid_t tid) {
+ thread_ids_.push_back(tid);
+}
+
+PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
pid_t tid = GetThreadID(index);
regs_struct regs;
int pterrno;
if (isErr) {
VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
pterrno);
- return -1;
+ // ESRCH means that the given thread is not suspended or already dead.
+ // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
+ // we should notify caller about this.
+ return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
}
*sp = regs.REG_SP;
  internal_memcpy(buffer, &regs, sizeof(regs));
- return 0;
+ return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsList::RegisterCount() {
+uptr SuspendedThreadsListLinux::RegisterCount() const {
return sizeof(regs_struct) / sizeof(uptr);
}
} // namespace __sanitizer
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
// || defined(__aarch64__) || defined(__powerpc64__)
- // || defined(__s390__)
+ // || defined(__s390__) || defined(__i386__) || defined(__arm__)
--- /dev/null
+//===-- sanitizer_stoptheworld_mac.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+ defined(__i386))
+
+#include <mach/mach.h>
+#include <mach/thread_info.h>
+#include <pthread.h>
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+typedef struct {
+ tid_t tid;
+ thread_t thread;
+} SuspendedThreadInfo;
+
+class SuspendedThreadsListMac : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListMac() : threads_(1024) {}
+
+ tid_t GetThreadID(uptr index) const;
+ thread_t GetThread(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsThread(thread_t thread) const;
+ void Append(thread_t thread);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<SuspendedThreadInfo> threads_;
+};
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+void RunThread(void *arg) {
+ struct RunThreadArgs *run_args = (struct RunThreadArgs *)arg;
+ SuspendedThreadsListMac suspended_threads_list;
+
+ thread_array_t threads;
+ mach_msg_type_number_t num_threads;
+ kern_return_t err = task_threads(mach_task_self(), &threads, &num_threads);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Failed to get threads for task (errno %d).\n", err);
+ return;
+ }
+
+ thread_t thread_self = mach_thread_self();
+ for (unsigned int i = 0; i < num_threads; ++i) {
+ if (threads[i] == thread_self) continue;
+
+ thread_suspend(threads[i]);
+ suspended_threads_list.Append(threads[i]);
+ }
+
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+ uptr num_suspended = suspended_threads_list.ThreadCount();
+ for (unsigned int i = 0; i < num_suspended; ++i) {
+ thread_resume(suspended_threads_list.GetThread(i));
+ }
+}
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ pthread_t run_thread = (pthread_t)internal_start_thread(RunThread, &arg);
+ internal_join_thread(run_thread);
+}
+
+#if defined(__x86_64__)
+typedef x86_thread_state64_t regs_struct;
+
+#define SP_REG __rsp
+
+#elif defined(__aarch64__)
+typedef arm_thread_state64_t regs_struct;
+
+# if __DARWIN_UNIX03
+# define SP_REG __sp
+# else
+# define SP_REG sp
+# endif
+
+#elif defined(__i386)
+typedef x86_thread_state32_t regs_struct;
+
+#define SP_REG __esp
+
+#else
+#error "Unsupported architecture"
+#endif
+
+tid_t SuspendedThreadsListMac::GetThreadID(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].tid;
+}
+
+thread_t SuspendedThreadsListMac::GetThread(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].thread;
+}
+
+uptr SuspendedThreadsListMac::ThreadCount() const {
+ return threads_.size();
+}
+
+bool SuspendedThreadsListMac::ContainsThread(thread_t thread) const {
+ for (uptr i = 0; i < threads_.size(); i++) {
+ if (threads_[i].thread == thread) return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListMac::Append(thread_t thread) {
+ thread_identifier_info_data_t info;
+ mach_msg_type_number_t info_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kern_return_t err = thread_info(thread, THREAD_IDENTIFIER_INFO,
+ (thread_info_t)&info, &info_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get thread ident for a thread\n");
+ return;
+ }
+ threads_.push_back({info.thread_id, thread});
+}
+
+PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
+ thread_t thread = GetThread(index);
+ regs_struct regs;
+ int err;
+ mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
+  err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+                         &reg_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get registers for a thread\n");
+ // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
+ // or the thread does not exist. The other possible error case,
+ // MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's
+ // still safe to proceed.
+ return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
+ }
+
+  internal_memcpy(buffer, &regs, sizeof(regs));
+ *sp = regs.SP_REG;
+
+ // On x86_64 and aarch64, we must account for the stack redzone, which is 128
+ // bytes.
+ if (SANITIZER_WORDSIZE == 64) *sp -= 128;
+
+ return REGISTERS_AVAILABLE;
+}
+
+uptr SuspendedThreadsListMac::RegisterCount() const {
+ return MACHINE_THREAD_STATE_COUNT;
+}
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) ||
+       //                   defined(__i386))
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
if (filename[0] == '\0')
return;
+#if !SANITIZER_FUCHSIA
// If we cannot find the file, check if its location is relative to
// the location of the executable.
InternalScopedString new_file_path(kMaxPathLength);
new_file_path.size())) {
filename = new_file_path.data();
}
+#endif // !SANITIZER_FUCHSIA
// Read the file.
VPrintf(1, "%s: reading suppressions file at %s\n",
function_offset = kUnknown;
}
-void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset) {
+void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
+ ModuleArch mod_arch) {
module = internal_strdup(mod_name);
module_offset = mod_offset;
+ module_arch = mod_arch;
}
SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
+void Symbolizer::InvalidateModuleList() {
+ modules_fresh_ = false;
+}
+
void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
Symbolizer::EndSymbolizationHook end_hook) {
CHECK(start_hook_ == 0 && end_hook_ == 0);
char *module;
uptr module_offset;
+ ModuleArch module_arch;
static const uptr kUnknown = ~(uptr)0;
char *function;
AddressInfo();
// Deletes all strings and resets all fields.
void Clear();
- void FillModuleInfo(const char *mod_name, uptr mod_offset);
+ void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
};
// Linked list of symbolized frames (each frame is described by AddressInfo).
// (de)allocated using sanitizer internal allocator.
char *module;
uptr module_offset;
+ ModuleArch module_arch;
+
char *file;
uptr line;
char *name;
void AddHooks(StartSymbolizationHook start_hook,
EndSymbolizationHook end_hook);
+ void RefreshModules();
const LoadedModule *FindModuleForAddress(uptr address);
+ void InvalidateModuleList();
+
private:
// GetModuleNameAndOffsetForPC has to return a string to the caller.
// Since the corresponding module might get unloaded later, we should create
static Symbolizer *PlatformInit();
bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
- uptr *module_offset);
+ uptr *module_offset,
+ ModuleArch *module_arch);
ListOfModules modules_;
+ ListOfModules fallback_modules_;
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
--- /dev/null
+//===-- sanitizer_symbolizer_fuchsia.cc -----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Implementation of Fuchsia-specific symbolizer.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_fuchsia.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// For Fuchsia we don't do any actual symbolization per se.
+// Instead, we emit text containing raw addresses and raw linkage
+// symbol names, embedded in Fuchsia's symbolization markup format.
+// Fuchsia's logging infrastructure emits enough information about
+// process memory layout that a post-processing filter can do the
+// symbolization and pretty-print the markup. See the spec at:
+// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
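+// For example (illustrative values only), backtrace frame 1 at PC 0x1000 is
+// emitted verbatim as "{{{bt:1:0x1000}}}", and the offline filter rewrites
+// it into a conventional symbolized frame.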
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
+constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
+
+// Function name or equivalent from PC location.
+constexpr const char *kFormatFunction = "{{{pc:%p}}}";
+constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
+
+// Global variable name or equivalent from data memory address.
+constexpr const char *kFormatData = "{{{data:%p}}}";
+
+// One frame in a backtrace (printed on a line by itself).
+constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+// It's expected to return a static buffer that will be reused on each call.
+const char *Symbolizer::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
+}
+
+// This is used mostly for suppression matching. Making it work
+// would enable "interceptor_via_lib" suppressions. It's also used
+// once in UBSan to say "in module ..." in a message that also
+// includes an address in the module, so post-processing can already
+// pretty-print that so as to indicate the module.
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ return false;
+}
+
+// This is used in some places for suppression checking, which we
+// don't really support for Fuchsia. It's also used in UBSan to
+// identify a PC location to a function name, so we always fill in
+// the function member with a string containing markup around the PC
+// value.
+// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+// to render stack frames, but that should be changed to use
+// RenderStackFrame.
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
+ char buffer[kFormatFunctionMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
+ s->info.function = internal_strdup(buffer);
+ return s;
+}
+
+// Always claim we succeeded, so that RenderDataInfo will be called.
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ info->Clear();
+ info->start = addr;
+ return true;
+}
+
+// We ignore the format argument to __sanitizer_symbolize_global.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ buffer->append(kFormatData, DI->start);
+}
+
+// We don't support the stack_trace_format flag at all.
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix, const char *strip_func_prefix) {
+ buffer->append(kFormatFrame, frame_no, info.address);
+}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ return new (symbolizer_allocator_) Symbolizer({});
+}
+
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
#define SANITIZER_SYMBOLIZER_INTERNAL_H
#include "sanitizer_symbolizer.h"
+#include "sanitizer_file.h"
namespace __sanitizer {
bool SymbolizeData(uptr addr, DataInfo *info) override;
private:
- const char *SendCommand(bool is_data, const char *module_name,
- uptr module_offset);
+ const char *FormatAndSendCommand(bool is_data, const char *module_name,
+ uptr module_offset, ModuleArch arch);
LLVMSymbolizerProcess *symbolizer_process_;
static const uptr kBufferSize = 16 * 1024;
if (frames_symbolized > 0) {
SymbolizedStack *cur = SymbolizedStack::New(addr);
AddressInfo *info = &cur->info;
- info->FillModuleInfo(first->info.module, first->info.module_offset);
+ info->FillModuleInfo(first->info.module, first->info.module_offset,
+ first->info.module_arch);
last->next = cur;
last = cur;
}
namespace __sanitizer {
+Symbolizer *Symbolizer::GetOrInit() {
+ SpinMutexLock l(&init_mu_);
+ if (symbolizer_)
+ return symbolizer_;
+ symbolizer_ = PlatformInit();
+ CHECK(symbolizer_);
+ return symbolizer_;
+}
+
+// See sanitizer_symbolizer_fuchsia.cc.
+#if !SANITIZER_FUCHSIA
+
const char *ExtractToken(const char *str, const char *delims, char **result) {
uptr prefix_len = internal_strcspn(str, delims);
*result = (char*)InternalAlloc(prefix_len + 1);
BlockingMutexLock l(&mu_);
const char *module_name;
uptr module_offset;
+ ModuleArch arch;
SymbolizedStack *res = SymbolizedStack::New(addr);
- if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
+ &arch))
return res;
// Always fill data about module name and offset.
- res->info.FillModuleInfo(module_name, module_offset);
+ res->info.FillModuleInfo(module_name, module_offset, arch);
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (tool.SymbolizePC(addr, res)) {
BlockingMutexLock l(&mu_);
const char *module_name;
uptr module_offset;
- if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset))
+ ModuleArch arch;
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
+ &arch))
return false;
info->Clear();
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
+ info->module_arch = arch;
for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
if (tool.SymbolizeData(addr, info)) {
uptr *module_address) {
BlockingMutexLock l(&mu_);
const char *internal_module_name = nullptr;
+ ModuleArch arch;
if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
- module_address))
+ module_address, &arch))
return false;
if (module_name)
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
const char **module_name,
- uptr *module_offset) {
+ uptr *module_offset,
+ ModuleArch *module_arch) {
const LoadedModule *module = FindModuleForAddress(address);
if (module == nullptr)
return false;
*module_name = module->full_name();
*module_offset = address - module->base_address();
+ *module_arch = module->arch();
return true;
}
+void Symbolizer::RefreshModules() {
+ modules_.init();
+ fallback_modules_.fallbackInit();
+ RAW_CHECK(modules_.size() > 0);
+ modules_fresh_ = true;
+}
+
+static const LoadedModule *SearchForModule(const ListOfModules &modules,
+ uptr address) {
+ for (uptr i = 0; i < modules.size(); i++) {
+ if (modules[i].containsAddress(address)) {
+ return &modules[i];
+ }
+ }
+ return nullptr;
+}
+
const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
bool modules_were_reloaded = false;
if (!modules_fresh_) {
- modules_.init();
- RAW_CHECK(modules_.size() > 0);
- modules_fresh_ = true;
+ RefreshModules();
modules_were_reloaded = true;
}
- for (uptr i = 0; i < modules_.size(); i++) {
- if (modules_[i].containsAddress(address)) {
- return &modules_[i];
- }
- }
- // Reload the modules and look up again, if we haven't tried it yet.
+ const LoadedModule *module = SearchForModule(modules_, address);
+ if (module) return module;
+
+ // dlopen/dlclose interceptors invalidate the module list, but when
+ // interception is disabled, we need to retry if the lookup fails in
+ // case the module list changed.
+#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
if (!modules_were_reloaded) {
- // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors.
- // It's too aggressive to reload the list of modules each time we fail
- // to find a module for a given address.
- modules_fresh_ = false;
- return FindModuleForAddress(address);
+ RefreshModules();
+ module = SearchForModule(modules_, address);
+ if (module) return module;
}
- return 0;
-}
+#endif
-Symbolizer *Symbolizer::GetOrInit() {
- SpinMutexLock l(&init_mu_);
- if (symbolizer_)
- return symbolizer_;
- symbolizer_ = PlatformInit();
- CHECK(symbolizer_);
- return symbolizer_;
+ if (fallback_modules_.size()) {
+ module = SearchForModule(fallback_modules_, address);
+ }
+ return module;
}
// For now we assume the following protocol:
buffer[length - 2] == '\n';
}
+ // When adding a new architecture, don't forget to also update
+ // script/asan_symbolize.py and sanitizer_common.h.
void GetArgV(const char *path_to_binary,
const char *(&argv)[kArgVMax]) const override {
#if defined(__x86_64h__)
char *file_line_info = 0;
str = ExtractToken(str, "\n", &file_line_info);
CHECK(file_line_info);
- // Parse the last :<int>, which must be there.
- char *last_colon = internal_strrchr(file_line_info, ':');
- CHECK(last_colon);
- int line_or_column = internal_atoll(last_colon + 1);
- // Truncate the string at the last colon and find the next-to-last colon.
- *last_colon = '\0';
- last_colon = internal_strrchr(file_line_info, ':');
- if (last_colon && IsDigit(last_colon[1])) {
- // If the second-to-last colon is followed by a digit, it must be the line
- // number, and the previous parsed number was a column.
- info->line = internal_atoll(last_colon + 1);
- info->column = line_or_column;
- *last_colon = '\0';
- } else {
- // Otherwise, we have line info but no column info.
- info->line = line_or_column;
- info->column = 0;
+
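+  // Strip up to two trailing ":<digits>" groups; e.g. (illustrative)
+  // "foo.cc:10:5" yields line = 10 and column = 5, "foo.cc:10" yields
+  // line = 10 and column = 0, and a bare "foo.cc" leaves both at 0.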
+ if (uptr size = internal_strlen(file_line_info)) {
+ char *back = file_line_info + size - 1;
+ for (int i = 0; i < 2; ++i) {
+ while (back > file_line_info && IsDigit(*back)) --back;
+ if (*back != ':' || !IsDigit(back[1])) break;
+ info->column = info->line;
+ info->line = internal_atoll(back + 1);
+ // Truncate the string at the colon to keep only filename.
+ *back = '\0';
+ --back;
+ }
+ ExtractToken(file_line_info, "", &info->file);
}
- ExtractToken(file_line_info, "", &info->file);
+
InternalFree(file_line_info);
return str;
}
top_frame = false;
} else {
cur = SymbolizedStack::New(res->info.address);
- cur->info.FillModuleInfo(res->info.module, res->info.module_offset);
+ cur->info.FillModuleInfo(res->info.module, res->info.module_offset,
+ res->info.module_arch);
last->next = cur;
last = cur;
}
}
bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
- if (const char *buf = SendCommand(/*is_data*/ false, stack->info.module,
- stack->info.module_offset)) {
+ AddressInfo *info = &stack->info;
+ const char *buf = FormatAndSendCommand(
+ /*is_data*/ false, info->module, info->module_offset, info->module_arch);
+ if (buf) {
ParseSymbolizePCOutput(buf, stack);
return true;
}
}
bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
- if (const char *buf =
- SendCommand(/*is_data*/ true, info->module, info->module_offset)) {
+ const char *buf = FormatAndSendCommand(
+ /*is_data*/ true, info->module, info->module_offset, info->module_arch);
+ if (buf) {
ParseSymbolizeDataOutput(buf, info);
info->start += (addr - info->module_offset); // Add the base address.
return true;
return false;
}
-const char *LLVMSymbolizer::SendCommand(bool is_data, const char *module_name,
- uptr module_offset) {
+const char *LLVMSymbolizer::FormatAndSendCommand(bool is_data,
+ const char *module_name,
+ uptr module_offset,
+ ModuleArch arch) {
CHECK(module_name);
- internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n",
- is_data ? "DATA " : "", module_name, module_offset);
+ const char *is_data_str = is_data ? "DATA " : "";
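+  // The request follows llvm-symbolizer's stdin protocol; e.g. a data query
+  // against a fat Mach-O module would be formatted as (illustrative values):
+  // DATA "/usr/lib/libfoo.dylib:x86_64" 0x2a8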
+ if (arch == kModuleArchUnknown) {
+ if (internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n", is_data_str,
+ module_name,
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
+ } else {
+ if (internal_snprintf(buffer_, kBufferSize, "%s\"%s:%s\" 0x%zx\n",
+ is_data_str, module_name, ModuleArchToString(arch),
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
+ }
return symbolizer_process_->SendCommand(buffer_);
}
CHECK_NE(path_[0], '\0');
}
+static bool IsSameModule(const char* path) {
+ if (const char* ProcessName = GetProcessName()) {
+ if (const char* SymbolizerName = StripModuleName(path)) {
+ return !internal_strcmp(ProcessName, SymbolizerName);
+ }
+ }
+ return false;
+}
+
const char *SymbolizerProcess::SendCommand(const char *command) {
+ if (failed_to_start_)
+ return nullptr;
+ if (IsSameModule(path_)) {
+ Report("WARNING: Symbolizer was blocked from starting itself!\n");
+ failed_to_start_ = true;
+ return nullptr;
+ }
for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
// Start or restart symbolizer if we failed to send command to it.
if (const char *res = SendCommandImpl(command))
read_len += just_read;
if (ReachedEndOfOutput(buffer, read_len))
break;
+ if (read_len + 1 == max_length) {
+ Report("WARNING: Symbolizer buffer too small\n");
+ read_len = 0;
+ break;
+ }
}
buffer[read_len] = '\0';
return true;
return true;
}
+#endif // !SANITIZER_FUCHSIA
+
} // namespace __sanitizer
#if SANITIZER_POSIX
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_linux.h"
// own demangler (libc++abi's implementation could be adapted so that
// it does not allocate). For now, we just call it anyway, and we leak
// the returned value.
- if (__cxxabiv1::__cxa_demangle)
+ if (&__cxxabiv1::__cxa_demangle)
if (const char *demangled_name =
__cxxabiv1::__cxa_demangle(name, 0, 0, 0))
return demangled_name;
return DemangleCXXABI(name);
}
+static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+ // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
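+  // (Five pipes suffice: only descriptors 0, 1 and 2 can disqualify a pair,
+  // so at most three of the five pairs can contain a low descriptor, leaving
+  // at least two pairs with both ends greater than 2.)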
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+ infd_[0] = infd[0];
+ infd_[1] = infd[1];
+ outfd_[0] = outfd[0];
+ outfd_[1] = outfd[1];
+ return true;
+}
+
bool SymbolizerProcess::StartSymbolizerSubprocess() {
if (!FileExists(path_)) {
if (!reported_invalid_path_) {
return false;
}
- int pid;
+ int pid = -1;
+
+ int infd[2];
+ internal_memset(&infd, 0, sizeof(infd));
+ int outfd[2];
+ internal_memset(&outfd, 0, sizeof(outfd));
+ if (!CreateTwoHighNumberedPipes(infd, outfd)) {
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ }
+
if (use_forkpty_) {
#if SANITIZER_MAC
fd_t fd = kInvalidFd;
int saved_stderr = dup(STDERR_FILENO);
CHECK_GE(saved_stderr, 0);
+ // We only need one pipe, for stdin of the child.
+ close(outfd[0]);
+ close(outfd[1]);
+
// Use forkpty to disable buffering in the new terminal.
pid = internal_forkpty(&fd);
if (pid == -1) {
} else if (pid == 0) {
// Child subprocess.
+ // infd[0] is the child's reading end.
+ close(infd[1]);
+
+ // Set up stdin to read from the pipe.
+ CHECK_GE(dup2(infd[0], STDIN_FILENO), 0);
+ close(infd[0]);
+
// Restore stderr.
CHECK_GE(dup2(saved_stderr, STDERR_FILENO), 0);
close(saved_stderr);
internal__exit(1);
}
+ // Input for the child, infd[1] is our writing end.
+ output_fd_ = infd[1];
+ close(infd[0]);
+
// Continue execution in parent process.
- input_fd_ = output_fd_ = fd;
+ input_fd_ = fd;
close(saved_stderr);
UNIMPLEMENTED();
#endif // SANITIZER_MAC
} else {
- int *infd = NULL;
- int *outfd = NULL;
- // The client program may close its stdin and/or stdout and/or stderr
- // thus allowing socketpair to reuse file descriptors 0, 1 or 2.
- // In this case the communication between the forked processes may be
- // broken if either the parent or the child tries to close or duplicate
- // these descriptors. The loop below produces two pairs of file
- // descriptors, each greater than 2 (stderr).
- int sock_pair[5][2];
- for (int i = 0; i < 5; i++) {
- if (pipe(sock_pair[i]) == -1) {
- for (int j = 0; j < i; j++) {
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- Report("WARNING: Can't create a socket pair to start "
- "external symbolizer (errno: %d)\n", errno);
- return false;
- } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
- if (infd == NULL) {
- infd = sock_pair[i];
- } else {
- outfd = sock_pair[i];
- for (int j = 0; j < i; j++) {
- if (sock_pair[j] == infd) continue;
- internal_close(sock_pair[j][0]);
- internal_close(sock_pair[j][1]);
- }
- break;
- }
- }
- }
- CHECK(infd);
- CHECK(outfd);
-
const char *argv[kArgVMax];
GetArgV(path_, argv);
pid = StartSubprocess(path_, argv, /* stdin */ outfd[0],
output_fd_ = outfd[1];
}
+ CHECK_GT(pid, 0);
+
// Check that symbolizer subprocess started successfully.
SleepForMillis(kSymbolizerStartupTimeMillis);
if (!IsProcessRunning(pid)) {
bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
return false;
+    // The returned buffer is empty when the output is valid but exceeds
+    // max_length.
+ if (*buffer == '\0')
+ return true;
// We should cut out output_terminator_ at the end of given buffer,
// appended by addr2line to mark the end of its meaningful output.
    // We cannot scan buffer from its beginning, because it is legal for it
InternalSymbolizer() { }
static const int kBufferSize = 16 * 1024;
- static const int kMaxDemangledNameSize = 1024;
char buffer_[kBufferSize];
};
#else // SANITIZER_SUPPORTS_WEAK_HOOKS
// Otherwise symbolizer program is unknown, let's search $PATH
CHECK(path == nullptr);
- if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
- VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
- return new(*allocator) LLVMSymbolizer(found_path, allocator);
- }
#if SANITIZER_MAC
if (const char *found_path = FindPathToBinary("atos")) {
VReport(2, "Using atos found at: %s\n", found_path);
return new(*allocator) AtosSymbolizer(found_path, allocator);
}
#endif // SANITIZER_MAC
+ if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
+ VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
+ return new(*allocator) LLVMSymbolizer(found_path, allocator);
+ }
if (common_flags()->allow_addr2line) {
if (const char *found_path = FindPathToBinary("addr2line")) {
VReport(2, "Using addr2line found at: %s\n", found_path);
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (IsReportingOOM()) {
+ if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-#include <dbghelp.h>
-#pragma comment(lib, "dbghelp.lib")
+#include "sanitizer_dbghelp.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
+decltype(::StackWalk64) *StackWalk64;
+decltype(::SymCleanup) *SymCleanup;
+decltype(::SymFromAddr) *SymFromAddr;
+decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+decltype(::SymInitialize) *SymInitialize;
+decltype(::SymSetOptions) *SymSetOptions;
+decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
namespace {
class WinSymbolizerTool : public SymbolizerTool {
void InitializeDbgHelpIfNeeded() {
if (is_dbghelp_initialized)
return;
+
+ HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
+ CHECK(dbghelp && "failed to load dbghelp.dll");
+
+#define DBGHELP_IMPORT(name) \
+ do { \
+ name = \
+ reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \
+ CHECK(name != nullptr); \
+ } while (0)
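+  // For instance, DBGHELP_IMPORT(SymInitialize) resolves ::SymInitialize
+  // from the freshly loaded dbghelp.dll into the function pointer declared
+  // above and CHECK-fails if the export is missing.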
+ DBGHELP_IMPORT(StackWalk64);
+ DBGHELP_IMPORT(SymCleanup);
+ DBGHELP_IMPORT(SymFromAddr);
+ DBGHELP_IMPORT(SymFunctionTableAccess64);
+ DBGHELP_IMPORT(SymGetLineFromAddr64);
+ DBGHELP_IMPORT(SymGetModuleBase64);
+ DBGHELP_IMPORT(SymGetSearchPathW);
+ DBGHELP_IMPORT(SymInitialize);
+ DBGHELP_IMPORT(SymSetOptions);
+ DBGHELP_IMPORT(SymSetSearchPathW);
+ DBGHELP_IMPORT(UnDecorateSymbolName);
+#undef DBGHELP_IMPORT
+
if (!TrySymInitialize()) {
// OK, maybe the client app has called SymInitialize already.
// That's a bit unfortunate for us as all the DbgHelp functions are
//
//===----------------------------------------------------------------------===//
//
-// Generic implementations of internal_syscall and internal_iserror.
+// Generic implementations of internal_syscall* and internal_iserror.
//
//===----------------------------------------------------------------------===//
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
# define SYSCALL(name) SYS_ ## name
#else
# define SYSCALL(name) __NR_ ## name
#endif
-#if (SANITIZER_FREEBSD || SANITIZER_MAC) && defined(__x86_64__)
+#if SANITIZER_NETBSD
+// We use three kinds of internal_syscall wrappers for different return
+// types, in order to address differences in calling conventions (e.g. which
+// registers the return value is placed in):
+// - internal_syscall for 32-bit results (int, pid_t)
+// - internal_syscall64 for 64-bit results (off_t)
+// - internal_syscall_ptr for pointer and (s)size_t results
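+// For example (illustrative), a read(2)-style call returning ssize_t would
+// go through internal_syscall_ptr(SYSCALL(read), fd, buf, count).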
+# define internal_syscall syscall
+# define internal_syscall64 __syscall
+// Handle syscall renames manually
+# define SYS_stat SYS___stat50
+# define SYS_lstat SYS___lstat50
+# define SYS_fstat SYS___fstat50
+# define SYS_gettimeofday SYS___gettimeofday50
+# define SYS_wait4 SYS___wait450
+# define SYS_getdents SYS___getdents30
+# define SYS_sigaltstack SYS___sigaltstack14
+# define SYS_sigprocmask SYS___sigprocmask14
+# define SYS_nanosleep SYS___nanosleep50
+# if SANITIZER_WORDSIZE == 64
+# define internal_syscall_ptr __syscall
+# else
+# define internal_syscall_ptr syscall
+# endif
+#elif defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)
# define internal_syscall __syscall
# else
# define internal_syscall syscall
ThreadContextBase::ThreadContextBase(u32 tid)
: tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
status(ThreadStatusInvalid),
- detached(false), parent_tid(0), next(0) {
+ detached(false), workerthread(false), parent_tid(0), next(0) {
name[0] = '\0';
}
}
void ThreadContextBase::SetFinished() {
- if (!detached)
- status = ThreadStatusFinished;
+ // ThreadRegistry::FinishThread calls here in ThreadStatusCreated state
+ // for a thread that never actually started. In that case the thread
+ // should go to ThreadStatusFinished regardless of whether it was created
+ // as detached.
+ if (!detached || status == ThreadStatusCreated) status = ThreadStatusFinished;
OnFinished();
}
-void ThreadContextBase::SetStarted(uptr _os_id, void *arg) {
+void ThreadContextBase::SetStarted(tid_t _os_id, bool _workerthread,
+ void *arg) {
status = ThreadStatusRunning;
os_id = _os_id;
+ workerthread = _workerthread;
OnStarted(arg);
}
tctx->status != ThreadStatusDead);
}
-ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(uptr os_id) {
+ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
return FindThreadContextLocked(FindThreadContextByOsIdCallback,
(void *)os_id);
}
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
- CHECK_EQ(ThreadStatusRunning, tctx->status);
+ CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
+ tctx->status);
tctx->SetName(name);
}
QuarantinePush(tctx);
}
+// Normally this is called when the thread is about to exit. If called in
+// ThreadStatusCreated state, then this thread was never really started: we
+// called CreateThread for a prospective new thread and then failed to
+// actually create it, so StartThread was never called.
void ThreadRegistry::FinishThread(u32 tid) {
BlockingMutexLock l(&mtx_);
CHECK_GT(alive_threads_, 0);
alive_threads_--;
- CHECK_GT(running_threads_, 0);
- running_threads_--;
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
- CHECK_EQ(ThreadStatusRunning, tctx->status);
+ bool dead = tctx->detached;
+ if (tctx->status == ThreadStatusRunning) {
+ CHECK_GT(running_threads_, 0);
+ running_threads_--;
+ } else {
+ // The thread never really existed.
+ CHECK_EQ(tctx->status, ThreadStatusCreated);
+ dead = true;
+ }
tctx->SetFinished();
- if (tctx->detached) {
+ if (dead) {
tctx->SetDead();
QuarantinePush(tctx);
}
}
-void ThreadRegistry::StartThread(u32 tid, uptr os_id, void *arg) {
+void ThreadRegistry::StartThread(u32 tid, tid_t os_id, bool workerthread,
+ void *arg) {
BlockingMutexLock l(&mtx_);
running_threads_++;
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
CHECK_NE(tctx, 0);
CHECK_EQ(ThreadStatusCreated, tctx->status);
- tctx->SetStarted(os_id, arg);
+ tctx->SetStarted(os_id, workerthread, arg);
}
void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
const u32 tid; // Thread ID. Main thread should have tid = 0.
u64 unique_id; // Unique thread ID.
u32 reuse_count; // Number of times this tid was reused.
- uptr os_id; // PID (used for reporting).
+ tid_t os_id; // PID (used for reporting).
uptr user_id; // Some opaque user thread id (e.g. pthread_t).
char name[64]; // As annotated by user.
ThreadStatus status;
bool detached;
+ bool workerthread;
u32 parent_tid;
ThreadContextBase *next; // For storing thread contexts in a list.
void SetDead();
void SetJoined(void *arg);
void SetFinished();
- void SetStarted(uptr _os_id, void *arg);
+ void SetStarted(tid_t _os_id, bool _workerthread, void *arg);
void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
u32 _parent_tid, void *arg);
void Reset();
// is found.
ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,
void *arg);
- ThreadContextBase *FindThreadContextByOsIDLocked(uptr os_id);
+ ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id);
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);
- void StartThread(u32 tid, uptr os_id, void *arg);
+ void StartThread(u32 tid, tid_t os_id, bool workerthread, void *arg);
private:
const ThreadContextFactory context_factory_;
static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
if (!size) return;
- VPrintf(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
}
(DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
uptr num_live_dtls =
atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
- VPrintf(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ VReport(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
CHECK_LT(num_live_dtls, 1 << 20);
uptr old_dtv_size = dtls.dtv_size;
DTLS::DTV *old_dtv = dtls.dtv;
void DTLS_Destroy() {
if (!common_flags()->intercept_tls_get_addr) return;
- VPrintf(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
uptr s = dtls.dtv_size;
dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
DTLS_Deallocate(dtls.dtv, s);
if (dtls.dtv[dso_id].beg) return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
- VPrintf(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
"num_live_dtls %zd\n",
arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
atomic_load(&number_of_live_dtls, memory_order_relaxed));
if (dtls.last_memalign_ptr == tls_beg) {
tls_size = dtls.last_memalign_size;
- VPrintf(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
tls_beg, tls_size);
} else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
// This is the static TLS block which was initialized / unpoisoned at thread
// creation.
- VPrintf(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
tls_size = 0;
} else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
// We may want to check gnu_get_libc_version().
Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
tls_size = header->size;
tls_beg = header->start;
- VPrintf(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
tls_beg, tls_size);
} else {
- VPrintf(2, "__tls_get_addr: Can't guess glibc version\n");
+ VReport(2, "__tls_get_addr: Can't guess glibc version\n");
// This may happen inside the DTOR of main thread, so just ignore it.
tls_size = 0;
}
void DTLS_on_libc_memalign(void *ptr, uptr size) {
if (!common_flags()->intercept_tls_get_addr) return;
- VPrintf(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
dtls.last_memalign_size = size;
}
DTLS *DTLS_Get() { return &dtls; }
+bool DTLSInDestruction(DTLS *dtls) {
+ return dtls->dtv_size == kDestroyedThread;
+}
+
#else
void DTLS_on_libc_memalign(void *ptr, uptr size) {}
DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; }
DTLS *DTLS_Get() { return 0; }
void DTLS_Destroy() {}
+bool DTLSInDestruction(DTLS *dtls) {
+ UNREACHABLE("dtls is unsupported on this platform!");
+}
+
#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
} // namespace __sanitizer
void DTLS_on_libc_memalign(void *ptr, uptr size);
DTLS *DTLS_Get();
void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+// Returns true if DTLS of suspended thread is in destruction process.
+bool DTLSInDestruction(DTLS *dtls);
} // namespace __sanitizer
//===----------------------------------------------------------------------===//
//
// This file contains the unwind.h-based (aka "slow") stack unwinding routines
-// available to the tools on Linux, Android, and FreeBSD.
+// available to the tools on Linux, Android, NetBSD and FreeBSD.
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#include "sanitizer_common.h"
#include "sanitizer_stacktrace.h"
}
#endif
-#ifdef __arm__
+#if defined(__arm__) && !SANITIZER_NETBSD
+// NetBSD uses dwarf EH
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
-#include <dbghelp.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>
#include "sanitizer_common.h"
+#include "sanitizer_dbghelp.h"
+#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
-#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
+#include "sanitizer_win_defs.h"
+
+// A macro to tell the compiler that this part of the code cannot be reached,
+// if the compiler supports this feature. Since we're using this in
+// code that is called when terminating the process, the expansion of the
+// macro should not terminate the process to avoid infinite recursion.
+#if defined(__clang__)
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define BUILTIN_UNREACHABLE() __assume(0)
+#else
+# define BUILTIN_UNREACHABLE()
+#endif
namespace __sanitizer {
// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
-uptr GetTid() {
+tid_t GetTid() {
return GetCurrentThreadId();
}
}
}
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+ const char *mmap_type) {
+ error_t last_error = GetLastError();
+ if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ return rv;
+}
+
// We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr mapped_addr =
(uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (!mapped_addr)
- ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
// If we got it right on the first try, return. Otherwise, unmap it and go to
// the slow path.
mapped_addr =
(uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
if (!mapped_addr)
- ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
- GetLastError());
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
// Find the aligned address.
uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
// Fail if we can't make this work quickly.
if (retries == kMaxRetries && mapped_addr == 0)
- ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
return (void *)mapped_addr;
}
return p;
}
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ }
+ return p;
+}
+
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
// FIXME: make this really NoReserve?
return MmapOrDie(size, mem_type);
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
-
-void ReleaseMemoryToOS(uptr addr, uptr size) {
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
// This is almost useless on 32-bits.
// FIXME: add madvise-analog when we move to 64-bits.
}
// FIXME: add madvise-analog when we move to 64-bits.
}
-uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found) {
uptr address = 0;
while (true) {
MEMORY_BASIC_INFORMATION info;
}
#endif
+void PrintModuleMap() { }
+
void DisableCoreDumperIfNecessary() {
// Do nothing.
}
}
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
-#if !SANITIZER_GO
- CovPrepareForSandboxing(args);
-#endif
}
bool StackSizeIsUnlimited() {
}
void Abort() {
- if (::IsDebuggerPresent())
- __debugbreak();
internal__exit(3);
}
}
void ListOfModules::init() {
- clear();
+ clearOrInit();
HANDLE cur_process = GetCurrentProcess();
// Query the list of modules. Start by assuming there are no more than 256
LoadedModule cur_module;
cur_module.set(module_name, adjusted_base);
// We add the whole module as one single address range.
- cur_module.addAddressRange(base_address, end_address, /*executable*/ true);
+ cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
+ /*writable*/ true);
modules_.push_back(cur_module);
}
UnmapOrDie(hmodules, modules_buffer_size);
-};
+}
+
+void ListOfModules::fallbackInit() { clear(); }
// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
}
void internal__exit(int exitcode) {
- ExitProcess(exitcode);
+ // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
+ // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
+ // so add our own breakpoint here.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ TerminateProcess(GetCurrentProcess(), exitcode);
+ BUILTIN_UNREACHABLE();
}
uptr internal_ftruncate(fd_t fd, uptr size) {
stack_frame.AddrFrame.Mode = AddrModeFlat;
stack_frame.AddrStack.Mode = AddrModeFlat;
while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
- &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
- &SymGetModuleBase64, NULL) &&
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
size < Min(max_depth, kStackTraceMax)) {
trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
}
// FIXME: Decide what to do on Windows.
}
-bool IsHandledDeadlySignal(int signum) {
+HandleSignalMode GetHandleSignalMode(int signum) {
// FIXME: Decide what to do on Windows.
+ return kHandleSignalNo;
+}
+
+// Check based on flags if we should handle this exception.
+bool IsHandledDeadlyException(DWORD exceptionCode) {
+ switch (exceptionCode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ case EXCEPTION_STACK_OVERFLOW:
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ case EXCEPTION_PRIV_INSTRUCTION:
+ case EXCEPTION_BREAKPOINT:
+ return common_flags()->handle_sigill;
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_STACK_CHECK:
+ case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ return common_flags()->handle_sigfpe;
+ }
return false;
}
return true;
}
-SignalContext SignalContext::Create(void *siginfo, void *context) {
+bool SignalContext::IsStackOverflow() const {
+ return GetType() == EXCEPTION_STACK_OVERFLOW;
+}
+
+void SignalContext::InitPcSpBp() {
EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
CONTEXT *context_record = (CONTEXT *)context;
- uptr pc = (uptr)exception_record->ExceptionAddress;
+ pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
- uptr bp = (uptr)context_record->Rbp;
- uptr sp = (uptr)context_record->Rsp;
+ bp = (uptr)context_record->Rbp;
+ sp = (uptr)context_record->Rsp;
#else
- uptr bp = (uptr)context_record->Ebp;
- uptr sp = (uptr)context_record->Esp;
+ bp = (uptr)context_record->Ebp;
+ sp = (uptr)context_record->Esp;
#endif
- uptr access_addr = exception_record->ExceptionInformation[1];
+}
+uptr SignalContext::GetAddress() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ return exception_record->ExceptionInformation[1];
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ return GetWriteFlag() != SignalContext::UNKNOWN;
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
// The contents of this array are documented at
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
// The first element indicates read as 0, write as 1, or execute as 8. The
// second element is the faulting address.
- WriteFlag write_flag = SignalContext::UNKNOWN;
switch (exception_record->ExceptionInformation[0]) {
- case 0: write_flag = SignalContext::READ; break;
- case 1: write_flag = SignalContext::WRITE; break;
- case 8: write_flag = SignalContext::UNKNOWN; break;
+ case 0:
+ return SignalContext::READ;
+ case 1:
+ return SignalContext::WRITE;
+ case 8:
+ return SignalContext::UNKNOWN;
}
- bool is_memory_access = write_flag != SignalContext::UNKNOWN;
- return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
- write_flag);
+ return SignalContext::UNKNOWN;
+}
+
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
+}
+
+const char *SignalContext::Describe() const {
+ unsigned code = GetType();
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ return "array-bounds-exceeded";
+ case EXCEPTION_STACK_OVERFLOW:
+ return "stack-overflow";
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ return "datatype-misalignment";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
+ case EXCEPTION_PRIV_INSTRUCTION:
+ return "priv-instruction";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ return "flt-denormal-operand";
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ return "flt-divide-by-zero";
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ return "flt-inexact-result";
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ return "flt-invalid-operation";
+ case EXCEPTION_FLT_OVERFLOW:
+ return "flt-overflow";
+ case EXCEPTION_FLT_STACK_CHECK:
+ return "flt-stack-check";
+ case EXCEPTION_FLT_UNDERFLOW:
+ return "flt-underflow";
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ return "int-divide-by-zero";
+ case EXCEPTION_INT_OVERFLOW:
+ return "int-overflow";
+ }
+ return "unknown exception";
}
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
-} // namespace __sanitizer
-
-#if !SANITIZER_GO
-// Workaround to implement weak hooks on Windows. COFF doesn't directly support
-// weak symbols, but it does support /alternatename, which is similar. If the
-// user does not override the hook, we will use this default definition instead
-// of null.
-extern "C" void __sanitizer_print_memory_profile(int top_percent) {}
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ UNIMPLEMENTED();
+}
-#ifdef _WIN64
-#pragma comment(linker, "/alternatename:__sanitizer_print_memory_profile=__sanitizer_default_print_memory_profile") // NOLINT
-#else
-#pragma comment(linker, "/alternatename:___sanitizer_print_memory_profile=___sanitizer_default_print_memory_profile") // NOLINT
-#endif
-#endif
+} // namespace __sanitizer
#endif // _WIN32
--- /dev/null
+//===-- sanitizer_win.h -----------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Windows-specific declarations.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_H
+#define SANITIZER_WIN_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+// Check based on flags if we should handle the exception.
+bool IsHandledDeadlyException(DWORD exceptionCode);
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_H
--- /dev/null
+//===-- sanitizer_win_defs.h ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions for Windows-specific code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DEFS_H
+#define SANITIZER_WIN_DEFS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#ifndef WINAPI
+#ifdef _M_IX86
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+#endif
+
+#if defined(_WIN64)
+#define WIN_SYM_PREFIX
+#else
+#define WIN_SYM_PREFIX "_"
+#endif
+
+// Intermediate macro to ensure the parameter is expanded before stringified.
+#define STRINGIFY_(A) #A
+#define STRINGIFY(A) STRINGIFY_(A)
+
+// ----------------- A workaround for the absence of weak symbols --------------
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value.
+// Take into account that this is a pragma directive for the linker, so it will
+// be ignored by the compiler and the function will be marked as UNDEF in the
+// symbol table of the resulting object file. The linker won't find the default
+// implementation until it links with that object file.
+// So, suppose we provide a default implementation "fundef" for "fun", and this
+// is compiled into the object file "test.obj" including the pragma directive.
+// If we have some code with references to "fun" and we link that code with
+// "test.obj", it will work because the linker always link object files.
+// But, if "test.obj" is included in a static library, like "test.lib", then the
+// linker will only link to "test.obj" if necessary. If we only included the
+// definition of "fun", it won't link to "test.obj" (from test.lib) because
+// "fun" appears as UNDEF, so it doesn't resolve the symbol "fun", and will
+// result in a link error (the linker doesn't find the pragma directive).
+// So, a workaround is to force linkage with the modules that include weak
+// definitions, with the following macro: WIN_FORCE_LINK()
+
+#define WIN_WEAK_ALIAS(Name, Default) \
+ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY(Name) "="\
+ WIN_SYM_PREFIX STRINGIFY(Default)))
+
+#define WIN_FORCE_LINK(Name) \
+ __pragma(comment(linker, "/include:" WIN_SYM_PREFIX STRINGIFY(Name)))
+
+#define WIN_EXPORT(ExportedName, Name) \
+ __pragma(comment(linker, "/export:" WIN_SYM_PREFIX STRINGIFY(ExportedName) \
+ "=" WIN_SYM_PREFIX STRINGIFY(Name)))
+
+// We cannot define weak functions on Windows, but we can use WIN_WEAK_ALIAS()
+// which defines an alias to a default implementation, and only works when
+// linking statically.
+// So, to define a weak function "fun", we define a default implementation with
+// a different name "fun__def" and we create a "weak alias" fun = fun__def.
+// Then, users can override it just defining "fun".
+// We impose "extern "C"" because otherwise WIN_WEAK_ALIAS() will fail because
+// of name mangling.
+
+// Dummy name for default implementation of weak function.
+# define WEAK_DEFAULT_NAME(Name) Name##__def
+// Name for exported implementation of weak function.
+# define WEAK_EXPORT_NAME(Name) Name##__dll
+
+// Use this macro when you need to define and export a weak function from a
+// library. For example:
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_ALIAS(Name, WEAK_DEFAULT_NAME(Name)) \
+ WIN_EXPORT(WEAK_EXPORT_NAME(Name), Name) \
+ extern "C" ReturnType Name(__VA_ARGS__); \
+ extern "C" ReturnType WEAK_DEFAULT_NAME(Name)(__VA_ARGS__)
+
+// Use this macro when you need to import a weak function from a library. It
+// defines a weak alias to the imported function from the dll. For example:
+// WIN_WEAK_IMPORT_DEF(compare)
+# define WIN_WEAK_IMPORT_DEF(Name) \
+ WIN_WEAK_ALIAS(Name, WEAK_EXPORT_NAME(Name))
+
+// So, for Windows we provide something similar to weak symbols in Linux, with
+// some differences:
+// + A default implementation must always be provided.
+//
+// + When linking statically it works quite similarly. For example:
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// And it will work fine. If we don't override the function, we need to ensure
+// that the linker includes the object file with the default implementation.
+// We can do so with the linker option "-wholearchive:".
+//
+// + When linking dynamically with a library (dll), weak functions are exported
+// with "__dll" suffix. Clients can use the macro WIN_WEAK_IMPORT_DEF(fun)
+// which defines a "weak alias" fun = fun__dll.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// But if we override the function, the dlls don't have access to it (unlike
+// on Linux). If that is desired, the strong definition must be
+// exported and interception can be used from the rest of the dlls.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+// // When initialized, check if the main executable defined "compare".
+// int libExample_init() {
+// uptr fnptr = __interception::InternalGetProcAddress(
+// (void *)GetModuleHandleA(0), "compare");
+// if (fnptr && !__interception::OverrideFunction((uptr)compare, fnptr, 0))
+// abort();
+// return 0;
+// }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We override and export compare:
+// extern "C" __declspec(dllexport) bool compare (int a, int b) {
+// return a >= b;
+// }
+//
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_DEFS_H
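Tracing WIN_WEAK_EXPORT_DEF through the macros above may help; with the
"compare" example from the comment, the expansion is roughly as follows
(symbol prefixes omitted; a sketch, not an authoritative preprocessor dump):

  // WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
  __pragma(comment(linker, "/alternatename:compare=compare__def")) // weak alias
  __pragma(comment(linker, "/export:compare__dll=compare"))        // dll export
  extern "C" bool compare(int a, int b);            // overridable declaration
  extern "C" bool compare__def(int a, int b) { return a > b; }    // default body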
--- /dev/null
+//===-- sanitizer_win_dll_thunk.cc ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_defs.h"
+#include "sanitizer_win_dll_thunk.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name) {
+ uptr ret =
+ __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+int dllThunkIntercept(const char* main_function, uptr dll_function) {
+ uptr wrapper = dllThunkGetRealAddrOrDie(main_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function) {
+ uptr wrapper = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), main_function);
+ if (!wrapper)
+ wrapper = dllThunkGetRealAddrOrDie(default_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".DLLTH$A", read) // NOLINT
+#pragma section(".DLLTH$Z", read) // NOLINT
+
+typedef void (*DllThunkCB)();
+extern "C" {
+__declspec(allocate(".DLLTH$A")) DllThunkCB __start_dll_thunk;
+__declspec(allocate(".DLLTH$Z")) DllThunkCB __stop_dll_thunk;
+}
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+extern "C" int __dll_thunk_init() {
+ static bool flag = false;
+ // __dll_thunk_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (DllThunkCB *it = &__start_dll_thunk; it < &__stop_dll_thunk; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+// We want to call dll_thunk_init before C/C++ initializers / constructors are
+// executed, otherwise functions like memset might be invoked.
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__dll_thunk_preinit)() =
+ __dll_thunk_init;
+
+static void WINAPI dll_thunk_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __dll_thunk_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__dll_thunk_tls_init)(void *,
+ unsigned long, void *) = dll_thunk_thread_init;
+
+#endif // SANITIZER_DLL_THUNK
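The callback registration above relies on the MSVC linker merging grouped
sections in alphabetical order of their $-suffix, so pointers placed in
.DLLTH$M by the interception macros land between the markers allocated in
.DLLTH$A and .DLLTH$Z. A minimal standalone sketch of the same idiom (all
names here are illustrative, not part of the patch):

  #pragma section(".MYCB$A", read)
  #pragma section(".MYCB$M", read)
  #pragma section(".MYCB$Z", read)
  typedef void (*Callback)();
  __declspec(allocate(".MYCB$A")) Callback start_cb;  // zero-filled marker
  __declspec(allocate(".MYCB$Z")) Callback stop_cb;   // zero-filled marker
  static void hello() {}
  __declspec(allocate(".MYCB$M")) Callback reg_hello = hello;
  void run_all() {
    // Null entries (the markers and any alignment padding) are skipped.
    for (Callback *it = &start_cb; it < &stop_cb; ++it)
      if (*it) (*it)();
  }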
--- /dev/null
+//===-- sanitizer_win_dll_thunk.h -----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls to the shared runtime
+// that lives in the main executable. It should be included in the dll thunks
+// that are linked into the dlls when the sanitizer is a static library
+// included in the main executable.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DLL_THUNK_H
+#define SANITIZER_WIN_DLL_THUNK_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name);
+
+int dllThunkIntercept(const char* main_function, uptr dll_function);
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function);
+}
+
+extern "C" int __dll_thunk_init();
+
+// ----------------- Function interception helper macros -------------------- //
+// Override dll_function with main_function from main executable.
+#define INTERCEPT_OR_DIE(main_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkIntercept(main_function, (__sanitizer::uptr) \
+ dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// Try to override dll_function with main_function from main executable.
+// If main_function is not present, override dll_function with default_function.
+#define INTERCEPT_WHEN_POSSIBLE(main_function, default_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkInterceptWhenPossible(main_function, \
+ default_function, (__sanitizer::uptr)dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// -------------------- Function interception macros ------------------------ //
+// Special case of hooks -- ASan own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERCEPT_SANITIZER_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name)
+
+// Special case of hooks -- weak functions. They may be redefined in the main
+// executable, but that is not required, so we shouldn't die if we cannot find
+// a reference. Instead, when the function is not present in the main
+// executable, we fall back to the default impl provided by the asan library.
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, STRINGIFY(WEAK_EXPORT_NAME(name)), name)
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to the
+// function, since it can be called before __dll_thunk_init, but that only
+// leads to false negatives in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
+
+// Use these macros for functions that could be called before __dll_thunk_init()
+// is executed and don't lead to errors if defined (free, malloc, etc).
+#define INTERCEPT_WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#endif // SANITIZER_WIN_DLL_THUNK_H
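As an illustration of the wrapper family above (not part of the patch),
INTERCEPT_WRAP_W_W(malloc) expands to a forwarder that lazily resolves the
real implementation in the main executable on first call; arguments are
passed through as opaque pointer-sized values:

  extern "C" void *malloc(void *arg) {
    typedef decltype(malloc) *fntype;
    static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie("malloc");
    return fn(arg);
  }
  INTERCEPT_OR_DIE("malloc", malloc);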
--- /dev/null
+//===-- sanitizer_win_dynamic_runtime_thunk.cc ----------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Common, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer common.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_common_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
--- /dev/null
+//===-- sanitizer_win_weak_interception.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in the sanitizer when it is implemented as a
+// shared library on Windows (dll), in order to delegate the calls of weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS && SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_win_defs.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+// Try to get a pointer to real_function in the main module and override
+// dll_function with that pointer. If the function isn't found, nothing changes.
+int interceptWhenPossible(uptr dll_function, const char *real_function) {
+ uptr real = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), real_function);
+ if (real && !__interception::OverrideFunction((uptr)dll_function, real, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Declare weak hooks.
+extern "C" {
+void __sanitizer_weak_hook_memcmp(uptr called_pc, const void *s1,
+ const void *s2, uptr n, int result);
+void __sanitizer_weak_hook_strcmp(uptr called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strncmp(uptr called_pc, const char *s1,
+ const char *s2, uptr n, int result);
+void __sanitizer_weak_hook_strstr(uptr called_pc, const char *s1,
+ const char *s2, char *result);
+}
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".WEAK$A", read) // NOLINT
+#pragma section(".WEAK$Z", read) // NOLINT
+
+typedef void (*InterceptCB)();
+extern "C" {
+__declspec(allocate(".WEAK$A")) InterceptCB __start_weak_list;
+__declspec(allocate(".WEAK$Z")) InterceptCB __stop_weak_list;
+}
+
+static int weak_intercept_init() {
+ static bool flag = false;
+ // weak_interception_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (InterceptCB *it = &__start_weak_list; it < &__stop_weak_list; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__weak_intercept_preinit)() =
+ weak_intercept_init;
+
+static void WINAPI weak_intercept_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) weak_intercept_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void(WINAPI *__weak_intercept_tls_init)(
+ void *, unsigned long, void *) = weak_intercept_thread_init;
+
+#endif // SANITIZER_WINDOWS && SANITIZER_DYNAMIC
--- /dev/null
+//===-- sanitizer_win_weak_interception.h ---------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls of weak functions to the
+// implementation in the main executable when a strong definition is present.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_WEAK_INTERCEPTION_H
+#define SANITIZER_WIN_WEAK_INTERCEPTION_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+int interceptWhenPossible(uptr dll_function, const char *real_function);
+}
+
+// ----------------- Function interception helper macros -------------------- //
+// Weak functions may be redefined in the main executable, but that is not
+// required, so we shouldn't die if we cannot find a reference.
+#define INTERCEPT_WEAK(Name) interceptWhenPossible((uptr) Name, #Name);
+
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) \
+ static int intercept_##Name() { \
+ return __sanitizer::interceptWhenPossible((__sanitizer::uptr) Name, #Name);\
+ } \
+ __pragma(section(".WEAK$M", long, read)) \
+ __declspec(allocate(".WEAK$M")) int (*__weak_intercept_##Name)() = \
+ intercept_##Name;
+
+#endif // SANITIZER_WIN_WEAK_INTERCEPTION_H
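For reference, expanding INTERCEPT_SANITIZER_WEAK_FUNCTION for one of the
hooks declared in sanitizer_win_weak_interception.cc gives roughly the
following (an editorial sketch):

  static int intercept___sanitizer_weak_hook_strcmp() {
    return __sanitizer::interceptWhenPossible(
        (__sanitizer::uptr)__sanitizer_weak_hook_strcmp,
        "__sanitizer_weak_hook_strcmp");
  }
  __pragma(section(".WEAK$M", long, read))
  __declspec(allocate(".WEAK$M"))
  int (*__weak_intercept___sanitizer_weak_hook_strcmp)() =
      intercept___sanitizer_weak_hook_strcmp;
  // weak_intercept_init() later walks .WEAK$A..$Z and runs this callback.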
tsan_files = \
tsan_clock.cc \
tsan_debugging.cc \
+ tsan_external.cc \
tsan_fd.cc \
tsan_flags.cc \
tsan_ignoreset.cc \
"$(DESTDIR)$(toolexeclibdir)"
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
-am__objects_1 = tsan_clock.lo tsan_debugging.lo tsan_fd.lo \
- tsan_flags.lo tsan_ignoreset.lo tsan_interceptors.lo \
- tsan_interceptors_mac.lo tsan_interface_ann.lo \
- tsan_interface_atomic.lo tsan_interface.lo \
- tsan_interface_java.lo tsan_libdispatch_mac.lo \
- tsan_malloc_mac.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \
- tsan_mutexset.lo tsan_new_delete.lo tsan_platform_linux.lo \
- tsan_platform_mac.lo tsan_platform_posix.lo \
- tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
- tsan_rtl_mutex.lo tsan_rtl_proc.lo tsan_rtl_report.lo \
- tsan_rtl_thread.lo tsan_stack_trace.lo tsan_stat.lo \
- tsan_suppressions.lo tsan_symbolize.lo tsan_sync.lo
+am__objects_1 = tsan_clock.lo tsan_debugging.lo tsan_external.lo \
+ tsan_fd.lo tsan_flags.lo tsan_ignoreset.lo \
+ tsan_interceptors.lo tsan_interceptors_mac.lo \
+ tsan_interface_ann.lo tsan_interface_atomic.lo \
+ tsan_interface.lo tsan_interface_java.lo \
+ tsan_libdispatch_mac.lo tsan_malloc_mac.lo tsan_md5.lo \
+ tsan_mman.lo tsan_mutex.lo tsan_mutexset.lo tsan_new_delete.lo \
+ tsan_platform_linux.lo tsan_platform_mac.lo \
+ tsan_platform_posix.lo tsan_platform_windows.lo tsan_report.lo \
+ tsan_rtl.lo tsan_rtl_mutex.lo tsan_rtl_proc.lo \
+ tsan_rtl_report.lo tsan_rtl_thread.lo tsan_stack_trace.lo \
+ tsan_stat.lo tsan_suppressions.lo tsan_symbolize.lo \
+ tsan_sync.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
tsan_files = \
tsan_clock.cc \
tsan_debugging.cc \
+ tsan_external.cc \
tsan_fd.cc \
tsan_flags.cc \
tsan_ignoreset.cc \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_debugging.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_external.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_ignoreset.Plo@am__quote@
// an exclusive lock; ThreadClock's are private to respective threads and so
// do not need any protection.
//
-// Description of ThreadClock state:
-// clk_ - fixed size vector clock.
-// nclk_ - effective size of the vector clock (the rest is zeros).
-// tid_ - index of the thread associated with he clock ("current thread").
-// last_acquire_ - current thread time when it acquired something from
-// other threads.
-//
// Description of SyncClock state:
// clk_ - variable size vector clock, low kClkBits hold timestamp,
// the remaining bits hold "acquired" flag (the actual value is thread's
// reused counter);
// if acquried == thr->reused_, then the respective thread has already
-// acquired this clock (except possibly dirty_tids_).
-// dirty_tids_ - holds up to two indeces in the vector clock that other threads
+// acquired this clock (except possibly for dirty elements).
+// dirty_ - holds up to two indices in the vector clock that other threads
// need to acquire regardless of "acquired" flag value;
// release_store_tid_ - denotes that the clock state is a result of
// release-store operation by the thread with release_store_tid_ index.
namespace __tsan {
+static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
+ return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
+}
+
+// Drop reference to the first level block idx.
+static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ atomic_uint32_t *ref = ref_ptr(cb);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ for (;;) {
+ CHECK_GT(v, 0);
+ if (v == 1)
+ break;
+ if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
+ return;
+ }
+ // First level block owns second level blocks, so free them as well.
+ for (uptr i = 0; i < blocks; i++)
+ ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
+ ctx->clock_alloc.Free(c, idx);
+}
+
ThreadClock::ThreadClock(unsigned tid, unsigned reused)
: tid_(tid)
- , reused_(reused + 1) { // 0 has special meaning
+ , reused_(reused + 1) // 0 has special meaning
+ , cached_idx_()
+ , cached_size_()
+ , cached_blocks_() {
CHECK_LT(tid, kMaxTidInClock);
CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
nclk_ = tid_ + 1;
last_acquire_ = 0;
internal_memset(clk_, 0, sizeof(clk_));
- clk_[tid_].reused = reused_;
}
-void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
+void ThreadClock::ResetCached(ClockCache *c) {
+ if (cached_idx_) {
+ UnrefClockBlock(c, cached_idx_, cached_blocks_);
+ cached_idx_ = 0;
+ cached_size_ = 0;
+ cached_blocks_ = 0;
+ }
+}
+
+void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(src->size_, kMaxTid);
CPP_STAT_INC(StatClockAcquire);
return;
}
- // Check if we've already acquired src after the last release operation on src
bool acquired = false;
- if (nclk > tid_) {
- CPP_STAT_INC(StatClockAcquireLarge);
- if (src->elem(tid_).reused == reused_) {
- CPP_STAT_INC(StatClockAcquireRepeat);
- for (unsigned i = 0; i < kDirtyTids; i++) {
- unsigned tid = src->dirty_tids_[i];
- if (tid != kInvalidTid) {
- u64 epoch = src->elem(tid).epoch;
- if (clk_[tid].epoch < epoch) {
- clk_[tid].epoch = epoch;
- acquired = true;
- }
- }
- }
- if (acquired) {
- CPP_STAT_INC(StatClockAcquiredSomething);
- last_acquire_ = clk_[tid_].epoch;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty dirty = src->dirty_[i];
+ unsigned tid = dirty.tid;
+ if (tid != kInvalidTid) {
+ if (clk_[tid] < dirty.epoch) {
+ clk_[tid] = dirty.epoch;
+ acquired = true;
}
- return;
}
}
- // O(N) acquire.
- CPP_STAT_INC(StatClockAcquireFull);
- nclk_ = max(nclk_, nclk);
- for (uptr i = 0; i < nclk; i++) {
- u64 epoch = src->elem(i).epoch;
- if (clk_[i].epoch < epoch) {
- clk_[i].epoch = epoch;
- acquired = true;
+ // Check if we've already acquired src after the last release operation on src
+ if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
+ nclk_ = max(nclk_, nclk);
+ u64 *dst_pos = &clk_[0];
+ for (ClockElem &src_elem : *src) {
+ u64 epoch = src_elem.epoch;
+ if (*dst_pos < epoch) {
+ *dst_pos = epoch;
+ acquired = true;
+ }
+ dst_pos++;
}
- }
- // Remember that this thread has acquired this clock.
- if (nclk > tid_)
- src->elem(tid_).reused = reused_;
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+ }
if (acquired) {
CPP_STAT_INC(StatClockAcquiredSomething);
- last_acquire_ = clk_[tid_].epoch;
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
}
}
-void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
+void ThreadClock::release(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
// since the last release on dst. If so, we need to update
// only dst->elem(tid_).
if (dst->elem(tid_).epoch > last_acquire_) {
- UpdateCurrentThread(dst);
+ UpdateCurrentThread(c, dst);
if (dst->release_store_tid_ != tid_ ||
dst->release_store_reused_ != reused_)
dst->release_store_tid_ = kInvalidTid;
// O(N) release.
CPP_STAT_INC(StatClockReleaseFull);
+ dst->Unshare(c);
// First, remember whether we've acquired dst.
bool acquired = IsAlreadyAcquired(dst);
if (acquired)
CPP_STAT_INC(StatClockReleaseAcquired);
// Update dst->clk_.
- for (uptr i = 0; i < nclk_; i++) {
- ClockElem &ce = dst->elem(i);
- ce.epoch = max(ce.epoch, clk_[i].epoch);
+ dst->FlushDirty();
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = max(ce.epoch, clk_[i]);
ce.reused = 0;
+ i++;
}
// Clear 'acquired' flag in the remaining elements.
if (nclk_ < dst->size_)
CPP_STAT_INC(StatClockReleaseClearTail);
for (uptr i = nclk_; i < dst->size_; i++)
dst->elem(i).reused = 0;
- for (unsigned i = 0; i < kDirtyTids; i++)
- dst->dirty_tids_[i] = kInvalidTid;
dst->release_store_tid_ = kInvalidTid;
dst->release_store_reused_ = 0;
// If we've acquired dst, remember this fact,
dst->elem(tid_).reused = reused_;
}
-void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
CPP_STAT_INC(StatClockStore);
+ if (dst->size_ == 0 && cached_idx_ != 0) {
+ // Reuse the cached clock.
+ // Note: we could reuse/cache the cached clock in more cases:
+ // we could update the existing clock and cache it, or replace it with the
+ // currently cached clock and release the old one. And for a shared
+ // existing clock, we could replace it with the currently cached;
+ // or unshare, update and cache. But, for simplicity, we currently reuse the
+ // cached clock only when the target clock is empty.
+ dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
+ dst->tab_idx_ = cached_idx_;
+ dst->size_ = cached_size_;
+ dst->blocks_ = cached_blocks_;
+ CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+ // The cached clock is shared (immutable),
+ // so this is where we store the current clock.
+ dst->dirty_[0].tid = tid_;
+ dst->dirty_[0].epoch = clk_[tid_];
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
+ // Grab a reference.
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ return;
+ }
+
// Check if we need to resize dst.
if (dst->size_ < nclk_)
dst->Resize(c, nclk_);
dst->release_store_reused_ == reused_ &&
dst->elem(tid_).epoch > last_acquire_) {
CPP_STAT_INC(StatClockStoreFast);
- UpdateCurrentThread(dst);
+ UpdateCurrentThread(c, dst);
return;
}
// O(N) release-store.
CPP_STAT_INC(StatClockStoreFull);
- for (uptr i = 0; i < nclk_; i++) {
- ClockElem &ce = dst->elem(i);
- ce.epoch = clk_[i].epoch;
+ dst->Unshare(c);
+ // Note: dst can be larger than this ThreadClock.
+ // This is fine since clk_ beyond size is all zeros.
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = clk_[i];
ce.reused = 0;
+ i++;
}
- // Clear the tail of dst->clk_.
- if (nclk_ < dst->size_) {
- for (uptr i = nclk_; i < dst->size_; i++) {
- ClockElem &ce = dst->elem(i);
- ce.epoch = 0;
- ce.reused = 0;
- }
- CPP_STAT_INC(StatClockStoreTail);
- }
- for (unsigned i = 0; i < kDirtyTids; i++)
- dst->dirty_tids_[i] = kInvalidTid;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dst->dirty_[i].tid = kInvalidTid;
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Rememeber that we don't need to acquire it in future.
dst->elem(tid_).reused = reused_;
+
+ // If the resulting clock is cachable, cache it for future release operations.
+ // The clock is always cachable if we released to an empty sync object.
+ if (cached_idx_ == 0 && dst->Cachable()) {
+ // Grab a reference to the ClockBlock.
+ atomic_uint32_t *ref = ref_ptr(dst->tab_);
+ if (atomic_load(ref, memory_order_acquire) == 1)
+ atomic_store_relaxed(ref, 2);
+ else
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ cached_idx_ = dst->tab_idx_;
+ cached_size_ = dst->size_;
+ cached_blocks_ = dst->blocks_;
+ }
}
void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
}
// Updates only single element related to the current thread in dst->clk_.
-void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
+void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
// Update the threads time, but preserve 'acquired' flag.
- dst->elem(tid_).epoch = clk_[tid_].epoch;
-
for (unsigned i = 0; i < kDirtyTids; i++) {
- if (dst->dirty_tids_[i] == tid_) {
- CPP_STAT_INC(StatClockReleaseFast1);
- return;
- }
- if (dst->dirty_tids_[i] == kInvalidTid) {
- CPP_STAT_INC(StatClockReleaseFast2);
- dst->dirty_tids_[i] = tid_;
+ SyncClock::Dirty *dirty = &dst->dirty_[i];
+ const unsigned tid = dirty->tid;
+ if (tid == tid_ || tid == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast);
+ dirty->tid = tid_;
+ dirty->epoch = clk_[tid_];
return;
}
}
// Reset all 'acquired' flags, O(N).
+ // We are going to touch dst elements, so we need to unshare it.
+ dst->Unshare(c);
CPP_STAT_INC(StatClockReleaseSlow);
+ dst->elem(tid_).epoch = clk_[tid_];
for (uptr i = 0; i < dst->size_; i++)
dst->elem(i).reused = 0;
- for (unsigned i = 0; i < kDirtyTids; i++)
- dst->dirty_tids_[i] = kInvalidTid;
+ dst->FlushDirty();
}
-// Checks whether the current threads has already acquired src.
+// Checks whether the current thread has already acquired src.
bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
if (src->elem(tid_).reused != reused_)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
- unsigned tid = src->dirty_tids_[i];
- if (tid != kInvalidTid) {
- if (clk_[tid].epoch < src->elem(tid).epoch)
+ SyncClock::Dirty dirty = src->dirty_[i];
+ if (dirty.tid != kInvalidTid) {
+ if (clk_[dirty.tid] < dirty.epoch)
return false;
}
}
return true;
}
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid]);
+ clk_[tid] = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
+ printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
+}
+
+SyncClock::SyncClock() {
+ ResetImpl();
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_)
+ UnrefClockBlock(c, tab_idx_, blocks_);
+ ResetImpl();
+}
+
+void SyncClock::ResetImpl() {
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ blocks_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_[i].tid = kInvalidTid;
+}
+
void SyncClock::Resize(ClockCache *c, uptr nclk) {
CPP_STAT_INC(StatClockReleaseResize);
- if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
- RoundUpTo(size_, ClockBlock::kClockCount)) {
- // Growing within the same block.
+ Unshare(c);
+ if (nclk <= capacity()) {
// Memory is already allocated, just increase the size.
size_ = nclk;
return;
}
- if (nclk <= ClockBlock::kClockCount) {
+ if (size_ == 0) {
// Grow from 0 to one-level table.
CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
CHECK_EQ(tab_, 0);
CHECK_EQ(tab_idx_, 0);
- size_ = nclk;
tab_idx_ = ctx->clock_alloc.Alloc(c);
tab_ = ctx->clock_alloc.Map(tab_idx_);
internal_memset(tab_, 0, sizeof(*tab_));
- return;
- }
- // Growing two-level table.
- if (size_ == 0) {
- // Allocate first level table.
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- } else if (size_ <= ClockBlock::kClockCount) {
- // Transform one-level table to two-level table.
- u32 old = tab_idx_;
- tab_idx_ = ctx->clock_alloc.Alloc(c);
- tab_ = ctx->clock_alloc.Map(tab_idx_);
- internal_memset(tab_, 0, sizeof(*tab_));
- tab_->table[0] = old;
+ atomic_store_relaxed(ref_ptr(tab_), 1);
+ size_ = 1;
+ } else if (size_ > blocks_ * ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
+ uptr top = size_ - blocks_ * ClockBlock::kClockCount;
+ CHECK_LT(top, ClockBlock::kClockCount);
+ const uptr move = top * sizeof(tab_->clock[0]);
+ internal_memcpy(&new_cb->clock[0], tab_->clock, move);
+ internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
+ internal_memset(tab_->clock, 0, move);
+ append_block(idx);
}
- // At this point we have first level table allocated.
+ // At this point we have first level table allocated and all clock elements
+ // are evacuated from it to a second level block.
// Add second level tables as necessary.
- for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
- i < nclk; i += ClockBlock::kClockCount) {
+ while (nclk > capacity()) {
u32 idx = ctx->clock_alloc.Alloc(c);
ClockBlock *cb = ctx->clock_alloc.Map(idx);
internal_memset(cb, 0, sizeof(*cb));
- CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
- tab_->table[i/ClockBlock::kClockCount] = idx;
+ append_block(idx);
}
size_ = nclk;
}
-// Sets a single element in the vector clock.
-// This function is called only from weird places like AcquireGlobal.
-void ThreadClock::set(unsigned tid, u64 v) {
- DCHECK_LT(tid, kMaxTid);
- DCHECK_GE(v, clk_[tid].epoch);
- clk_[tid].epoch = v;
- if (nclk_ <= tid)
- nclk_ = tid + 1;
- last_acquire_ = clk_[tid_].epoch;
-}
-
-void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
- printf("clock=[");
- for (uptr i = 0; i < nclk_; i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
- printf("] reused=[");
- for (uptr i = 0; i < nclk_; i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
- printf("] tid=%u/%u last_acq=%llu",
- tid_, reused_, last_acquire_);
+// Flushes all dirty elements into the main clock array.
+void SyncClock::FlushDirty() {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty *dirty = &dirty_[i];
+ if (dirty->tid != kInvalidTid) {
+ CHECK_LT(dirty->tid, size_);
+ elem(dirty->tid).epoch = dirty->epoch;
+ dirty->tid = kInvalidTid;
+ }
+ }
}
-SyncClock::SyncClock()
- : release_store_tid_(kInvalidTid)
- , release_store_reused_()
- , tab_()
- , tab_idx_()
- , size_() {
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_tids_[i] = kInvalidTid;
+bool SyncClock::IsShared() const {
+ if (size_ == 0)
+ return false;
+ atomic_uint32_t *ref = ref_ptr(tab_);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ CHECK_GT(v, 0);
+ return v > 1;
}
-SyncClock::~SyncClock() {
- // Reset must be called before dtor.
- CHECK_EQ(size_, 0);
- CHECK_EQ(tab_, 0);
- CHECK_EQ(tab_idx_, 0);
+// Unshares the current clock if it's shared.
+// Shared clocks are immutable, so they need to be unshared before any updates.
+// Note: this does not apply to dirty entries as they are not shared.
+void SyncClock::Unshare(ClockCache *c) {
+ if (!IsShared())
+ return;
+ // First, copy current state into old.
+ SyncClock old;
+ old.tab_ = tab_;
+ old.tab_idx_ = tab_idx_;
+ old.size_ = size_;
+ old.blocks_ = blocks_;
+ old.release_store_tid_ = release_store_tid_;
+ old.release_store_reused_ = release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ old.dirty_[i] = dirty_[i];
+ // Then, clear current object.
+ ResetImpl();
+ // Allocate brand new clock in the current object.
+ Resize(c, old.size_);
+ // Now copy state back into this object.
+ Iter old_iter(&old);
+ for (ClockElem &ce : *this) {
+ ce = *old_iter;
+ ++old_iter;
+ }
+ release_store_tid_ = old.release_store_tid_;
+ release_store_reused_ = old.release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dirty_[i] = old.dirty_[i];
+ // Drop reference to old and delete if necessary.
+ old.Reset(c);
}
-void SyncClock::Reset(ClockCache *c) {
- if (size_ == 0) {
- // nothing
- } else if (size_ <= ClockBlock::kClockCount) {
- // One-level table.
- ctx->clock_alloc.Free(c, tab_idx_);
- } else {
- // Two-level table.
- for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
- ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
- ctx->clock_alloc.Free(c, tab_idx_);
+// Can we cache this clock for future release operations?
+ALWAYS_INLINE bool SyncClock::Cachable() const {
+ if (size_ == 0)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dirty_[i].tid != kInvalidTid)
+ return false;
}
- tab_ = 0;
- tab_idx_ = 0;
- size_ = 0;
- release_store_tid_ = kInvalidTid;
- release_store_reused_ = 0;
- for (uptr i = 0; i < kDirtyTids; i++)
- dirty_tids_[i] = kInvalidTid;
+ return atomic_load_relaxed(ref_ptr(tab_)) == 1;
}
-ClockElem &SyncClock::elem(unsigned tid) const {
+// elem linearizes the two-level structure into a linear array.
+// Note: this is used only for one-time accesses; vector operations use
+// the iterator, as it is much faster.
+ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
DCHECK_LT(tid, size_);
- if (size_ <= ClockBlock::kClockCount)
+ const uptr block = tid / ClockBlock::kClockCount;
+ DCHECK_LE(block, blocks_);
+ tid %= ClockBlock::kClockCount;
+ if (block == blocks_)
return tab_->clock[tid];
- u32 idx = tab_->table[tid / ClockBlock::kClockCount];
+ u32 idx = get_block(block);
ClockBlock *cb = ctx->clock_alloc.Map(idx);
- return cb->clock[tid % ClockBlock::kClockCount];
+ return cb->clock[tid];
+}
+
+ALWAYS_INLINE uptr SyncClock::capacity() const {
+ if (size_ == 0)
+ return 0;
+ uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
+ // How many clock elements we can fit into the first level block.
+ // +1 for ref counter.
+ uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
+ return blocks_ * ClockBlock::kClockCount + top;
+}
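A worked example of capacity(), assuming the 512-byte ClockBlock from
tsan_defs.h (8-byte ClockElem, 4-byte table slot, so ratio == 2):

  // blocks_ == 0: top = 64 - RoundUpTo(1, 2) / 2 = 63  ->  capacity() == 63
  // blocks_ == 1: top = 64 - RoundUpTo(2, 2) / 2 = 63  ->  capacity() == 127
  // blocks_ == 2: top = 64 - RoundUpTo(3, 2) / 2 = 62  ->  capacity() == 190
  // The ref counter and each pair of block indices displace one 8-byte
  // ClockElem from the tail of the first-level block.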
+
+ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
+ DCHECK(size_);
+ DCHECK_LT(bi, blocks_);
+ return tab_->table[ClockBlock::kBlockIdx - bi];
+}
+
+ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
+ uptr bi = blocks_++;
+ CHECK_EQ(get_block(bi), 0);
+ tab_->table[ClockBlock::kBlockIdx - bi] = idx;
+}
+
+// Used only by tests.
+u64 SyncClock::get(unsigned tid) const {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty dirty = dirty_[i];
+ if (dirty.tid == tid)
+ return dirty.epoch;
+ }
+ return elem(tid).epoch;
+}
+
+// Used only by Iter test.
+u64 SyncClock::get_clean(unsigned tid) const {
+ return elem(tid).epoch;
}
void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
printf("] reused=[");
for (uptr i = 0; i < size_; i++)
printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
- printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
+ printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
release_store_tid_, release_store_reused_,
- dirty_tids_[0], dirty_tids_[1]);
+ dirty_[0].tid, dirty_[0].epoch,
+ dirty_[1].tid, dirty_[1].epoch);
+}
+
+void SyncClock::Iter::Next() {
+ // Finished with the current block, move on to the next one.
+ block_++;
+ if (block_ < parent_->blocks_) {
+ // Iterate over the next second level block.
+ u32 idx = parent_->get_block(block_);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ pos_ = &cb->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ if (block_ == parent_->blocks_ &&
+ parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
+ // Iterate over elements in the first level block.
+ pos_ = &parent_->tab_->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ parent_ = nullptr; // denotes end
}
} // namespace __tsan
namespace __tsan {
-struct ClockElem {
- u64 epoch : kClkBits;
- u64 reused : 64 - kClkBits;
-};
-
-struct ClockBlock {
- static const uptr kSize = 512;
- static const uptr kTableSize = kSize / sizeof(u32);
- static const uptr kClockCount = kSize / sizeof(ClockElem);
-
- union {
- u32 table[kTableSize];
- ClockElem clock[kClockCount];
- };
-
- ClockBlock() {
- }
-};
-
typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
SyncClock();
~SyncClock();
- uptr size() const {
- return size_;
- }
+ uptr size() const;
- u64 get(unsigned tid) const {
- return elem(tid).epoch;
- }
+ // These are used only in tests.
+ u64 get(unsigned tid) const;
+ u64 get_clean(unsigned tid) const;
void Resize(ClockCache *c, uptr nclk);
void Reset(ClockCache *c);
void DebugDump(int(*printf)(const char *s, ...));
+ // Clock element iterator.
+ // Note: it iterates only over the table without regard to dirty entries.
+ class Iter {
+ public:
+ explicit Iter(SyncClock* parent);
+ Iter& operator++();
+ bool operator!=(const Iter& other);
+ ClockElem &operator*();
+
+ private:
+ SyncClock *parent_;
+ // [pos_, end_) is the current contiguous range of clock elements.
+ ClockElem *pos_;
+ ClockElem *end_;
+ int block_; // Index of the current second level block.
+
+ NOINLINE void Next();
+ };
+
+ Iter begin();
+ Iter end();
+
private:
- friend struct ThreadClock;
+ friend class ThreadClock;
+ friend class Iter;
static const uptr kDirtyTids = 2;
+ struct Dirty {
+ u64 epoch : kClkBits;
+ u64 tid : 64 - kClkBits; // kInvalidTid if not active
+ };
+
unsigned release_store_tid_;
unsigned release_store_reused_;
- unsigned dirty_tids_[kDirtyTids];
- // tab_ contains indirect pointer to a 512b block using DenseSlabAlloc.
- // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
- // Otherwise, tab_ points to an array with 128 u32 elements,
+ Dirty dirty_[kDirtyTids];
+ // If size_ is 0, tab_ is nullptr.
+ // If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
+ // 64 ClockElem's (ClockBlock::clock).
+ // Otherwise, tab_ points to an array with up to 127 u32 elements,
// each pointing to the second-level 512b block with 64 ClockElem's.
+ // Unused space in the first level ClockBlock is used to store additional
+ // clock elements.
+ // The last u32 element in the first level ClockBlock is always used as
+ // reference counter.
+ //
+ // See the following scheme for details.
+ // All memory blocks are 512 bytes (allocated from ClockAlloc).
+ // Clock (clk) elements are 64 bits.
+ // Idx and ref are 32 bits.
+ //
+ // tab_
+ // |
+ // \/
+ // +----------------------------------------------------+
+ // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
+ // +----------------------------------------------------+
+ // | |
+ // | \/
+ // | +----------------+
+ // | | clk0 ... clk63 |
+ // | +----------------+
+ // \/
+ // +------------------+
+ // | clk64 ... clk127 |
+ // +------------------+
+ //
+ // Note: dirty entries, if active, always override what's stored in the clock.
ClockBlock *tab_;
u32 tab_idx_;
- u32 size_;
-
+ u16 size_;
+ u16 blocks_; // Number of second level blocks.
+
+ void Unshare(ClockCache *c);
+ bool IsShared() const;
+ bool Cachable() const;
+ void ResetImpl();
+ void FlushDirty();
+ uptr capacity() const;
+ u32 get_block(uptr bi) const;
+ void append_block(u32 idx);
ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
-struct ThreadClock {
+class ThreadClock {
public:
typedef DenseSlabAllocCache Cache;
explicit ThreadClock(unsigned tid, unsigned reused = 0);
- u64 get(unsigned tid) const {
- DCHECK_LT(tid, kMaxTidInClock);
- return clk_[tid].epoch;
- }
-
- void set(unsigned tid, u64 v);
-
- void set(u64 v) {
- DCHECK_GE(v, clk_[tid_].epoch);
- clk_[tid_].epoch = v;
- }
+ u64 get(unsigned tid) const;
+ void set(ClockCache *c, unsigned tid, u64 v);
+ void set(u64 v);
+ void tick();
+ uptr size() const;
- void tick() {
- clk_[tid_].epoch++;
- }
-
- uptr size() const {
- return nclk_;
- }
-
- void acquire(ClockCache *c, const SyncClock *src);
- void release(ClockCache *c, SyncClock *dst) const;
+ void acquire(ClockCache *c, SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst);
void acq_rel(ClockCache *c, SyncClock *dst);
- void ReleaseStore(ClockCache *c, SyncClock *dst) const;
+ void ReleaseStore(ClockCache *c, SyncClock *dst);
+ void ResetCached(ClockCache *c);
void DebugReset();
void DebugDump(int(*printf)(const char *s, ...));
private:
static const uptr kDirtyTids = SyncClock::kDirtyTids;
+ // Index of the thread associated with the clock ("current thread").
const unsigned tid_;
- const unsigned reused_;
+ const unsigned reused_; // tid_ reuse count.
+ // Current thread time when it acquired something from other threads.
u64 last_acquire_;
+
+ // Cached SyncClock (without dirty entries and release_store_tid_).
+ // We reuse it for subsequent store-release operations without intervening
+ // acquire operations. Since it is shared (and thus constant), the clock
+ // value for the current thread is stored in dirty entries in the SyncClock.
+ // We hold a reference to the table while it is cached here.
+ u32 cached_idx_;
+ u16 cached_size_;
+ u16 cached_blocks_;
+
+ // Number of active elements in the clk_ table (the rest is zeros).
uptr nclk_;
- ClockElem clk_[kMaxTidInClock];
+ u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
bool IsAlreadyAcquired(const SyncClock *src) const;
- void UpdateCurrentThread(SyncClock *dst) const;
+ void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
+ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
+ DCHECK_LT(tid, kMaxTidInClock);
+ return clk_[tid];
+}
+
+ALWAYS_INLINE void ThreadClock::set(u64 v) {
+ DCHECK_GE(v, clk_[tid_]);
+ clk_[tid_] = v;
+}
+
+ALWAYS_INLINE void ThreadClock::tick() {
+ clk_[tid_]++;
+}
+
+ALWAYS_INLINE uptr ThreadClock::size() const {
+ return nclk_;
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
+ return Iter(this);
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
+ return Iter(nullptr);
+}
+
+ALWAYS_INLINE uptr SyncClock::size() const {
+ return size_;
+}
+
+ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
+ : parent_(parent)
+ , pos_(nullptr)
+ , end_(nullptr)
+ , block_(-1) {
+ if (parent)
+ Next();
+}
+
+ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
+ pos_++;
+ if (UNLIKELY(pos_ >= end_))
+ Next();
+ return *this;
+}
+
+ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
+ return parent_ != other.parent_;
+}
+
+ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
+ return *pos_;
+}
} // namespace __tsan
#endif // TSAN_CLOCK_H
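Taken together, begin()/end() and Iter make SyncClock a valid range, which is
what the ranged-for loops in tsan_clock.cc rely on. A usage sketch (editorial,
not part of the patch):

  // Copies raw epochs out of a SyncClock. Elements are visited for tids
  // 0 .. sc.size()-1 in order: each second-level block first, then the tail
  // elements kept in the first-level block. Dirty entries are deliberately
  // not visited; callers flush or consult them separately.
  void CopyEpochs(__tsan::SyncClock &sc, __tsan::u64 *out) {
    for (__tsan::ClockElem &ce : sc)
      *out++ = ce.epoch;
  }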
#include "tsan_report.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
using namespace __tsan;
static const char *ReportTypeDescription(ReportType typ) {
if (typ == ReportTypeVptrRace) return "data-race-vptr";
if (typ == ReportTypeUseAfterFree) return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr";
+ if (typ == ReportTypeExternalRace) return "external-race";
if (typ == ReportTypeThreadLeak) return "thread-leak";
if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy";
if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock";
return 1;
}
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *object_type = GetObjectTypeFromTag(loc->external_tag);
+ return 1;
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
int *destroyed, void **trace, uptr trace_size) {
}
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size) {
const ReportDesc *rep = (ReportDesc *)report;
*tid = rep->unique_tids[idx];
return 1;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(addr)) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(addr)) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry->Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry->Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
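A hypothetical debugger-side caller of the two entry points above
(illustrative only; addr stands for an address of interest in the stopped
process):

  char name[128];
  uptr start = 0, size = 0;
  const char *kind = __tsan_locate_address(addr, name, sizeof(name),
                                           &start, &size);
  // kind is "heap", "stack", "tls", "global", "shadow" or "meta shadow".
  if (internal_strcmp(kind, "heap") == 0) {
    uptr trace[64];
    int tid;
    tid_t os_id;
    int n = __tsan_get_alloc_stack(start, trace, 64, &tid, &os_id);
    // trace[0..n) now holds the allocation stack of the owning block.
  }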
namespace __tsan {
+const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits; // tid reuse count
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+ static const uptr kRefIdx = kTableSize - 1;
+ static const uptr kBlockIdx = kTableSize - 2;
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
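With kSize = 512, the constants above work out as follows (an editorial
sketch; assumes a 4-byte u32 and the 8-byte ClockElem defined just before):

  COMPILER_CHECK(ClockBlock::kTableSize == 128);  // 512 / sizeof(u32)
  COMPILER_CHECK(ClockBlock::kClockCount == 64);  // 512 / sizeof(ClockElem)
  COMPILER_CHECK(ClockBlock::kRefIdx == 127);  // last u32: reference counter
  COMPILER_CHECK(ClockBlock::kBlockIdx == 126);  // block i at table[126 - i]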
+
const int kTidBits = 13;
-const unsigned kMaxTid = 1 << kTidBits;
+// Reduce kMaxTid by kClockCount because one slot in the ClockBlock table is
+// occupied by the reference counter, so the total number of elements we can
+// store in SyncClock is kClockCount * (kTableSize - 1).
+const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
#endif
-const int kClkBits = 42;
-const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
const bool kCollectHistory = true;
#endif
-const unsigned kInvalidTid = (unsigned)-1;
+const u16 kInvalidTid = kMaxTid + 1;
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to
// Descriptor of user's memory block.
struct MBlock {
- u64 siz;
+ u64 siz : 48;
+ u64 tag : 16;
u32 stk;
u16 tid;
};
COMPILER_CHECK(sizeof(MBlock) == 16);
+enum ExternalTag : uptr {
+ kExternalTagNone = 0,
+ kExternalTagSwiftModifyingAccess = 1,
+ kExternalTagFirstUserAvailable = 2,
+ kExternalTagMax = 1024,
+ // Don't set kExternalTagMax over 65,536, since MBlock only stores tags
+ // as 16-bit values; see tsan_defs.h.
+};
+
} // namespace __tsan
#endif // TSAN_DEFS_H
typedef DenseSlabAllocCache Cache;
typedef typename Cache::IndexT IndexT;
- DenseSlabAlloc() {
+ explicit DenseSlabAlloc(const char *name) {
// Check that kL1Size and kL2Size are sane.
CHECK_EQ(kL1Size & (kL1Size - 1), 0);
CHECK_EQ(kL2Size & (kL2Size - 1), 0);
internal_memset(map_, 0, sizeof(map_));
freelist_ = 0;
fillpos_ = 0;
+ name_ = name;
}
~DenseSlabAlloc() {
SpinMutex mtx_;
IndexT freelist_;
uptr fillpos_;
+ const char *name_;
void Refill(Cache *c) {
SpinMutexLock lock(&mtx_);
if (freelist_ == 0) {
if (fillpos_ == kL1Size) {
- Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
+ name_, kL1Size, kL2Size);
Die();
}
- T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
+ name_, fillpos_, kL1Size, kL2Size);
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
// Reserve 0 as invalid index.
IndexT start = fillpos_ == 0 ? 1 : 0;
for (IndexT i = start; i < kL2Size; i++) {
--- /dev/null
+//===-- tsan_external.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+#include "tsan_interceptors.h"
+
+namespace __tsan {
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+struct TagData {
+ const char *object_type;
+ const char *header;
+};
+
+static TagData registered_tags[kExternalTagMax] = {
+ {},
+ {"Swift variable", "Swift access race"},
+};
+static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable}; // NOLINT.
+static TagData *GetTagData(uptr tag) {
+ // Invalid/corrupted tag? Better return NULL and let the caller deal with it.
+ if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
+  return &registered_tags[tag];
+}
+
+const char *GetObjectTypeFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->object_type : nullptr;
+}
+
+const char *GetReportHeaderFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->header : nullptr;
+}
+
+void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
+  FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+
+uptr TagFromShadowStackFrame(uptr pc) {
+ uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
+ void *pc_ptr = (void *)pc;
+ if (pc_ptr < GetTagData(0) || pc_ptr > GetTagData(tag_count - 1))
+ return 0;
+ return (TagData *)pc_ptr - GetTagData(0);
+}
+
+#if !SANITIZER_GO
+
+typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
+void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+ InsertShadowStackFrameForTag(thr, (uptr)tag);
+ bool in_ignored_lib;
+ if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+ access(thr, CALLERPC, (uptr)addr, kSizeLog1);
+ }
+ FuncExit(thr);
+ if (caller_pc) FuncExit(thr);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type) {
+ uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
+ CHECK_LT(new_tag, kExternalTagMax);
+ GetTagData(new_tag)->object_type = internal_strdup(object_type);
+ char header[127] = {0};
+ internal_snprintf(header, sizeof(header), "race on %s", object_type);
+ GetTagData(new_tag)->header = internal_strdup(header);
+ return (void *)new_tag;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header) {
+ CHECK_GE((uptr)tag, kExternalTagFirstUserAvailable);
+ CHECK_LT((uptr)tag, kExternalTagMax);
+ atomic_uintptr_t *header_ptr =
+ (atomic_uintptr_t *)&GetTagData((uptr)tag)->header;
+ header = internal_strdup(header);
+ char *old_header =
+ (char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
+ if (old_header) internal_free(old_header);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ Allocator *a = allocator();
+ MBlock *b = nullptr;
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b) {
+ b->tag = (uptr)tag;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryRead);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryWrite);
+}
+} // extern "C"
+
+#endif // !SANITIZER_GO
+
+} // namespace __tsan
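For illustration, a client runtime (say, one that wants races on its objects reported under a custom header) would drive this interface roughly as below. The __tsan_external_* prototypes mirror the definitions above; everything else is hypothetical.

// Hypothetical client of the external-race-detection API; not part of the
// patch.
extern "C" {
void *__tsan_external_register_tag(const char *object_type);
void __tsan_external_register_header(void *tag, const char *header);
void __tsan_external_assign_tag(void *addr, void *tag);
void __tsan_external_read(void *addr, void *caller_pc, void *tag);
void __tsan_external_write(void *addr, void *caller_pc, void *tag);
}

static void *g_collection_tag;  // hypothetical global

void DetectorInit() {
  g_collection_tag = __tsan_external_register_tag("MyCollection");
  __tsan_external_register_header(g_collection_tag, "race on MyCollection");
}

void OnCollectionCreated(void *obj) {
  // Optional: lets reports name the object even without an access.
  __tsan_external_assign_tag(obj, g_collection_tag);
}

void OnCollectionRead(void *obj) {
  // caller_pc may be null; the caller's return address gives better reports.
  __tsan_external_read(obj, __builtin_return_address(0), g_collection_tag);
}

void OnCollectionWrite(void *obj) {
  __tsan_external_write(obj, __builtin_return_address(0), g_collection_tag);
}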
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
- FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
- false);
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
namespace __tsan {
-Flags *flags() {
- return &ctx->flags;
-}
-
// Can be overriden in frontend.
#ifdef TSAN_EXTERNAL_HOOKS
extern "C" const char* __tsan_default_options();
void ParseFromString(const char *str);
};
-Flags *flags();
void InitializeFlags(Flags *flags, const char *env);
} // namespace __tsan
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
TSAN_FLAG(bool, ignore_interceptors_accesses, false,
"Ignore reads and writes from all interceptors.")
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+ "Interceptors should only detect races when called from instrumented "
+ "modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
"Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_mman.h"
#include "tsan_fd.h"
-#if SANITIZER_POSIX
-#include "sanitizer_common/sanitizer_posix.h"
-#endif
using namespace __tsan; // NOLINT
#if SANITIZER_FREEBSD || SANITIZER_MAC
-#define __errno_location __error
#define stdout __stdoutp
#define stderr __stderrp
#endif
-#if SANITIZER_ANDROID
-#define __errno_location __errno
-#define mallopt(a, b)
+#if SANITIZER_NETBSD
+#define dirfd(dirp) (*(int *)(dirp))
+#define fileno_unlocked fileno
+#define stdout __sF[1]
+#define stderr __sF[2]
#endif
-#if SANITIZER_LINUX || SANITIZER_FREEBSD
-#define PTHREAD_CREATE_DETACHED 1
-#elif SANITIZER_MAC
-#define PTHREAD_CREATE_DETACHED 2
+#if SANITIZER_ANDROID
+#define mallopt(a, b)
#endif
-
#ifdef __mips__
const int kSigCount = 129;
#else
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
-extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
+#if !SANITIZER_NETBSD
extern "C" int dirfd(void *dirp);
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
extern "C" int mallopt(int param, int value);
#endif
+#if SANITIZER_NETBSD
+extern __sanitizer_FILE **__sF;
+#else
extern __sanitizer_FILE *stdout, *stderr;
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
-const int EINVAL = 22;
-const int EBUSY = 16;
-const int EOWNERDEAD = 130;
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
-#if !SANITIZER_MAC
+#if SANITIZER_NETBSD
+const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
+#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
# define F_TEST 3 /* Test a region for other processes locks. */
-#define errno (*__errno_location())
-
typedef void (*sighandler_t)(int sig);
typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
__sanitizer_sigset_t sa_mask;
void (*sa_restorer)();
};
+#elif SANITIZER_NETBSD
+struct sigaction_t {
+ union {
+ sighandler_t sa_handler;
+ sigactionhandler_t sa_sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
#else
struct sigaction_t {
#ifdef __mips__
const sighandler_t SIG_DFL = (sighandler_t)0;
const sighandler_t SIG_IGN = (sighandler_t)1;
const sighandler_t SIG_ERR = (sighandler_t)-1;
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
// The object is 64-byte aligned, because we want hot data to be located in
// a single cache line if possible (it's accessed in every interceptor).
static ALIGNED(64) char libignore_placeholder[sizeof(LibIgnore)];
-static LibIgnore *libignore() {
+LibIgnore *libignore() {
return reinterpret_cast<LibIgnore*>(&libignore_placeholder[0]);
}
if (0 == internal_strcmp(s->type, kSuppressionLib))
libignore()->AddIgnoredLibrary(s->templ);
}
+ if (flags()->ignore_noninstrumented_modules)
+ libignore()->IgnoreNoninstrumentedModules(true);
libignore()->OnLibraryLoaded(0);
}
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
- : thr_(thr)
- , pc_(pc)
- , in_ignored_lib_(false) {
+ : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
Initialize(thr);
- if (!thr_->is_inited)
- return;
- if (!thr_->ignore_interceptors)
- FuncEntry(thr, pc);
+ if (!thr_->is_inited) return;
+ if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
- if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
- in_ignored_lib_ = true;
- thr_->in_ignored_lib = true;
- ThreadIgnoreBegin(thr_, pc_);
- }
- if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
+ ignoring_ =
+ !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
+ libignore()->IsIgnored(pc, &in_ignored_lib_));
+ EnableIgnores();
}
ScopedInterceptor::~ScopedInterceptor() {
- if (!thr_->is_inited)
- return;
- if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
- if (in_ignored_lib_) {
- thr_->in_ignored_lib = false;
- ThreadIgnoreEnd(thr_, pc_);
- }
+ if (!thr_->is_inited) return;
+ DisableIgnores();
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
}
}
-void ScopedInterceptor::UserCallbackStart() {
- if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
- if (in_ignored_lib_) {
- thr_->in_ignored_lib = false;
- ThreadIgnoreEnd(thr_, pc_);
+void ScopedInterceptor::EnableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
+ }
}
}
-void ScopedInterceptor::UserCallbackEnd() {
- if (in_ignored_lib_) {
- thr_->in_ignored_lib = true;
- ThreadIgnoreBegin(thr_, pc_);
+void ScopedInterceptor::DisableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreEnd(thr_, pc_);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
+ }
}
- if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
}
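The refactoring above makes the bookkeeping symmetric: the decision to ignore is made once in the constructor (ignoring_), and EnableIgnores/DisableIgnores can then be re-invoked around user callbacks without re-testing the flags. A standalone analogue of the protocol, with all names invented for the sketch:

#include <cassert>

// Standalone analogue of the ignore protocol; illustrative names only.
struct Thread { int ignore_depth = 0; };

class ScopedIgnore {
 public:
  ScopedIgnore(Thread *t, bool ignoring) : t_(t), ignoring_(ignoring) {
    EnableIgnores();
  }
  ~ScopedIgnore() { DisableIgnores(); }
  // User callbacks run with ignores dropped, mirroring
  // SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START/END.
  void DisableIgnores() { if (ignoring_) t_->ignore_depth--; }
  void EnableIgnores() { if (ignoring_) t_->ignore_depth++; }
 private:
  Thread *t_;
  const bool ignoring_;  // decided once, like ignoring_ above
};

int main() {
  Thread t;
  {
    ScopedIgnore si(&t, /*ignoring=*/true);
    assert(t.ignore_depth == 1);
    si.DisableIgnores();  // about to call back into user code
    assert(t.ignore_depth == 0);
    si.EnableIgnores();   // user callback returned
  }
  assert(t.ignore_depth == 0);
  return 0;
}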
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
return res;
}
+TSAN_INTERCEPTOR(int, pause) {
+ SCOPED_TSAN_INTERCEPTOR(pause);
+ return BLOCK_REAL(pause)();
+}
+
// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
static void LongJmp(ThreadState *thr, uptr *env) {
#ifdef __powerpc__
uptr mangled_sp = env[0];
-#elif SANITIZER_FREEBSD || SANITIZER_MAC
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
uptr mangled_sp = env[2];
+#elif SANITIZER_MAC
+# ifdef __aarch64__
+ uptr mangled_sp = env[13];
+# else
+ uptr mangled_sp = env[2];
+# endif
#elif defined(SANITIZER_LINUX)
# ifdef __aarch64__
uptr mangled_sp = env[13];
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (*addr) {
if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
if (flags & MAP_FIXED) {
- errno = EINVAL;
+ errno = errno_EINVAL;
return false;
} else {
*addr = 0;
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
- SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_valloc(thr, pc, sz);
}
#endif
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
- sz = RoundUp(sz, GetPageSizeCached());
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
- *memptr = user_alloc(thr, pc, sz, align);
- return 0;
+ return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif
internal_sched_yield();
Processor *proc = ProcCreate();
ProcWire(proc, thr);
- ThreadStart(thr, tid, GetTid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
atomic_store(&p->tid, 0, memory_order_release);
}
void *res = callback(param);
ThreadIgnoreEnd(thr, pc);
}
if (res == 0) {
- int tid = ThreadCreate(thr, pc, *(uptr*)th,
- detached == PTHREAD_CREATE_DETACHED);
+ int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
CHECK_NE(tid, 0);
// Synchronization on p.tid serves two purposes:
// 1. ThreadCreate must finish before the new thread starts.
ThreadSignalContext *ctx = SigCtx(arg->thr);
CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
- MutexLock(arg->thr, arg->pc, (uptr)arg->m);
+ MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
// Undo BlockingCall ctor effects.
arg->thr->ignore_interceptors--;
arg->si->~ScopedInterceptor();
fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
}
if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
int res = REAL(pthread_mutex_init)(m, a);
if (res == 0) {
- bool recursive = false;
+ u32 flagz = 0;
if (a) {
int type = 0;
if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
- recursive = (type == PTHREAD_MUTEX_RECURSIVE
- || type == PTHREAD_MUTEX_RECURSIVE_NP);
+ if (type == PTHREAD_MUTEX_RECURSIVE ||
+ type == PTHREAD_MUTEX_RECURSIVE_NP)
+ flagz |= MutexFlagWriteReentrant;
}
- MutexCreate(thr, pc, (uptr)m, false, recursive, false);
+ MutexCreate(thr, pc, (uptr)m, flagz);
}
return res;
}
TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
int res = REAL(pthread_mutex_destroy)(m);
- if (res == 0 || res == EBUSY) {
+ if (res == 0 || res == errno_EBUSY) {
MutexDestroy(thr, pc, (uptr)m);
}
return res;
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
int res = REAL(pthread_mutex_trylock)(m);
- if (res == EOWNERDEAD)
+ if (res == errno_EOWNERDEAD)
MutexRepair(thr, pc, (uptr)m);
- if (res == 0 || res == EOWNERDEAD)
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
int res = REAL(pthread_mutex_timedlock)(m, abstime);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
int res = REAL(pthread_spin_init)(m, pshared);
if (res == 0) {
- MutexCreate(thr, pc, (uptr)m, false, false, false);
+ MutexCreate(thr, pc, (uptr)m);
}
return res;
}
TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
int res = REAL(pthread_spin_lock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
int res = REAL(pthread_spin_trylock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
int res = REAL(pthread_rwlock_init)(m, a);
if (res == 0) {
- MutexCreate(thr, pc, (uptr)m, true, false, false);
+ MutexCreate(thr, pc, (uptr)m);
}
return res;
}
TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ MutexPreReadLock(thr, pc, (uptr)m);
int res = REAL(pthread_rwlock_rdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexPostReadLock(thr, pc, (uptr)m);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
+ MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
if (res == 0) {
- MutexReadLock(thr, pc, (uptr)m);
+ MutexPostReadLock(thr, pc, (uptr)m);
}
return res;
}
TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ MutexPreLock(thr, pc, (uptr)m);
int res = REAL(pthread_rwlock_wrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
int res = REAL(pthread_rwlock_trywrlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
}
return res;
}
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
if (o == 0 || f == 0)
- return EINVAL;
+ return errno_EINVAL;
atomic_uint32_t *a;
if (!SANITIZER_MAC)
a = static_cast<atomic_uint32_t*>(o);
#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif
-TSAN_INTERCEPTOR(uptr, fread, void *ptr, uptr size, uptr nmemb, void *f) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fread, ptr, size, nmemb, f);
- MemoryAccessRange(thr, pc, (uptr)ptr, size * nmemb, true);
- }
- return REAL(fread)(ptr, size, nmemb, f);
-}
-
-TSAN_INTERCEPTOR(uptr, fwrite, const void *p, uptr size, uptr nmemb, void *f) {
- // libc file streams can call user-supplied functions, see fopencookie.
- {
- SCOPED_TSAN_INTERCEPTOR(fwrite, p, size, nmemb, f);
- MemoryAccessRange(thr, pc, (uptr)p, size * nmemb, false);
- }
- return REAL(fwrite)(p, size, nmemb, f);
-}
-
static void FlushStreams() {
// Flushing all the streams here may freeze the process if a child thread is
// performing file stream operations at the same time.
sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
sigactions[sig].sa_restorer = act->sa_restorer;
#endif
sigaction_t newact;
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
OnExit(((TsanInterceptorContext *) ctx)->thr)
-#define COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m) \
- MutexLock(((TsanInterceptorContext *)ctx)->thr, \
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
+ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
+ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
}
};
-#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
TSAN_INTERCEPT(sleep);
TSAN_INTERCEPT(usleep);
TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(pause);
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
- void UserCallbackStart();
- void UserCallbackEnd();
+ void DisableIgnores();
+ void EnableIgnores();
private:
ThreadState *const thr_;
const uptr pc_;
bool in_ignored_lib_;
+ bool ignoring_;
};
+LibIgnore *libignore();
+
} // namespace __tsan
#define SCOPED_INTERCEPTOR_RAW(func, ...) \
/**/
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
- si.UserCallbackStart();
+ si.DisableIgnores();
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
- si.UserCallbackEnd();
+ si.EnableIgnores();
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#include "tsan_interface_ann.h"
#include <libkern/OSAtomic.h>
+
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
typedef long long_t; // NOLINT
REAL(os_lock_unlock)(lock);
}
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
xpc_connection_t connection, xpc_handler_t handler) {
SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
(connection, message, replyq, new_handler);
}
+TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
+ Release(thr, pc, (uptr)connection);
+ REAL(xpc_connection_cancel)(connection);
+}
+
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
};
} // namespace
-// This adds a libc++ interceptor for:
+// The following code adds libc++ interceptors for:
// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
-// destructor code. This interceptor re-implements the whole function so that
+// destructor code. These interceptors re-implement the functions in full so that
// the mo_acq_rel semantics of the atomic decrement are visible.
//
-// Unfortunately, this interceptor cannot simply Acquire/Release some sync
+// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).
+
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
fake_shared_weak_count *o) {
if (!flags()->shared_ptr_interceptor)
}
}
+STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ return true;
+ }
+ return false;
+}
+
namespace {
struct call_once_callback_args {
void (*orig_func)(void *arg);
Initialize(cur_thread());
}
+void __tsan_flush_memory() {
+ FlushShadowMemory();
+}
+
void __tsan_read16(void *addr) {
MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
#include <sanitizer_common/sanitizer_internal_defs.h>
using __sanitizer::uptr;
+using __sanitizer::tid_t;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
+
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
int *fd, int *suppressable, void **trace,
uptr trace_size);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type);
+
// Returns information about mutexes included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
// Returns information about threads included in the report.
SANITIZER_INTERFACE_ATTRIBUTE
-int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
int *running, const char **name, int *parent_tid,
void **trace, uptr trace_size);
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id);
+
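A debugger-side caller might combine the two entry points as sketched below; the prototypes restate the declarations above with plain integer types (an assumption: uptr == unsigned long, tid_t == unsigned long long on LP64), and everything around them is hypothetical.

#include <cstdio>
#include <cstring>

// Hypothetical debugger helper; not part of the patch.
extern "C" {
const char *__tsan_locate_address(unsigned long addr, char *name,
                                  unsigned long name_size,
                                  unsigned long *region_address,
                                  unsigned long *region_size);
int __tsan_get_alloc_stack(unsigned long addr, unsigned long *trace,
                           unsigned long size, int *thread_id,
                           unsigned long long *os_id);
}

void DescribeAddress(unsigned long addr) {
  char name[128] = {};
  unsigned long region_addr = 0, region_size = 0;
  const char *kind = __tsan_locate_address(addr, name, sizeof(name),
                                           &region_addr, &region_size);
  std::printf("%p is %s [%p..%p) %s\n", (void *)addr, kind,
              (void *)region_addr, (void *)(region_addr + region_size), name);
  if (std::strcmp(kind, "heap") == 0) {
    unsigned long trace[64];
    int tid = 0;
    unsigned long long os_id = 0;
    int n = __tsan_get_alloc_stack(addr, trace, 64, &tid, &os_id);
    std::printf("allocated by thread %d (os id %llu):\n", tid, os_id);
    for (int i = 0; i < n; i++)
      std::printf("  #%d: %p\n", i, (void *)trace[i]);
  }
}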
#endif // SANITIZER_GO
#ifdef __cplusplus
class ScopedAnnotation {
public:
- ScopedAnnotation(ThreadState *thr, const char *aname, const char *f, int l,
- uptr pc)
+ ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
: thr_(thr) {
FuncEntry(thr_, pc);
- DPrintf("#%d: annotation %s() %s:%d\n", thr_->tid, aname, f, l);
+ DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
}
~ScopedAnnotation() {
ThreadState *const thr_;
};
-#define SCOPED_ANNOTATION(typ) \
+#define SCOPED_ANNOTATION_RET(typ, ret) \
if (!flags()->enable_annotations) \
- return; \
+ return ret; \
ThreadState *thr = cur_thread(); \
const uptr caller_pc = (uptr)__builtin_return_address(0); \
StatInc(thr, StatAnnotation); \
StatInc(thr, Stat##typ); \
- ScopedAnnotation sa(thr, __func__, f, l, caller_pc); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
const uptr pc = StackTrace::GetCurrentPc(); \
(void)pc; \
/**/
+#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
+
static const int kMaxDescLen = 128;
struct ExpectRace {
void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreate);
- MutexCreate(thr, pc, m, true, true, false);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
- MutexCreate(thr, pc, m, true, true, true);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
uptr is_w) {
SCOPED_ANNOTATION(AnnotateRWLockAcquired);
if (is_w)
- MutexLock(thr, pc, m);
+ MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
else
- MutexReadLock(thr, pc, m);
+ MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
}
void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
void INTERFACE_ATTRIBUTE
AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_create(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_create);
+ MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_destroy(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_destroy);
+ MutexDestroy(thr, pc, (uptr)m, flagz);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
+ if (!(flagz & MutexFlagTryLock)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPreReadLock(thr, pc, (uptr)m);
+ else
+ MutexPreLock(thr, pc, (uptr)m);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_lock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+ if (!(flagz & MutexFlagTryLockFailed)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPostReadLock(thr, pc, (uptr)m, flagz);
+ else
+ MutexPostLock(thr, pc, (uptr)m, flagz, rec);
+ }
+}
+
+INTERFACE_ATTRIBUTE
+int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
+ int ret = 0;
+ if (flagz & MutexFlagReadLock) {
+ CHECK(!(flagz & MutexFlagRecursiveUnlock));
+ MutexReadUnlock(thr, pc, (uptr)m);
+ } else {
+ ret = MutexUnlock(thr, pc, (uptr)m, flagz);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ return ret;
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_signal);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
+ // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_divert);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
} // extern "C"
#endif
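These annotations let a custom synchronization primitive describe itself to TSan. A hypothetical annotated spinlock, using only the entry points defined above (the lock itself is illustrative; a failed try-lock would additionally pass MutexFlagTryLockFailed to the post hook):

#include <atomic>

// Hypothetical user code; the __tsan_mutex_* prototypes mirror the
// definitions above and resolve to the runtime when built with TSan.
extern "C" {
void __tsan_mutex_create(void *m, unsigned flagz);
void __tsan_mutex_destroy(void *m, unsigned flagz);
void __tsan_mutex_pre_lock(void *m, unsigned flagz);
void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec);
int __tsan_mutex_pre_unlock(void *m, unsigned flagz);
void __tsan_mutex_post_unlock(void *m, unsigned flagz);
}

class SpinLock {
 public:
  SpinLock() { __tsan_mutex_create(this, 0); }
  ~SpinLock() { __tsan_mutex_destroy(this, 0); }
  void Lock() {
    __tsan_mutex_pre_lock(this, 0);
    while (locked_.exchange(true, std::memory_order_acquire)) {
    }
    // The raw spinning above is hidden from TSan by the ignore region that
    // pre_lock opens and post_lock closes.
    __tsan_mutex_post_lock(this, 0, /*rec=*/0);
  }
  void Unlock() {
    (void)__tsan_mutex_pre_unlock(this, 0);
    locked_.store(false, std::memory_order_release);
    __tsan_mutex_post_unlock(this, 0);
  }
 private:
  std::atomic<bool> locked_{false};
};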
template<typename T>
-static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
- morder mo) {
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
- AcquireImpl(thr, pc, &s->clock);
+ // Don't create sync object if it does not exist yet. For example, an atomic
+ // pointer is initialized to nullptr and then periodically acquire-loaded.
T v = NoTsanAtomicLoad(a, mo);
- s->mtx.ReadUnlock();
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+ if (s) {
+ AcquireImpl(thr, pc, &s->clock);
+ // Re-read under sync mutex because we need a consistent snapshot
+ // of the value and the clock we acquire.
+ v = NoTsanAtomicLoad(a, mo);
+ s->mtx.ReadUnlock();
+ }
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
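The access pattern the new comment describes, in standard C++: until the releasing store publishes the pointer, repeated acquire loads have nothing to synchronize with, so allocating a sync object for each of them was pure overhead.

#include <atomic>
#include <thread>

// Standalone illustration of the pattern motivating GetIfExistsAndLock;
// not TSan code.
std::atomic<int *> g_ptr{nullptr};

int main() {
  std::thread producer([] {
    g_ptr.store(new int(42), std::memory_order_release);
  });
  int *p;
  // Before this change each acquire load created and locked a sync object
  // for &g_ptr; now none exists until the releasing store creates one.
  while ((p = g_ptr.load(std::memory_order_acquire)) == nullptr) {
  }
  producer.join();
  int rc = (*p == 42) ? 0 : 1;
  delete p;
  return rc;
}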
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
- ReleaseImpl(thr, pc, &s->clock);
+ ReleaseStoreImpl(thr, pc, &s->clock);
NoTsanAtomicStore(a, v, mo);
s->mtx.Unlock();
}
// C/C++
-static morder covert_morder(morder mo) {
+static morder convert_morder(morder mo) {
if (flags()->force_seq_cst_atomics)
return (morder)mo_seq_cst;
}
#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = StackTrace::GetCurrentPc(); \
- mo = covert_morder(mo); \
ThreadState *const thr = cur_thread(); \
- if (thr->ignore_interceptors) \
+ if (thr->ignore_sync || thr->ignore_interceptors) { \
+ ProcessPendingSignals(thr); \
return NoTsanAtomic##func(__VA_ARGS__); \
+ } \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = convert_morder(mo); \
AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
ScopedAtomic sa(thr, callpc, a, mo, __func__); \
return Atomic##func(thr, pc, __VA_ARGS__); \
FuncExit(cur_thread());
}
+void __tsan_ignore_thread_begin() {
+ ThreadIgnoreBegin(cur_thread(), CALLERPC);
+}
+
+void __tsan_ignore_thread_end() {
+ ThreadIgnoreEnd(cur_thread(), CALLERPC);
+}
+
void __tsan_read_range(void *addr, uptr size) {
MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
}
}
}
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ SCOPED_JAVA_FUNC(__tsan_java_find);
+ DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
+ CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ CHECK_EQ(to % kHeapAlignment, 0);
+ CHECK_GE(*from_ptr, jctx->heap_begin);
+ CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
void __tsan_java_finalize() {
SCOPED_JAVA_FUNC(__tsan_java_finalize);
DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexLock(thr, pc, addr);
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_unlock(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexReadLock(thr, pc, addr);
+ MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
+ MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
}
void __tsan_java_mutex_read_unlock(jptr addr) {
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
CHECK_GT(rec, 0);
- MutexCreate(thr, pc, addr, true, true, true);
- MutexLock(thr, pc, addr, rec);
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
}
int __tsan_java_mutex_unlock_rec(jptr addr) {
CHECK_GE(addr, jctx->heap_begin);
CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
- return MutexUnlock(thr, pc, addr, true);
+ return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
}
void __tsan_java_acquire(jptr addr) {
// It ensures necessary synchronization between
// java object creation and finalization.
void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
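A hypothetical heap walker built on this primitive; jptr is the unsigned integer type this header uses for Java heap addresses, and the 8-byte alignment constant is an assumption standing in for the runtime's kHeapAlignment:

// Hypothetical caller; only __tsan_java_find comes from this interface.
void VisitAllocatedBlocks(jptr heap_begin, jptr heap_end,
                          void (*visit)(jptr addr, jptr size)) {
  const jptr kAlign = 8;  // assumed value of the runtime's kHeapAlignment
  jptr from = heap_begin;
  jptr size;
  while ((size = __tsan_java_find(&from, heap_end)) != 0) {
    visit(from, size);  // 'from' was updated to the block start
    from += (size + kAlign - 1) & ~(kAlign - 1);  // keep 'from' aligned
  }
}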
// Mutex lock.
// Addr is any unique address associated with the mutex.
return width == 1;
}
-static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+static dispatch_queue_t GetTargetQueueFromQueue(dispatch_queue_t q) {
CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
- dispatch_queue_t target_queue =
- *(dispatch_queue_t *)(((uptr)source) +
- dispatch_queue_offsets.dqo_target_queue);
- CHECK_NE(target_queue, 0);
- return target_queue;
+ dispatch_queue_t tq = *(
+ dispatch_queue_t *)(((uptr)q) + dispatch_queue_offsets.dqo_target_queue);
+ return tq;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ dispatch_queue_t tq = GetTargetQueueFromQueue((dispatch_queue_t)source);
+ CHECK_NE(tq, 0);
+ return tq;
}
static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
void *orig_context,
dispatch_function_t orig_work) {
tsan_block_context_t *new_context =
- (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
+ (tsan_block_context_t *)user_alloc_internal(thr, pc,
+ sizeof(tsan_block_context_t));
new_context->queue = queue;
new_context->orig_context = orig_context;
new_context->orig_work = orig_work;
new_context->free_context_in_callback = true;
new_context->submitted_synchronously = false;
new_context->is_barrier_block = false;
+ new_context->non_queue_sync_object = 0;
return new_context;
}
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = sync_ptr ? ((uptr)sync_ptr) + sizeof(uptr) : 0; \
+ bool serial_task = context->is_barrier_block || is_queue_serial
+
+static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ Acquire(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_sync) Acquire(thr, pc, serial_sync);
+ if (serial_task && concurrent_sync) Acquire(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
+ tsan_block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_task && serial_sync) Release(thr, pc, serial_sync);
+ if (!serial_task && concurrent_sync) Release(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
static void dispatch_callback_wrap(void *param) {
SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
tsan_block_context_t *context = (tsan_block_context_t *)param;
- bool is_queue_serial = context->queue && IsQueueSerial(context->queue);
- uptr sync_ptr = (uptr)context->queue ?: context->non_queue_sync_object;
-
- uptr serial_sync = (uptr)sync_ptr;
- uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr);
- uptr submit_sync = (uptr)context;
- bool serial_task = context->is_barrier_block || is_queue_serial;
- Acquire(thr, pc, submit_sync);
- Acquire(thr, pc, serial_sync);
- if (serial_task) Acquire(thr, pc, concurrent_sync);
+ dispatch_sync_pre_execute(thr, pc, context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
context->orig_work(context->orig_context);
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
- Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
- if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+ dispatch_sync_post_execute(thr, pc, context);
if (context->free_context_in_callback) user_free(thr, pc, context);
}
}
#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
- TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, \
+ DISPATCH_NOESCAPE dispatch_block_t block) { \
SCOPED_TSAN_INTERCEPTOR(name, q, block); \
SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
dispatch_block_t heap_block = Block_copy(block); \
// need to undefine the macro.
#undef dispatch_once
TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
- dispatch_block_t block) {
+ DISPATCH_NOESCAPE dispatch_block_t block) {
SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
u32 v = atomic_load(a, memory_order_acquire);
}
TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
- dispatch_queue_t queue, void (^block)(size_t)) {
+ dispatch_queue_t queue,
+ DISPATCH_NOESCAPE void (^block)(size_t)) {
SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
void *parent_to_child_sync = nullptr;
return REAL(dispatch_io_close)(channel, flags);
}
+// Resuming a suspended queue needs to synchronize with all subsequent
+// executions of blocks in that queue.
+TSAN_INTERCEPTOR(void, dispatch_resume, dispatch_object_t o) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_resume, o);
+ Release(thr, pc, (uptr)o); // Synchronizes with the Acquire() on serial_sync
+ // in dispatch_sync_pre_execute
+ return REAL(dispatch_resume)(o);
+}
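The pattern whose synchronization this makes visible to TSan, sketched with plain GCD (Apple platforms; blocks require Objective-C++ or -fblocks):

#include <dispatch/dispatch.h>

int g_config;  // mutated only while the queue is suspended

void Example() {
  dispatch_queue_t q =
      dispatch_queue_create("example.q", DISPATCH_QUEUE_SERIAL);
  dispatch_suspend(q);
  dispatch_async(q, ^{
    (void)g_config;  // runs only after dispatch_resume below
  });
  g_config = 42;       // unlocked write; ordering comes from suspend/resume
  dispatch_resume(q);  // the interceptor above Releases on 'q', which the
                       // block's pre-execute hook Acquires
  dispatch_release(q);
}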
+
} // namespace __tsan
#endif // SANITIZER_MAC
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
- user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
SCOPED_INTERCEPTOR_RAW(malloc, size); \
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
SCOPED_INTERCEPTOR_RAW(valloc, size); \
- void *p = user_alloc(thr, pc, size, GetPageSizeCached())
+ void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
diff = p + size - RoundDown(p + size, kPageSize);
if (diff != 0)
size -= diff;
- ReleaseMemoryToOS((uptr)MemToMeta(p), size / kMetaRatio);
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
}
};
}
void InitializeAllocator() {
- allocator()->Init(common_flags()->allocator_may_return_null);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {
OutputReport(thr, rep);
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
- if (p == 0)
+ if (UNLIKELY(p == 0))
return 0;
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
return p;
}
-void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
- if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDieOnBadRequest();
- void *p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
- return p;
-}
-
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
SignalUnsafeCall(thr, pc);
}
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n)))
+ return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
+
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
ctx->metamap.AllocBlock(thr, pc, p, sz);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- void *p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
user_free(thr, pc, p);
+ return nullptr;
+ }
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
+ user_free(thr, pc, p);
+ }
+ return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ Allocator::FailureHandler::OnBadRequest();
+ return errno_EINVAL;
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
+ errno = errno_ENOMEM;
+ return Allocator::FailureHandler::OnBadRequest();
}
- return p2;
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
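The observable contract after this hunk, exercised with plain libc calls (behavior per POSIX/C; under TSan these route through the wrappers above):

#include <cassert>
#include <cerrno>
#include <cstdint>
#include <cstdlib>

int main() {
  // posix_memalign reports a bad alignment via its return value and, in the
  // implementation above, writes *memptr only on success.
  void *q = nullptr;
  int rc = posix_memalign(&q, /*align=*/3, 64);
  assert(rc == EINVAL && q == nullptr);
  // Plain allocation failure must return null with errno set to ENOMEM.
  errno = 0;
  void *p = malloc(SIZE_MAX);  // unsatisfiable request
  if (!p) assert(errno == ENOMEM);
  free(p);
  return 0;
}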
uptr user_alloc_usable_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
allocator()->SwallowCache(&thr->proc()->alloc_cache);
internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
ctx->metamap.OnProcIdle(thr->proc());
void AllocatorPrintStats();
// For user allocations.
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment, bool signal = true);
-void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
-void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
// Interceptors for operators new and delete.
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "tsan_interceptors.h"
DECLARE_REAL(void *, malloc, uptr size)
DECLARE_REAL(void, free, void *ptr)
-#define OPERATOR_NEW_BODY(mangled_name) \
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(mangled_name, nothrow) \
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size); \
void *p = 0; \
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
p = user_alloc(thr, pc, size); \
+ if (!nothrow && UNLIKELY(!p)) DieOnFailure::OnOOM(); \
} \
invoke_malloc_hook(p, size); \
return p;
SANITIZER_INTERFACE_ATTRIBUTE
void *operator new(__sanitizer::uptr size);
void *operator new(__sanitizer::uptr size) {
- OPERATOR_NEW_BODY(_Znwm);
+ OPERATOR_NEW_BODY(_Znwm, false /*nothrow*/);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *operator new[](__sanitizer::uptr size);
void *operator new[](__sanitizer::uptr size) {
- OPERATOR_NEW_BODY(_Znam);
+ OPERATOR_NEW_BODY(_Znam, false /*nothrow*/);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t);
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t, true /*nothrow*/);
}
SANITIZER_INTERFACE_ATTRIBUTE
void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
- OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t);
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t, true /*nothrow*/);
}
#define OPERATOR_DELETE_BODY(mangled_name) \
};
#define TSAN_MID_APP_RANGE 1
+#elif defined(__aarch64__) && defined(__APPLE__)
+/*
+C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
+0200 0000 00 - 0300 0000 00: heap (4 GB)
+0300 0000 00 - 0400 0000 00: - (4 GB)
+0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
+0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
+0e00 0000 00 - 0f00 0000 00: - (4 GB)
+0f00 0000 00 - 1000 0000 00: traces (4 GB)
+*/
+struct Mapping {
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kHeapMemBeg = 0x0200000000ull;
+ static const uptr kHeapMemEnd = 0x0300000000ull;
+ static const uptr kShadowBeg = 0x0400000000ull;
+ static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kMetaShadowBeg = 0x0d00000000ull;
+ static const uptr kMetaShadowEnd = 0x0e00000000ull;
+ static const uptr kTraceMemBeg = 0x0f00000000ull;
+ static const uptr kTraceMemEnd = 0x1000000000ull;
+ static const uptr kHiAppMemBeg = 0x1000000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000ull;
+ static const uptr kAppMemMsk = 0x0ull;
+ static const uptr kAppMemXor = 0x0ull;
+ static const uptr kVdsoBeg = 0x7000000000000000ull;
+};
+
#elif defined(__aarch64__)
// AArch64 supports multiple VMA which leads to multiple address transformation
// functions. To support these multiple VMAS transformations and mappings TSAN
template<int Type>
uptr MappingArchImpl(void) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return MappingImpl<Mapping39, Type>();
case 42: return MappingImpl<Mapping42, Type>();
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return IsAppMemImpl<Mapping39>(mem);
case 42: return IsAppMemImpl<Mapping42>(mem);
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return IsShadowMemImpl<Mapping39>(mem);
case 42: return IsShadowMemImpl<Mapping42>(mem);
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return IsMetaMemImpl<Mapping39>(mem);
case 42: return IsMetaMemImpl<Mapping42>(mem);
ALWAYS_INLINE
uptr MemToShadow(uptr x) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return MemToShadowImpl<Mapping39>(x);
case 42: return MemToShadowImpl<Mapping42>(x);
ALWAYS_INLINE
u32 *MemToMeta(uptr x) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return MemToMetaImpl<Mapping39>(x);
case 42: return MemToMetaImpl<Mapping42>(x);
ALWAYS_INLINE
uptr ShadowToMem(uptr s) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return ShadowToMemImpl<Mapping39>(s);
case 42: return ShadowToMemImpl<Mapping42>(s);
ALWAYS_INLINE
uptr GetThreadTrace(int tid) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return GetThreadTraceImpl<Mapping39>(tid);
case 42: return GetThreadTraceImpl<Mapping42>(tid);
ALWAYS_INLINE
uptr GetThreadTraceHeader(int tid) {
-#ifdef __aarch64__
+#if defined(__aarch64__) && !defined(__APPLE__)
switch (vmaSize) {
case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime,
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
-#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- ReleaseMemoryToOS(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif
}
// Map the file into the shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr start, end, offset, prot;
// Reusing the buffer 'name'.
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
- if (name[0] != 0 && name[0] != '['
- && (prot & MemoryMappingLayout::kProtectionRead)
- && (prot & MemoryMappingLayout::kProtectionExecute)
- && !(prot & MemoryMappingLayout::kProtectionWrite)
- && IsAppMem(start)) {
+ MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ while (proc_maps.Next(&segment)) {
+ if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
+ segment.IsReadable() && segment.IsExecutable() &&
+ !segment.IsWritable() && IsAppMem(segment.start)) {
// Assume it's .rodata
- char *shadow_start = (char*)MemToShadow(start);
- char *shadow_end = (char*)MemToShadow(end);
+ char *shadow_start = (char *)MemToShadow(segment.start);
+ char *shadow_end = (char *)MemToShadow(segment.end);
for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
return res;
}
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+ // Check that the thr object is in TLS.
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ CHECK_GE(thr_beg, tls_addr);
+ CHECK_LE(thr_beg, tls_addr + tls_size);
+ CHECK_GE(thr_end, tls_addr);
+ CHECK_LE(thr_end, tls_addr + tls_size);
+ // Since the thr object is huge, skip it.
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
+ tls_addr + tls_size - thr_end);
+}
+
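The split performed by ImitateTlsWrite above, sketched (addresses hypothetical):

// [tls_addr ...... thr_beg)[ thr object ][thr_end ...... tls_addr+tls_size)
//  imitated write           skipped       imitated write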
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
#if !SANITIZER_GO
#if SANITIZER_ANDROID
-
-#if defined(__aarch64__)
-# define __get_tls() \
- ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
-#elif defined(__x86_64__)
-# define __get_tls() \
- ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
-#else
-#error unsupported architecture
-#endif
-
-// On Android, __thread is not supported. So we store the pointer to ThreadState
-// in TLS_SLOT_TSAN, which is the tls slot allocated by Android bionic for tsan.
-static const int TLS_SLOT_TSAN = 8;
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;
ThreadState *cur_thread() {
- ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
if (thr == nullptr) {
__sanitizer_sigset_t emptyset;
internal_sigfillset(&emptyset);
__sanitizer_sigset_t oldset;
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
- thr = reinterpret_cast<ThreadState*>(__get_tls()[TLS_SLOT_TSAN]);
+ thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
if (thr == nullptr) {
thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
"ThreadState"));
- __get_tls()[TLS_SLOT_TSAN] = thr;
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
if (dead_thread_state == nullptr) {
dead_thread_state = reinterpret_cast<ThreadState*>(
MmapOrDie(sizeof(ThreadState), "ThreadState"));
internal_sigfillset(&emptyset);
__sanitizer_sigset_t oldset;
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
- ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
if (thr != dead_thread_state) {
- __get_tls()[TLS_SLOT_TSAN] = dead_thread_state;
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
UnmapOrDie(thr, sizeof(ThreadState));
}
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"
+#include <mach/mach.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
static uptr main_thread_identity = 0;
ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
+ThreadState **cur_thread_location() {
+ ThreadState **thread_identity = (ThreadState **)pthread_self();
+ return ((uptr)thread_identity == main_thread_identity) ? nullptr
+ : thread_identity;
+}
+
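In other words, cur_thread_location returns nullptr exactly for the main thread, which lets cur_thread and cur_thread_finalize below share the main-thread special case instead of each comparing pthread_self against main_thread_identity.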
ThreadState *cur_thread() {
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity || main_thread_identity == 0) {
+ ThreadState **thr_state_loc = cur_thread_location();
+ if (thr_state_loc == nullptr || main_thread_identity == 0) {
return (ThreadState *)&main_thread_state;
}
- ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
(uptr *)fake_tls, sizeof(ThreadState));
return thr;
// munmap first and then clear `fake_tls`; if we receive a signal in between,
// the handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
- uptr thread_identity = (uptr)pthread_self();
- if (thread_identity == main_thread_identity) {
+ ThreadState **thr_state_loc = cur_thread_location();
+ if (thr_state_loc == nullptr) {
// Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
// exit the main thread. Let's keep the main thread's ThreadState.
return;
}
- ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ ThreadState **fake_tls = (ThreadState **)MemToShadow((uptr)thr_state_loc);
internal_munmap(*fake_tls, sizeof(ThreadState));
*fake_tls = nullptr;
}
#endif
-uptr GetShadowMemoryConsumption() {
- return 0;
+void FlushShadowMemory() {
}
-void FlushShadowMemory() {
+static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
+ vm_address_t address = start;
+ vm_address_t end_address = end;
+ uptr resident_pages = 0;
+ uptr dirty_pages = 0;
+ while (address < end_address) {
+ vm_size_t vm_region_size;
+ mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
+ vm_region_extended_info_data_t vm_region_info;
+ mach_port_t object_name;
+ kern_return_t ret = vm_region_64(
+ mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
+ (vm_region_info_t)&vm_region_info, &count, &object_name);
+ if (ret != KERN_SUCCESS) break;
+
+ resident_pages += vm_region_info.pages_resident;
+ dirty_pages += vm_region_info.pages_dirtied;
+
+ address += vm_region_size;
+ }
+ *res = resident_pages * GetPageSizeCached();
+ *dirty = dirty_pages * GetPageSizeCached();
}
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr shadow_res, shadow_dirty;
+ uptr meta_res, meta_dirty;
+ uptr trace_res, trace_dirty;
+ RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
+ RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
+ RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
+
+#if !SANITIZER_GO
+ uptr low_res, low_dirty;
+ uptr high_res, high_dirty;
+ uptr heap_res, heap_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
+ RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
+ RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
+#else // !SANITIZER_GO
+ uptr app_res, app_dirty;
+ RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+#endif
+
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#endif
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+ TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
+#if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+#else // !SANITIZER_GO
+ AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
+#endif
+ stacks->n_uniq_ids, stacks->allocated / 1024,
+ nthread, nlive);
}
#if !SANITIZER_GO
ThreadState *parent_thread_state = nullptr; // No parent.
int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
CHECK_NE(tid, 0);
- ThreadStart(thr, tid, GetTid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ true);
}
} else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
if (thread == pthread_self()) {
#endif
void InitializePlatformEarly() {
+#if defined(__aarch64__)
+ uptr max_vm = GetMaxVirtualAddress() + 1;
+ if (max_vm != Mapping::kHiAppMemEnd) {
+ Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
+ max_vm, Mapping::kHiAppMemEnd);
+ Die();
+ }
+#endif
}
void InitializePlatform() {
#endif
}
+#if !SANITIZER_GO
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+ // The pointer to the ThreadState object is stored in the shadow memory
+ // of the TLS.
+ uptr tls_end = tls_addr + tls_size;
+ ThreadState **thr_state_loc = cur_thread_location();
+ if (thr_state_loc == nullptr) {
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
+ } else {
+ uptr thr_state_start = (uptr)thr_state_loc;
+ uptr thr_state_end = thr_state_start + sizeof(uptr);
+ CHECK_GE(thr_state_start, tls_addr);
+ CHECK_LE(thr_state_start, tls_addr + tls_size);
+ CHECK_GE(thr_state_end, tls_addr);
+ CHECK_LE(thr_state_end, tls_addr + tls_size);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
+ thr_state_start - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
+ tls_end - thr_state_end);
+ }
+}
+#endif
+
#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
#elif defined(__mips64)
const uptr kMadviseRangeBeg = 0xff00000000ull;
const uptr kMadviseRangeSize = 0x0100000000ull;
+#elif defined(__aarch64__) && defined(__APPLE__)
+ uptr kMadviseRangeBeg = LoAppMemBeg();
+ uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
#elif defined(__aarch64__)
uptr kMadviseRangeBeg = 0;
uptr kMadviseRangeSize = 0;
void CheckAndProtect() {
// Ensure that the binary is indeed compiled with -pie.
MemoryMappingLayout proc_maps(true);
- uptr p, end, prot;
- while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
- if (IsAppMem(p))
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (IsAppMem(segment.start)) continue;
+ if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
+ if (segment.protection == 0) // Zero page or mprotected.
continue;
- if (p >= HeapMemEnd() &&
- p < HeapEnd())
- continue;
- if (prot == 0) // Zero page or mprotected.
- continue;
- if (p >= VdsoBeg()) // vdso
+ if (segment.start >= VdsoBeg()) // vdso
break;
- Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
+ segment.start, segment.end);
Die();
}
+#if defined(__aarch64__) && defined(__APPLE__)
+ ProtectRange(HeapMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+#else
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
#ifdef TSAN_MID_APP_RANGE
ProtectRange(TraceMemBeg(), TraceMemEnd());
ProtectRange(TraceMemEnd(), HeapMemBeg());
ProtectRange(HeapEnd(), HiAppMemBeg());
+#endif
}
#endif
namespace __tsan {
-uptr GetShadowMemoryConsumption() {
- return 0;
-}
-
void FlushShadowMemory() {
}
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace_printer.h"
class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
const char *ThreadDescription() { return Cyan(); }
- const char *EndThreadDescription() { return Default(); }
const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
const char *Sleep() { return Yellow(); }
- const char *EndSleep() { return Default(); }
const char *Mutex() { return Magenta(); }
- const char *EndMutex() { return Default(); }
};
ReportDesc::ReportDesc()
- : stacks(MBlockReportStack)
+ : tag(kExternalTagNone)
+ , stacks(MBlockReportStack)
, mops(MBlockReportMop)
, locs(MBlockReportLoc)
, mutexes(MBlockReportMutex)
return buf;
}
-static const char *ReportTypeString(ReportType typ) {
+static const char *ReportTypeString(ReportType typ, uptr tag) {
if (typ == ReportTypeRace)
return "data race";
if (typ == ReportTypeVptrRace)
return "heap-use-after-free";
if (typ == ReportTypeVptrUseAfterFree)
return "heap-use-after-free (virtual call vs free)";
+ if (typ == ReportTypeExternalRace) {
+ const char *str = GetReportHeaderFromTag(tag);
+ return str ? str : "race on external object";
+ }
if (typ == ReportTypeThreadLeak)
return "thread leak";
if (typ == ReportTypeMutexDestroyLocked)
: (write ? "Previous write" : "Previous read"));
}
+static const char *ExternalMopDesc(bool first, bool write) {
+ return first ? (write ? "Modifying" : "Read-only")
+ : (write ? "Previous modifying" : "Previous read-only");
+}
+
static void PrintMop(const ReportMop *mop, bool first) {
Decorator d;
char thrbuf[kThreadBufSize];
Printf("%s", d.Access());
- Printf(" %s of size %d at %p by %s",
- MopDesc(first, mop->write, mop->atomic),
- mop->size, (void*)mop->addr,
- thread_name(thrbuf, mop->tid));
+ if (mop->external_tag == kExternalTagNone) {
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic), mop->size,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ } else {
+ const char *object_type = GetObjectTypeFromTag(mop->external_tag);
+ if (object_type == nullptr)
+ object_type = "external object";
+ Printf(" %s access of %s at %p by %s",
+ ExternalMopDesc(first, mop->write), object_type,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ }
PrintMutexSet(mop->mset);
Printf(":\n");
- Printf("%s", d.EndAccess());
+ Printf("%s", d.Default());
PrintStack(mop->stack);
}
global.module_offset);
} else if (loc->type == ReportLocationHeap) {
char thrbuf[kThreadBufSize];
- Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
- loc->heap_chunk_size, loc->heap_chunk_start,
- thread_name(thrbuf, loc->tid));
+ const char *object_type = GetObjectTypeFromTag(loc->external_tag);
+ if (!object_type) {
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ } else {
+ Printf(" Location is %s of size %zu at %p allocated by %s:\n",
+ object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ }
print_stack = true;
} else if (loc->type == ReportLocationStack) {
Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
loc->fd, thread_name(thrbuf, loc->tid));
print_stack = true;
}
- Printf("%s", d.EndLocation());
+ Printf("%s", d.Default());
if (print_stack)
PrintStack(loc->stack);
}
static void PrintMutexShort(const ReportMutex *rm, const char *after) {
Decorator d;
- Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.EndMutex(), after);
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
}
static void PrintMutexShortWithAddress(const ReportMutex *rm,
const char *after) {
Decorator d;
- Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.EndMutex(), after);
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
}
static void PrintMutex(const ReportMutex *rm) {
if (rm->destroyed) {
Printf("%s", d.Mutex());
Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
- Printf("%s", d.EndMutex());
+ Printf("%s", d.Default());
} else {
Printf("%s", d.Mutex());
Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
- Printf("%s", d.EndMutex());
+ Printf("%s", d.Default());
PrintStack(rm->stack);
}
}
if (rt->name && rt->name[0] != '\0')
Printf(" '%s'", rt->name);
char thrbuf[kThreadBufSize];
- Printf(" (tid=%zu, %s) created by %s",
- rt->os_id, rt->running ? "running" : "finished",
- thread_name(thrbuf, rt->parent_tid));
+ const char *thread_status = rt->running ? "running" : "finished";
+ if (rt->workerthread) {
+ Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf("\n");
+ Printf("%s", d.Default());
+ return;
+ }
+ Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
Printf("\n");
- Printf("%s", d.EndThreadDescription());
+ Printf("%s", d.Default());
PrintStack(rt->stack);
}
Decorator d;
Printf("%s", d.Sleep());
Printf(" As if synchronized via sleep:\n");
- Printf("%s", d.EndSleep());
+ Printf("%s", d.Default());
PrintStack(s);
}
void PrintReport(const ReportDesc *rep) {
Decorator d;
Printf("==================\n");
- const char *rep_typ_str = ReportTypeString(rep->typ);
+ const char *rep_typ_str = ReportTypeString(rep->typ, rep->tag);
Printf("%s", d.Warning());
Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
(int)internal_getpid());
- Printf("%s", d.EndWarning());
+ Printf("%s", d.Default());
if (rep->typ == ReportTypeDeadlock) {
char thrbuf[kThreadBufSize];
PrintMutexShort(rep->mutexes[i], " in ");
Printf("%s", d.ThreadDescription());
Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
- Printf("%s", d.EndThreadDescription());
+ Printf("%s", d.Default());
if (flags()->second_deadlock_stack) {
PrintStack(rep->stacks[2*i]);
Printf(" Mutex ");
ReportErrorSummary(rep_typ_str, frame->info);
}
+ if (common_flags()->print_module_map == 2) PrintModuleMap();
+
Printf("==================\n");
}
ReportTypeVptrRace,
ReportTypeUseAfterFree,
ReportTypeVptrUseAfterFree,
+ ReportTypeExternalRace,
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock,
int size;
bool write;
bool atomic;
+ uptr external_tag;
Vector<ReportMopMutex> mset;
ReportStack *stack;
DataInfo global;
uptr heap_chunk_start;
uptr heap_chunk_size;
+ uptr external_tag;
int tid;
int fd;
bool suppressable;
struct ReportThread {
int id;
- uptr os_id;
+ tid_t os_id;
bool running;
+ bool workerthread;
char *name;
- int parent_tid;
+ u32 parent_tid;
ReportStack *stack;
};
class ReportDesc {
public:
ReportType typ;
+ uptr tag;
Vector<ReportStack*> stacks;
Vector<ReportMop*> mops;
Vector<ReportLocation*> locs;
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
namespace __tsan {
#if !SANITIZER_GO && !SANITIZER_MAC
- __attribute__((tls_model("initial-exec")))
+__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
- , fired_suppressions(8) {
+ , fired_suppressions(8)
+ , clock_alloc("clock allocator") {
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
#endif
void DontNeedShadowFor(uptr addr, uptr size) {
- uptr shadow_beg = MemToShadow(addr);
- uptr shadow_end = MemToShadow(addr + size);
- ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}
void MapShadow(uptr addr, uptr size) {
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
CHECK_EQ(tid, 0);
- ThreadStart(thr, tid, internal_getpid());
+ ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
#if TSAN_CONTAINS_UBSAN
__ubsan::InitAsPlugin();
#endif
int Finalize(ThreadState *thr) {
bool failed = false;
+ if (common_flags()->print_module_map == 1) PrintModuleMap();
+
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
SleepForMillis(flags()->atexit_sleep_ms);
// Don't want to touch lots of shadow memory.
// If a program maps a 10 MB stack, there is no need to reset the whole range.
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
- // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
- // so we do it only for C/C++.
- if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) {
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
+ if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
thr->shadow_stack_pos--;
}
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
- if (!ctx->after_multithreaded_fork)
+ if (save_stack && !ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->ignore_reads_and_writes--;
- CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
}
}
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
- if (!ctx->after_multithreaded_fork)
+ if (save_stack && !ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_sync, 0);
thr->ignore_sync--;
- CHECK_GE(thr->ignore_sync, 0);
#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
-static const uptr kAllocatorSpace = 0;
-static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
- kAllocatorSize >> kAllocatorRegionSizeLog;
+ SANITIZER_MMAP_RANGE_SIZE >> kAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
MapUnmapCallback> ByteMap;
-typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
- CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
- MapUnmapCallback> PrimaryAllocator;
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = kAllocatorRegionSizeLog;
+ typedef __tsan::ByteMap ByteMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
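AP32 follows the params-struct convention this merge applies to SizeClassAllocator32: every knob that used to be a positional template argument is now a named member, so the allocator typedef takes a single parameter.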
#else
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
// for better performance.
int ignore_reads_and_writes;
int ignore_sync;
+ int suppress_reports;
// Go does not support ignores.
#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
extern Context *ctx; // The one and the only global runtime context.
+ALWAYS_INLINE Flags *flags() {
+ return &ctx->flags;
+}
+
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
}
};
+const char *GetObjectTypeFromTag(uptr tag);
+const char *GetReportHeaderFromTag(uptr tag);
+uptr TagFromShadowStackFrame(uptr pc);
+
class ScopedReport {
public:
- explicit ScopedReport(ReportType typ);
+ explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
~ScopedReport();
- void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
const MutexSet *mset);
void AddStack(StackTrace stack, bool suppressable = false);
void AddThread(const ThreadContext *tctx, bool suppressable = false);
void operator = (const ScopedReport&);
};
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset);
+ MutexSet *mset, uptr *tag = nullptr);
+
+// The stack could look like:
+// <start> | <main> | <foo> | tag | <bar>
+// This will extract the tag and keep:
+// <start> | <main> | <foo> | <bar>
+template<typename StackTraceTy>
+void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
+ if (stack->size < 2) return;
+ uptr possible_tag_pc = stack->trace[stack->size - 2];
+ uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
+ if (possible_tag == kExternalTagNone) return;
+ stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
+ stack->size -= 1;
+ if (tag) *tag = possible_tag;
+}
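A hedged illustration with hypothetical frame values: for a shadow stack {main, foo, TAG_PC, bar} where TagFromShadowStackFrame(TAG_PC) yields a nonzero tag, ExtractTagFromStack rewrites the buffer to {main, foo, bar}, shrinks the size by one, and reports the tag through *tag; stacks with fewer than two frames are left untouched.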
template<typename StackTraceTy>
-void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
+ uptr *tag = nullptr) {
uptr size = thr->shadow_stack_pos - thr->shadow_stack;
uptr start = 0;
if (size + !!toppc > kStackTraceMax) {
size = kStackTraceMax - !!toppc;
}
stack->Init(&thr->shadow_stack[start], size, toppc);
+ ExtractTagFromStack(stack, tag);
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
-void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
-void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
-void ThreadStart(ThreadState *thr, int tid, uptr os_id);
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
- bool rw, bool recursive, bool linker_init);
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
- bool try_lock = false);
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
+ int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
return;
DCHECK_GE((int)typ, 0);
DCHECK_LE((int)typ, 7);
- DCHECK_EQ(GetLsb(addr, 61), addr);
+ DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
}
Event *trace = (Event*)GetThreadTrace(fs.tid());
Event *evp = &trace[pos];
- Event ev = (u64)addr | ((u64)typ << 61);
+ Event ev = (u64)addr | ((u64)typ << kEventPCBits);
*evp = ev;
}
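Both directions of the encoding touched here fit in a few lines; this sketch mirrors the packing above and the unpacking that RestoreStack performs further down:

// Sketch of the Event packing (Event is a u64 typedef): the low kEventPCBits
// (61) bits carry the PC/address payload, the top three bits the EventType.
Event EncodeEvent(EventType typ, uptr addr) {
  return (u64)addr | ((u64)typ << kEventPCBits);
}
EventType EventTypeOf(Event ev) { return (EventType)(ev >> kEventPCBits); }
uptr EventPCOf(Event ev) { return (uptr)(ev & ((1ull << kEventPCBits) - 1)); }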
+// The content of this file is AArch64-only:
+#if defined(__aarch64__)
+
#include "sanitizer_common/sanitizer_asm.h"
+#if !defined(__APPLE__)
.section .bss
.type __tsan_pointer_chk_guard, %object
-.size __tsan_pointer_chk_guard, 8
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__tsan_pointer_chk_guard))
__tsan_pointer_chk_guard:
.zero 8
+#endif
+
+#if defined(__APPLE__)
+.align 2
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _setjmp$non_lazy_ptr
+_setjmp$non_lazy_ptr:
+.indirect_symbol _setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long __setjmp$non_lazy_ptr
+__setjmp$non_lazy_ptr:
+.indirect_symbol __setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _sigsetjmp$non_lazy_ptr
+_sigsetjmp$non_lazy_ptr:
+.indirect_symbol _sigsetjmp
+.long 0
+#endif
+#if !defined(__APPLE__)
.section .text
+#else
+.section __TEXT,__text
+.align 3
+#endif
+#if !defined(__APPLE__)
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random guard pointer. For AArch64 it is a
// global variable rather than a TCB one (as for x86_64/powerpc) and although
// not stable). So InitializeGuardPtr obtains the pointer guard value by
// issuing a setjmp and checking the resulting pointer values against the
// original ones.
-.hidden _Z18InitializeGuardPtrv
+ASM_HIDDEN(_Z18InitializeGuardPtrv)
.global _Z18InitializeGuardPtrv
-.type _Z18InitializeGuardPtrv, @function
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
_Z18InitializeGuardPtrv:
CFI_STARTPROC
// Allocates a jmp_buf for the setjmp call.
CFI_DEF_CFA (31, 0)
ret
CFI_ENDPROC
-.size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
+#endif
-.hidden __tsan_setjmp
+ASM_HIDDEN(__tsan_setjmp)
.comm _ZN14__interception11real_setjmpE,8,8
-.type setjmp, @function
-setjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
CFI_STARTPROC
// save env parameters for function call
CFI_OFFSET (19, -16)
mov x19, x0
+#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
+#else
+ add x0, x29, 32
+ mov x1, x0
+#endif
// call tsan interceptor
- bl __tsan_setjmp
+ bl ASM_TSAN_SYMBOL(__tsan_setjmp)
// restore env parameter
mov x0, x19
CFI_DEF_CFA (31, 0)
// tail jump to libc setjmp
+#if !defined(__APPLE__)
adrp x1, :got:_ZN14__interception11real_setjmpE
ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
ldr x1, [x1]
+#else
+ adrp x1, _setjmp$non_lazy_ptr@page
+ add x1, x1, _setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
br x1
CFI_ENDPROC
-.size setjmp, .-setjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
.comm _ZN14__interception12real__setjmpE,8,8
-.globl _setjmp
-.type _setjmp, @function
-_setjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
// save env parameters for function call
CFI_OFFSET (19, -16)
mov x19, x0
+#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
+#else
+ add x0, x29, 32
+ mov x1, x0
+#endif
// call tsan interceptor
- bl __tsan_setjmp
+ bl ASM_TSAN_SYMBOL(__tsan_setjmp)
// Restore jmp_buf parameter
mov x0, x19
CFI_DEF_CFA (31, 0)
// tail jump to libc setjmp
+#if !defined(__APPLE__)
adrp x1, :got:_ZN14__interception12real__setjmpE
ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
ldr x1, [x1]
+#else
+ adrp x1, __setjmp$non_lazy_ptr@page
+ add x1, x1, __setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
br x1
CFI_ENDPROC
-.size _setjmp, .-_setjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
.comm _ZN14__interception14real_sigsetjmpE,8,8
-.globl sigsetjmp
-.type sigsetjmp, @function
-sigsetjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
CFI_STARTPROC
// save env parameters for function call
mov w20, w1
mov x19, x0
+#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
+#else
+ add x0, x29, 32
+ mov x1, x0
+#endif
// call tsan interceptor
- bl __tsan_setjmp
+ bl ASM_TSAN_SYMBOL(__tsan_setjmp)
// restore env parameter
mov w1, w20
CFI_DEF_CFA (31, 0)
// tail jump to libc sigsetjmp
+#if !defined(__APPLE__)
adrp x2, :got:_ZN14__interception14real_sigsetjmpE
ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
ldr x2, [x2]
+#else
+ adrp x2, _sigsetjmp$non_lazy_ptr@page
+ add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
+ ldr x2, [x2]
+#endif
br x2
CFI_ENDPROC
-.size sigsetjmp, .-sigsetjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
-.globl __sigsetjmp
-.type __sigsetjmp, @function
-__sigsetjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
// save env parameters for function call
mov w20, w1
mov x19, x0
+#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
+#endif
// call tsan interceptor
- bl __tsan_setjmp
+ bl ASM_TSAN_SYMBOL(__tsan_setjmp)
mov w1, w20
mov x0, x19
CFI_DEF_CFA (31, 0)
// tail jump to libc __sigsetjmp
+#if !defined(__APPLE__)
adrp x2, :got:_ZN14__interception16real___sigsetjmpE
ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
ldr x2, [x2]
+#else
+ adrp x2, ASM_TSAN_SYMBOL(__sigsetjmp)@page
+ add x2, x2, ASM_TSAN_SYMBOL(__sigsetjmp)@pageoff
+#endif
br x2
CFI_ENDPROC
-.size __sigsetjmp, .-__sigsetjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif
#if defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
+
+#endif
+// The content of this file is x86_64-only:
+#if defined(__x86_64__)
+
#include "sanitizer_common/sanitizer_asm.h"
+
#if !defined(__APPLE__)
.section .text
#else
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif
+
+#endif
OutputReport(thr, rep);
}
-void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
- bool rw, bool recursive, bool linker_init) {
- DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
StatInc(thr, StatMutexCreate);
- if (!linker_init && IsAppMem(addr)) {
+ if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
- s->is_rw = rw;
- s->is_recursive = recursive;
- s->is_linker_init = linker_init;
+ s->SetFlags(flagz & MutexCreationFlagMask);
if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
-void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
- if (s->is_linker_init) {
+ if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit)) {
// Destroy is no-op for linker-initialized mutexes.
s->mtx.Unlock();
return;
bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
- && !s->is_broken) {
- s->is_broken = true;
+ && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
unlock_locked = true;
}
u64 mid = s->GetId();
// s will be destroyed and freed in MetaMap::FreeBlock.
}
-void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
- DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
- CHECK_GT(rec, 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ } else {
+ s->mtx.ReadUnlock();
+ }
+ }
+}
+
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+ DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
+ thr->tid, addr, flagz, rec);
+ if (flagz & MutexFlagRecursiveLock)
+ CHECK_GT(rec, 0);
+ else
+ rec = 1;
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->UpdateFlags(flagz);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
s->last_lock = thr->fast_state.raw();
} else if (s->owner_tid == thr->tid) {
CHECK_GT(s->recursion, 0);
- } else if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_double_lock = true;
}
- if (s->recursion == 0) {
+ const bool first = s->recursion == 0;
+ s->recursion += rec;
+ if (first) {
StatInc(thr, StatMutexLock);
AcquireImpl(thr, pc, &s->clock);
AcquireImpl(thr, pc, &s->read_clock);
- } else if (!s->is_recursive) {
+ } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
StatInc(thr, StatMutexRecLock);
}
- s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
- if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
+ bool pre_lock = false;
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
Callback cb(thr, pc);
- if (!try_lock)
+ if (pre_lock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
- ctx->dd->MutexAfterLock(&cb, &s->dd, true, try_lock);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
}
u64 mid = s->GetId();
s->mtx.Unlock();
// Can't touch s after this point.
+ s = 0;
if (report_double_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
- if (common_flags()->detect_deadlocks) {
+ if (first && pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
}
-int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
- DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
int rec = 0;
bool report_bad_unlock = false;
if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
} else {
- rec = all ? s->recursion : 1;
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
s->recursion -= rec;
if (s->recursion == 0) {
StatInc(thr, StatMutexUnlock);
return rec;
}
-void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool trylock) {
- DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_lock = true;
}
}
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
- if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ bool pre_lock = false;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
Callback cb(thr, pc);
- if (!trylock)
+ if (pre_lock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
- ctx->dd->MutexAfterLock(&cb, &s->dd, false, trylock);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
}
u64 mid = s->GetId();
s->mtx.ReadUnlock();
// Can't touch s after this point.
+ s = 0;
if (report_bad_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
- if (common_flags()->detect_deadlocks) {
+ if (pre_lock && common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
if (s->owner_tid != SyncVar::kInvalidTid) {
- if (flags()->report_mutex_bugs && !s->is_broken) {
- s->is_broken = true;
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
}
} else {
StatInc(thr, StatMutexRecUnlock);
}
- } else if (!s->is_broken) {
- s->is_broken = true;
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
if (tctx->status == ThreadStatusRunning)
- thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
- else
- thr->clock.set(tctx->tid, tctx->epoch1);
+ epoch = tctx->thr->fast_state.epoch();
+ thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
void AcquireGlobal(ThreadState *thr, uptr pc) {
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
if (tctx->status == ThreadStatusRunning)
- thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
- else
- thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
+ epoch = tctx->thr->fast_state.epoch();
+ thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
void AfterSleep(ThreadState *thr, uptr pc) {
return stack;
}
-ScopedReport::ScopedReport(ReportType typ) {
+ScopedReport::ScopedReport(ReportType typ, uptr tag) {
ctx->thread_registry->CheckLocked();
void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
rep_ = new(mem) ReportDesc;
rep_->typ = typ;
+ rep_->tag = tag;
ctx->report_mtx.Lock();
CommonSanitizerReportMutex.Lock();
}
(*rs)->suppressable = suppressable;
}
-void ScopedReport::AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
- const MutexSet *mset) {
+void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
ReportMop *mop = new(mem) ReportMop;
rep_->mops.PushBack(mop);
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
mop->stack = SymbolizeStack(stack);
+ mop->external_tag = external_tag;
if (mop->stack)
mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
rt->running = (tctx->status == ThreadStatusRunning);
rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
+ rt->workerthread = tctx->workerthread;
rt->stack = 0;
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
if (rt->stack)
return;
#if !SANITIZER_GO
int fd = -1;
- int creat_tid = -1;
+ int creat_tid = kInvalidTid;
u32 creat_stack = 0;
if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
ReportLocation *loc = ReportLocation::New(ReportLocationFD);
ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
loc->heap_chunk_size = b->siz;
+ loc->external_tag = b->tag;
loc->tid = tctx ? tctx->tid : b->tid;
loc->stack = SymbolizeStackId(b->stk);
rep_->locs.PushBack(loc);
}
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
- MutexSet *mset) {
+ MutexSet *mset, uptr *tag) {
// This function restores stack trace and mutex set for the thread/epoch.
// It does so by getting stack trace and mutex set at the beginning of
// the trace part, and then replaying the trace till the given epoch.
Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
Event ev = events[i];
- EventType typ = (EventType)(ev >> 61);
- uptr pc = (uptr)(ev & ((1ull << 61) - 1));
+ EventType typ = (EventType)(ev >> kEventPCBits);
+ uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
if (typ == EventTypeMop) {
stack[pos] = pc;
return;
pos++;
stk->Init(&stack[0], pos);
+ ExtractTagFromStack(stk, tag);
}
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
}
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
- if (!flags()->report_bugs)
+ if (!flags()->report_bugs || thr->suppress_reports)
return false;
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
const uptr kMop = 2;
VarSizeStackTrace traces[kMop];
- const uptr toppc = TraceTopPC(thr);
- ObtainCurrentStack(thr, toppc, &traces[0]);
+ uptr tags[kMop] = {kExternalTagNone};
+ uptr toppc = TraceTopPC(thr);
+ if (toppc >> kEventPCBits) {
+ // This is a work-around for a known issue.
+ // The scenario where this happens is rather elaborate and requires
+ // an instrumented __sanitizer_report_error_summary callback, a
+ // __tsan_symbolize_external callback, and a race during a range memory
+ // access larger than 8 bytes. MemoryAccessRange adds the current PC to
+ // the trace and starts processing memory accesses. The first memory access
+ // triggers a race; we report it and call the instrumented
+ // __sanitizer_report_error_summary, which adds more stuff to the trace
+ // since it is instrumented. Then a second memory access in MemoryAccessRange
+ // also triggers a race and we get here and call TraceTopPC to get the
+ // current PC; however, it now contains some unrelated events from the
+ // callback. Most likely, TraceTopPC will now return an EventTypeFuncExit
+ // event. Later we subtract 1 from it (in GetPreviousInstructionPc)
+ // and the resulting PC has kExternalPCBit set, so we pass it to
+ // __tsan_symbolize_external. __tsan_symbolize_external is within its rights
+ // to crash since the PC is completely bogus.
+ // test/tsan/double_race.cc contains a test case for this.
+ toppc = 0;
+ }
+ ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
if (IsFiredSuppression(ctx, typ, traces[0]))
return;
MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
Shadow s2(thr->racy_state[1]);
- RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2);
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
if (IsFiredSuppression(ctx, typ, traces[1]))
return;
if (HandleRacyStacks(thr, traces, addr_min, addr_max))
return;
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
+ }
+
ThreadRegistryLock l0(ctx->thread_registry);
- ScopedReport rep(typ);
+ ScopedReport rep(typ, tag);
for (uptr i = 0; i < kMop; i++) {
Shadow s(thr->racy_state[i]);
- rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? &thr->mset : mset2);
+ rep.AddMemoryAccess(addr, tags[i], s, traces[i],
+ i == 0 ? &thr->mset : mset2);
}
for (uptr i = 0; i < kMop; i++) {
void ThreadContext::OnReset() {
CHECK_EQ(sync.size(), 0);
- ReleaseMemoryToOS(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
//!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}
if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+#if !SANITIZER_GO
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+#endif
thr->~ThreadState();
#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
return tid;
}
-void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) {
uptr stk_addr = 0;
uptr stk_size = 0;
uptr tls_addr = 0;
if (stk_addr && stk_size)
MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
- if (tls_addr && tls_size) {
- // Check that the thr object is in tls;
- const uptr thr_beg = (uptr)thr;
- const uptr thr_end = (uptr)thr + sizeof(*thr);
- CHECK_GE(thr_beg, tls_addr);
- CHECK_LE(thr_beg, tls_addr + tls_size);
- CHECK_GE(thr_end, tls_addr);
- CHECK_LE(thr_end, tls_addr + tls_size);
- // Since the thr object is huge, skip it.
- MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
- MemoryRangeImitateWrite(thr, /*pc=*/ 2,
- thr_end, tls_addr + tls_size - thr_end);
- }
+ if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
}
#endif
ThreadRegistry *tr = ctx->thread_registry;
OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
- tr->StartThread(tid, os_id, &args);
+ tr->StartThread(tid, os_id, workerthread, &args);
tr->Lock();
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
StatInc(thr, StatMopRange);
if (*shadow_mem == kShadowRodata) {
+ DCHECK(!is_write);
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMopRangeRodata);
name[StatClockAcquire] = "Clock acquire ";
name[StatClockAcquireEmpty] = " empty clock ";
name[StatClockAcquireFastRelease] = " fast from release-store ";
- name[StatClockAcquireLarge] = " contains my tid ";
- name[StatClockAcquireRepeat] = " repeated (fast) ";
name[StatClockAcquireFull] = " full (slow) ";
name[StatClockAcquiredSomething] = " acquired something ";
name[StatClockRelease] = "Clock release ";
name[StatClockReleaseResize] = " resize ";
- name[StatClockReleaseFast1] = " fast1 ";
- name[StatClockReleaseFast2] = " fast2 ";
+ name[StatClockReleaseFast] = " fast ";
name[StatClockReleaseSlow] = " dirty overflow (slow) ";
name[StatClockReleaseFull] = " full (slow) ";
name[StatClockReleaseAcquired] = " was acquired ";
name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
name[StatAnnotateThreadName] = " ThreadName ";
+ name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
+ name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
+ name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
+ name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
+ name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
+ name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
+ name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
+ name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
+ name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
+ name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
name[StatMtxTotal] = "Contentionz ";
name[StatMtxTrace] = " Trace ";
StatClockAcquire,
StatClockAcquireEmpty,
StatClockAcquireFastRelease,
- StatClockAcquireLarge,
- StatClockAcquireRepeat,
StatClockAcquireFull,
StatClockAcquiredSomething,
// Clocks - release.
StatClockRelease,
StatClockReleaseResize,
- StatClockReleaseFast1,
- StatClockReleaseFast2,
+ StatClockReleaseFast,
StatClockReleaseSlow,
StatClockReleaseFull,
StatClockReleaseAcquired,
StatAnnotatePublishMemoryRange,
StatAnnotateUnpublishMemoryRange,
StatAnnotateThreadName,
+ Stat__tsan_mutex_create,
+ Stat__tsan_mutex_destroy,
+ Stat__tsan_mutex_pre_lock,
+ Stat__tsan_mutex_post_lock,
+ Stat__tsan_mutex_pre_unlock,
+ Stat__tsan_mutex_post_unlock,
+ Stat__tsan_mutex_pre_signal,
+ Stat__tsan_mutex_post_signal,
+ Stat__tsan_mutex_pre_divert,
+ Stat__tsan_mutex_post_divert,
// Internal mutex contentionz.
StatMtxTotal,
return kSuppressionRace;
else if (typ == ReportTypeVptrUseAfterFree)
return kSuppressionRace;
+ else if (typ == ReportTypeExternalRace)
+ return kSuppressionRace;
else if (typ == ReportTypeThreadLeak)
return kSuppressionThread;
else if (typ == ReportTypeMutexDestroyLocked)
owner_tid = kInvalidTid;
last_lock = 0;
recursion = 0;
- is_rw = 0;
- is_recursive = 0;
- is_broken = 0;
- is_linker_init = 0;
+ atomic_store_relaxed(&flags, 0);
if (proc == 0) {
CHECK_EQ(clock.size(), 0);
}
}
-MetaMap::MetaMap() {
+MetaMap::MetaMap()
+ : block_alloc_("heap block allocator")
+ , sync_alloc_("sync allocator") {
atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
+ b->tag = 0;
b->tid = thr->tid;
b->stk = CurrentStackId(thr, pc);
u32 *meta = MemToMeta(p);
namespace __tsan {
+// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
+// See documentation there as well.
+enum MutexFlags {
+ MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
+ MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
+ MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
+ MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
+ MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
+ MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
+ MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
+ MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+
+ // The following flags are runtime private.
+ // Mutex API misuse was detected, so don't report any more.
+ MutexFlagBroken = 1 << 30,
+ // We did not intercept the pre-lock event, so handle it on post-lock.
+ MutexFlagDoPreLockOnPostLock = 1 << 29,
+ // Must list all mutex creation flags.
+ MutexCreationFlagMask = MutexFlagLinkerInit |
+ MutexFlagWriteReentrant |
+ MutexFlagReadReentrant,
+};
+
struct SyncVar {
SyncVar();
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
int recursion;
- bool is_rw;
- bool is_recursive;
- bool is_broken;
- bool is_linker_init;
+ atomic_uint32_t flags;
u32 next; // in MetaMap
DDMutex dd;
SyncClock read_clock; // Used for rw mutexes only.
*uid = id >> 48;
return (uptr)GetLsb(id, 48);
}
+
+ bool IsFlagSet(u32 f) const {
+ return atomic_load_relaxed(&flags) & f;
+ }
+
+ void SetFlags(u32 f) {
+ atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
+ }
+
+ void UpdateFlags(u32 flagz) {
+ // Filter out operation flags.
+ if (!(flagz & MutexCreationFlagMask))
+ return;
+ u32 current = atomic_load_relaxed(&flags);
+ if (current & MutexCreationFlagMask)
+ return;
+ // Note: this can be called from MutexPostReadLock which holds only read
+ // lock on the SyncVar.
+ atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
+ }
};
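
The four ad-hoc bools on SyncVar collapse into one relaxed-atomic bit mask, with IsFlagSet/SetFlags as the accessors and UpdateFlags recording creation flags only once (filtered through MutexCreationFlagMask). A minimal standalone sketch of the pattern, using illustrative names rather than the tsan-internal types:

    #include <atomic>
    #include <cstdint>

    enum : uint32_t {
      FlagLinkerInit     = 1u << 0,  // stands in for MutexFlagLinkerInit
      FlagWriteReentrant = 1u << 1,
      FlagReadReentrant  = 1u << 2,
    };

    struct SyncVarSketch {
      std::atomic<uint32_t> flags{0};

      bool IsFlagSet(uint32_t f) const {
        return flags.load(std::memory_order_relaxed) & f;
      }
      void SetFlags(uint32_t f) {
        // Load-or-store with relaxed ordering, as in the patch; callers
        // already serialize on the SyncVar lock, so no atomic RMW is needed.
        flags.store(flags.load(std::memory_order_relaxed) | f,
                    std::memory_order_relaxed);
      }
    };
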
/* MetaMap allows to map arbitrary user pointers onto various descriptors.
// u64 addr : 61; // Associated pc.
typedef u64 Event;
+const uptr kEventPCBits = 61;
+
struct TraceHeader {
#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
# May be used by toolexeclibdir.
gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER)
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1 -DUBSAN_CAN_USE_CXXABI=1
AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros
AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS)
AM_CXXFLAGS += -std=gnu++11
ubsan_files = \
$(ubsan_plugin_files) \
- ubsan_init_standalone.cc
+ ubsan_init_standalone.cc \
+ ubsan_signals_standalone.cc
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la
ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \
ubsan_type_hash_itanium.lo ubsan_type_hash_win.lo \
ubsan_value.lo
-am__objects_2 = $(am__objects_1) ubsan_init_standalone.lo
+am__objects_2 = $(am__objects_1) ubsan_init_standalone.lo \
+ ubsan_signals_standalone.lo
am_libubsan_la_OBJECTS = $(am__objects_2)
libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS)
libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
-DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1
+DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1 -DUBSAN_CAN_USE_CXXABI=1
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ubsan_files = \
$(ubsan_plugin_files) \
- ubsan_init_standalone.cc
+ ubsan_init_standalone.cc \
+ ubsan_signals_standalone.cc
libubsan_la_SOURCES = $(ubsan_files)
libubsan_la_LIBADD = \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init_standalone.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_signals_standalone.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_itanium.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_win.Plo@am__quote@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-0:0:0
+1:0:0
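
The version bump follows libtool's CURRENT:REVISION:AGE convention: an incompatible interface change, here the removal of the old type-mismatch and nonnull-return entry points in favor of the _v1 variants, increments CURRENT and resets REVISION and AGE. Roughly how the triple reaches the linker (a sketch; the in-tree Makefiles read the triple out of this libtool-version file):

    # CURRENT:REVISION:AGE, consumed as a libtool -version-info triple:
    libubsan_la_LDFLAGS = -version-info 1:0:0
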
UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero",
"integer-divide-by-zero")
UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero")
+UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use")
UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base")
UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent")
UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds")
using namespace __ubsan;
+void __ubsan::GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack,
+ uptr max_depth, uptr pc, uptr bp,
+ void *context, bool fast) {
+ uptr top = 0;
+ uptr bottom = 0;
+ if (fast)
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ stack->Unwind(max_depth, pc, bp, context, top, bottom, fast);
+}
+
static void MaybePrintStackTrace(uptr pc, uptr bp) {
// We assume that flags are already parsed, as UBSan runtime
// will definitely be called when we print the first diagnostics message.
if (!flags()->print_stacktrace)
return;
- // We can only use slow unwind, as we don't have any information about stack
- // top/bottom.
- // FIXME: It's better to respect "fast_unwind_on_fatal" runtime flag and
- // fetch stack top/bottom information if we have it (e.g. if we're running
- // under ASan).
- if (StackTrace::WillUseFastUnwind(false))
- return;
+
BufferedStackTrace stack;
- stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, nullptr,
+ common_flags()->fast_unwind_on_fatal);
stack.Print();
}
AI.line = SLoc.getLine();
AI.column = SLoc.getColumn();
AI.function = internal_strdup(""); // Avoid printing ?? as function name.
- ReportErrorSummary(ErrorKind, AI);
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
AI.Clear();
return;
}
} else if (Loc.isSymbolizedStack()) {
const AddressInfo &AI = Loc.getSymbolizedStack()->info;
- ReportErrorSummary(ErrorKind, AI);
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
return;
}
- ReportErrorSummary(ErrorKind);
+ ReportErrorSummary(ErrorKind, GetSanititizerToolName());
}
namespace {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Highlight() const { return Green(); }
- const char *EndHighlight() const { return Default(); }
const char *Note() const { return Black(); }
- const char *EndNote() const { return Default(); }
};
}
common_flags()->strip_path_prefix);
else if (Info.module)
RenderModuleLocation(Buffer, Info.module, Info.module_offset,
- common_flags()->strip_path_prefix);
+ Info.module_arch, common_flags()->strip_path_prefix);
else
Buffer->append("%p", Info.address);
return;
Buffer.append("%c", P == Loc ? '^' : Byte);
Buffer.append("%c", Byte);
}
- Buffer.append("%s\n", Decor.EndHighlight());
+ Buffer.append("%s\n", Decor.Default());
// Go over the line again, and print names for the ranges.
InRange = 0;
Diag::~Diag() {
// All diagnostics should be printed under report mutex.
- CommonSanitizerReportMutex.CheckLocked();
+ ScopedReport::CheckLocked();
Decorator Decor;
InternalScopedString Buffer(1024);
switch (Level) {
case DL_Error:
- Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.EndWarning(),
+ Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
Decor.Bold());
break;
case DL_Note:
- Buffer.append("%s note: %s", Decor.Note(), Decor.EndNote());
+ Buffer.append("%s note: %s", Decor.Note(), Decor.Default());
break;
}
PrintMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges, NumRanges, Args);
}
+ScopedReport::Initializer::Initializer() { InitAsStandaloneIfNecessary(); }
+
ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc,
ErrorType Type)
- : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) {
- InitAsStandaloneIfNecessary();
- CommonSanitizerReportMutex.Lock();
-}
+ : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) {}
ScopedReport::~ScopedReport() {
MaybePrintStackTrace(Opts.pc, Opts.bp);
MaybeReportErrorSummary(SummaryLoc, Type);
- CommonSanitizerReportMutex.Unlock();
if (flags()->halt_on_error)
Die();
}
GET_CALLER_PC_BP; \
ReportOptions Opts = {unrecoverable_handler, pc, bp}
+void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
+ uptr pc, uptr bp, void *context,
+ bool fast);
+
/// \brief Instantiate this class before printing diagnostics in the error
/// report. This class ensures that reports from different threads and from
/// different sanitizers won't be mixed.
class ScopedReport {
+ struct Initializer {
+ Initializer();
+ };
+ Initializer initializer_;
+ ScopedErrorReportLock report_lock_;
+
ReportOptions Opts;
Location SummaryLoc;
ErrorType Type;
public:
ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type);
~ScopedReport();
+
+ static void CheckLocked() { ScopedErrorReportLock::CheckLocked(); }
};
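
ScopedReport now sequences its setup through member construction order instead of a constructor body: non-static members are constructed in declaration order (and destroyed in reverse), so initializer_ runs InitAsStandaloneIfNecessary() before report_lock_ takes the shared error-report lock. A standalone sketch of the idiom, with illustrative names:

    #include <cstdio>

    struct Initializer {
      Initializer() { std::puts("runtime init runs first"); }
    };
    struct ReportLock {
      ReportLock()  { std::puts("then the report lock is taken"); }
      ~ReportLock() { std::puts("and released before anything else unwinds"); }
    };

    class ScopedReportSketch {
      Initializer initializer_;  // declared first, constructed first
      ReportLock report_lock_;   // constructed second, destroyed first
    };
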
void InitializeSuppressions();
--- /dev/null
+//===-- ubsan_diag_standalone.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Diagnostic reporting for the standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+
+using namespace __ubsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ uptr top = 0;
+ uptr bottom = 0;
+ bool request_fast_unwind = common_flags()->fast_unwind_on_fatal;
+ if (request_fast_unwind)
+ __sanitizer::GetThreadStackTopAndBottom(false, &top, &bottom);
+
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ BufferedStackTrace stack;
+ stack.Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom,
+ request_fast_unwind);
+ stack.Print();
+}
+} // extern "C"
+
+#endif // CAN_SANITIZE_UB
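
A hypothetical call site for the entry point this new file exports; __sanitizer_print_stack_trace is part of the public sanitizer interface, so an instrumented program can request a stack dump directly:

    extern "C" void __sanitizer_print_stack_trace();

    void debug_checkpoint() {
      // Unwinds and prints the current stack through the standalone UBSan
      // runtime, honoring fast_unwind_on_fatal as implemented above.
      __sanitizer_print_stack_trace();
    }
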
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.print_summary = false;
+ cf.external_symbolizer_path = GetEnv("UBSAN_SYMBOLIZER_PATH");
OverrideCommonFlags(cf);
}
} // namespace __ubsan
-extern "C" {
-
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char *__ubsan_default_options() { return ""; }
-#endif
-
-#if SANITIZER_WINDOWS
-const char *__ubsan_default_default_options() { return ""; }
-# ifdef _WIN64
-# pragma comment(linker, "/alternatename:__ubsan_default_options=__ubsan_default_default_options")
-# else
-# pragma comment(linker, "/alternatename:___ubsan_default_options=___ubsan_default_default_options")
-# endif
-#endif
-
-} // extern "C"
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __ubsan_default_options, void) {
+ return "";
+}
#endif // CAN_SANITIZE_UB
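
SANITIZER_INTERFACE_WEAK_DEF folds the Unix weak-hook and Windows /alternatename plumbing into one macro; the override mechanism for users is unchanged. A program supplies a strong definition and the weak runtime default gives way to it, e.g.:

    // In the instrumented program (standard usage, not part of the patch):
    extern "C" const char *__ubsan_default_options() {
      return "print_stacktrace=1";
    }
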
const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
"member call on", "constructor call on", "downcast of", "downcast of",
- "upcast of", "cast to virtual base of"};
+ "upcast of", "cast to virtual base of", "_Nonnull binding to"};
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
+ uptr Alignment = (uptr)1 << Data->LogAlignment;
ErrorType ET;
if (!Pointer)
ET = ErrorType::NullPointerUse;
- else if (Data->Alignment && (Pointer & (Data->Alignment - 1)))
+ else if (Pointer & (Alignment - 1))
ET = ErrorType::MisalignedPointerUse;
else
ET = ErrorType::InsufficientObjectSize;
case ErrorType::MisalignedPointerUse:
Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, "
"which requires %2 byte alignment")
- << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer
- << Data->Alignment << Data->Type;
+ << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment
+ << Data->Type;
break;
case ErrorType::InsufficientObjectSize:
Diag(Loc, DL_Error, "%0 address %1 with insufficient space "
Diag(Pointer, DL_Note, "pointer points here");
}
-void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data,
- ValueHandle Pointer) {
+void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data,
+ ValueHandle Pointer) {
GET_REPORT_OPTIONS(false);
handleTypeMismatchImpl(Data, Pointer, Opts);
}
-void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data,
- ValueHandle Pointer) {
+void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data,
+ ValueHandle Pointer) {
GET_REPORT_OPTIONS(true);
handleTypeMismatchImpl(Data, Pointer, Opts);
Die();
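
The _v1 suffix tracks an ABI change in TypeMismatchData, visible in the ubsan_handlers.h hunk further down: the word-sized Alignment field becomes a one-byte log2 value, and the handler reconstructs the byte count on entry. A sketch of the new layout (illustrative struct name):

    #include <cstdint>

    struct TypeMismatchDataV1Sketch {
      // SourceLocation Loc and const TypeDescriptor &Type are unchanged.
      unsigned char LogAlignment;   // v0 carried a uptr Alignment field here
      unsigned char TypeCheckKind;
    };

    // Recovering the byte count at report time, as handleTypeMismatchImpl does:
    inline uintptr_t AlignmentOf(const TypeMismatchDataV1Sketch &d) {
      return uintptr_t(1) << d.LogAlignment;   // LogAlignment 4 -> 16 bytes
    }
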
ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error,
- "value %0 is outside the range of representable values of type %2")
+ "%0 is outside the range of representable values of type %2")
<< Value(*FromType, From) << *FromType << *ToType;
}
SourceLocation Loc = Data->Loc.acquire();
// This check could be more precise if we used different handlers for
// -fsanitize=bool and -fsanitize=enum.
- bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'"));
+ bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) ||
+ (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6));
ErrorType ET =
IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad;
Die();
}
+static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::InvalidBuiltin;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error,
+ "passing zero to %0, which is not a valid argument")
+ << ((Data->Kind == BCK_CTZPassedZero) ? "ctz()" : "clz()");
+}
+
+void __ubsan::__ubsan_handle_invalid_builtin(InvalidBuiltinData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleInvalidBuiltin(Data, Opts);
+}
+void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleInvalidBuiltin(Data, Opts);
+ Die();
+}
+
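
For reference, the kind of source this handler fires on (a sketch; the check itself is emitted by compilers that implement -fsanitize=builtin, matching the BuiltinCheckKind enum declared in the ubsan_handlers.h hunk below):

    unsigned trailing_zeros(unsigned x) {
      return __builtin_ctz(x);   // undefined, and now reported, when x == 0
    }
    unsigned leading_zeros(unsigned x) {
      return __builtin_clz(x);   // likewise for x == 0
    }
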
static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
ValueHandle Function,
ReportOptions Opts) {
Die();
}
-static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
- SourceLocation Loc = Data->Loc.acquire();
+static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr,
+ ReportOptions Opts, bool IsAttr) {
+ if (!LocPtr)
+ UNREACHABLE("source location pointer is null!");
+
+ SourceLocation Loc = LocPtr->acquire();
ErrorType ET = ErrorType::InvalidNullReturn;
if (ignoreReport(Loc, Opts, ET))
Diag(Loc, DL_Error, "null pointer returned from function declared to never "
"return null");
if (!Data->AttrLoc.isInvalid())
- Diag(Data->AttrLoc, DL_Note, "returns_nonnull attribute specified here");
+ Diag(Data->AttrLoc, DL_Note, "%0 specified here")
+ << (IsAttr ? "returns_nonnull attribute"
+ : "_Nonnull return type annotation");
+}
+
+void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, LocPtr, Opts, true);
}
-void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
+void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, LocPtr, Opts, true);
+ Die();
+}
+
+void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
GET_REPORT_OPTIONS(false);
- handleNonNullReturn(Data, Opts);
+ handleNonNullReturn(Data, LocPtr, Opts, false);
}
-void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
+void __ubsan::__ubsan_handle_nullability_return_v1_abort(
+ NonNullReturnData *Data, SourceLocation *LocPtr) {
GET_REPORT_OPTIONS(true);
- handleNonNullReturn(Data, Opts);
+ handleNonNullReturn(Data, LocPtr, Opts, false);
Die();
}
-static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts,
+ bool IsAttr) {
SourceLocation Loc = Data->Loc.acquire();
ErrorType ET = ErrorType::InvalidNullArgument;
ScopedReport R(Opts, Loc, ET);
- Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
- "never be null") << Data->ArgIndex;
+ Diag(Loc, DL_Error,
+ "null pointer passed as argument %0, which is declared to "
+ "never be null")
+ << Data->ArgIndex;
if (!Data->AttrLoc.isInvalid())
- Diag(Data->AttrLoc, DL_Note, "nonnull attribute specified here");
+ Diag(Data->AttrLoc, DL_Note, "%0 specified here")
+ << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation");
}
void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
GET_REPORT_OPTIONS(false);
- handleNonNullArg(Data, Opts);
+ handleNonNullArg(Data, Opts, true);
}
void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
GET_REPORT_OPTIONS(true);
- handleNonNullArg(Data, Opts);
+ handleNonNullArg(Data, Opts, true);
+ Die();
+}
+
+void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts, false);
+}
+
+void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts, false);
Die();
}
ScopedReport R(Opts, Loc, ET);
- Diag(Loc, DL_Error, "pointer index expression with base %0 overflowed to %1")
- << (void *)Base << (void*)Result;
+ if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) {
+ if (Base > Result)
+ Diag(Loc, DL_Error, "addition of unsigned offset to %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ else
+ Diag(Loc, DL_Error,
+ "subtraction of unsigned offset from %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ } else {
+ Diag(Loc, DL_Error,
+ "pointer index expression with base %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ }
}
void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,
}
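
The new branching chooses among three messages: when Base and Result have the same sign bit (interpreted as sptr), the wrap can only have come from an unsigned offset, and the direction of the wrap distinguishes addition from subtraction. A standalone reduction of the selection logic (simplified types; sptr corresponds to intptr_t):

    #include <cstdint>

    const char *ClassifySketch(uintptr_t Base, uintptr_t Result) {
      if (((intptr_t)Base >= 0) == ((intptr_t)Result >= 0))
        return Base > Result
                   ? "addition of unsigned offset overflowed"      // p + huge wrapped
                   : "subtraction of unsigned offset overflowed";  // p - huge wrapped
      return "pointer index expression overflowed";  // signed index wrapped
    }
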
namespace __ubsan {
+
#ifdef UBSAN_CAN_USE_CXXABI
-SANITIZER_WEAK_ATTRIBUTE
-void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
- bool ValidVtable, ReportOptions Opts);
-#else
-static void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
- bool ValidVtable, ReportOptions Opts) {
+
+#ifdef _WIN32
+
+extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data,
+ ValueHandle Vtable,
+ bool ValidVtable,
+ ReportOptions Opts) {
Die();
}
-#endif
-} // namespace __ubsan
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(false);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
-}
+WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default)
+#else
+SANITIZER_WEAK_ATTRIBUTE
+#endif
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts);
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(true);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
+#else
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
Die();
}
+#endif
+
+} // namespace __ubsan
void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
ValueHandle Value,
if (Data->CheckKind == CFITCK_ICall)
handleCFIBadIcall(Data, Value, Opts);
else
- HandleCFIBadType(Data, Value, ValidVtable, Opts);
+ __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts);
}
void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
if (Data->CheckKind == CFITCK_ICall)
handleCFIBadIcall(Data, Value, Opts);
else
- HandleCFIBadType(Data, Value, ValidVtable, Opts);
+ __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts);
Die();
}
struct TypeMismatchData {
SourceLocation Loc;
const TypeDescriptor &Type;
- uptr Alignment;
+ unsigned char LogAlignment;
unsigned char TypeCheckKind;
};
/// \brief Handle a runtime type check failure, caused by either a misaligned
/// pointer, a null pointer, or a pointer to insufficient storage for the
/// type.
-RECOVERABLE(type_mismatch, TypeMismatchData *Data, ValueHandle Pointer)
+RECOVERABLE(type_mismatch_v1, TypeMismatchData *Data, ValueHandle Pointer)
struct OverflowData {
SourceLocation Loc;
/// \brief Handle a load of an invalid value for the type.
RECOVERABLE(load_invalid_value, InvalidValueData *Data, ValueHandle Val)
+/// Known builtin check kinds.
+/// Keep in sync with the enum of the same name in CodeGenFunction.h
+enum BuiltinCheckKind : unsigned char {
+ BCK_CTZPassedZero,
+ BCK_CLZPassedZero,
+};
+
+struct InvalidBuiltinData {
+ SourceLocation Loc;
+ unsigned char Kind;
+};
+
+/// Handle a builtin called in an invalid way.
+RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data)
+
struct FunctionTypeMismatchData {
SourceLocation Loc;
const TypeDescriptor &Type;
ValueHandle Val)
struct NonNullReturnData {
- SourceLocation Loc;
SourceLocation AttrLoc;
};
-/// \brief Handle returning null from function with returns_nonnull attribute.
-RECOVERABLE(nonnull_return, NonNullReturnData *Data)
+/// \brief Handle returning null from function with the returns_nonnull
+/// attribute, or a return type annotated with _Nonnull.
+RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
+RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
struct NonNullArgData {
SourceLocation Loc;
int ArgIndex;
};
-/// \brief Handle passing null pointer to function with nonnull attribute.
+/// \brief Handle passing null pointer to a function parameter with the nonnull
+/// attribute, or a _Nonnull type annotation.
RECOVERABLE(nonnull_arg, NonNullArgData *Data)
+RECOVERABLE(nullability_arg, NonNullArgData *Data)
struct PointerOverflowData {
SourceLocation Loc;
CFITCK_ICall,
};
-struct CFIBadIcallData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
struct CFICheckFailData {
CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
/// \brief Handle control flow integrity failures.
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
uptr VtableIsValid)
+
+struct ReportOptions;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type(
+ CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable,
+ ReportOptions Opts);
+
}
#endif // UBSAN_HANDLERS_H
}
namespace __ubsan {
-void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
- bool ValidVtable, ReportOptions Opts) {
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
ErrorType ET = ErrorType::CFIBadType;
CheckKindStr = "cast to unrelated type";
break;
case CFITCK_ICall:
+ default:
Die();
}
}
} // namespace __ubsan
-void __ubsan::__ubsan_handle_cfi_bad_type(CFIBadTypeData *TypeData,
- ValueHandle Vtable) {
- GET_REPORT_OPTIONS(false);
- CFITypeCheckKind TypeCheckKind
- = static_cast<CFITypeCheckKind> (TypeData->TypeCheckKind);
- CFICheckFailData Data = {TypeCheckKind, TypeData->Loc, TypeData->Type};
- HandleCFIBadType(&Data, Vtable, false, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *TypeData,
- ValueHandle Vtable) {
- GET_REPORT_OPTIONS(true);
- CFITypeCheckKind TypeCheckKind
- = static_cast<CFITypeCheckKind> (TypeData->TypeCheckKind);
- CFICheckFailData Data = {TypeCheckKind, TypeData->Loc, TypeData->Type};
- HandleCFIBadType(&Data, Vtable, false, Opts);
-}
-
#endif // CAN_SANITIZE_UB
unsigned char TypeCheckKind;
};
-struct CFIBadTypeData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
- unsigned char TypeCheckKind;
-};
-
/// \brief Handle a runtime type check failure, caused by an incorrect vptr.
/// When this handler is called, all we know is that the type was not in the
/// cache; this does not necessarily imply the existence of a bug.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
-
-/// \brief Handle a control flow integrity check failure by printing a
-/// diagnostic.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data, ValueHandle Vtable);
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data, ValueHandle Vtable);
}
#endif // UBSAN_HANDLERS_H
using namespace __ubsan;
-static enum {
- UBSAN_MODE_UNKNOWN = 0,
- UBSAN_MODE_STANDALONE,
- UBSAN_MODE_PLUGIN
-} ubsan_mode;
+const char *__ubsan::GetSanititizerToolName() {
+ return "UndefinedBehaviorSanitizer";
+}
+
+static bool ubsan_initialized;
static StaticSpinMutex ubsan_init_mu;
static void CommonInit() {
}
static void CommonStandaloneInit() {
- SanitizerToolName = "UndefinedBehaviorSanitizer";
- InitializeFlags();
+ SanitizerToolName = GetSanititizerToolName();
CacheBinaryName();
+ InitializeFlags();
__sanitizer_set_report_path(common_flags()->log_path);
AndroidLogInit();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
CommonInit();
- ubsan_mode = UBSAN_MODE_STANDALONE;
}
void __ubsan::InitAsStandalone() {
- if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
- CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode);
- CommonStandaloneInit();
- return;
- }
SpinMutexLock l(&ubsan_init_mu);
- CHECK_NE(UBSAN_MODE_PLUGIN, ubsan_mode);
- if (ubsan_mode == UBSAN_MODE_UNKNOWN)
+ if (!ubsan_initialized) {
CommonStandaloneInit();
-}
-
-void __ubsan::InitAsStandaloneIfNecessary() {
- if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
- CHECK_NE(UBSAN_MODE_UNKNOWN, ubsan_mode);
- return;
+ ubsan_initialized = true;
}
- SpinMutexLock l(&ubsan_init_mu);
- if (ubsan_mode == UBSAN_MODE_UNKNOWN)
- CommonStandaloneInit();
}
+void __ubsan::InitAsStandaloneIfNecessary() { return InitAsStandalone(); }
+
void __ubsan::InitAsPlugin() {
-#if !SANITIZER_CAN_USE_PREINIT_ARRAY
SpinMutexLock l(&ubsan_init_mu);
-#endif
- CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode);
- CommonInit();
- ubsan_mode = UBSAN_MODE_PLUGIN;
+ if (!ubsan_initialized) {
+ CommonInit();
+ ubsan_initialized = true;
+ }
}
#endif // CAN_SANITIZE_UB
namespace __ubsan {
+// Get the full tool name for UBSan.
+const char *GetSanititizerToolName();
+
// Initialize UBSan as a standalone tool. Typically should be called early
// during initialization.
void InitAsStandalone();
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "ubsan_init.h"
+#include "ubsan_signals_standalone.h"
+
+namespace __ubsan {
-#if SANITIZER_CAN_USE_PREINIT_ARRAY
-__attribute__((section(".preinit_array"), used))
-void (*__local_ubsan_preinit)(void) = __ubsan::InitAsStandalone;
-#else
-// Use a dynamic initializer.
class UbsanStandaloneInitializer {
public:
UbsanStandaloneInitializer() {
- __ubsan::InitAsStandalone();
+ InitAsStandalone();
+ InitializeDeadlySignals();
}
};
static UbsanStandaloneInitializer ubsan_standalone_initializer;
-#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
+
+} // namespace __ubsan
--- /dev/null
+//===-- ubsan_init_standalone_preinit.cc ----------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if !CAN_SANITIZE_UB
+#error "UBSan is not supported on this platform!"
+#endif
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "ubsan_init.h"
+#include "ubsan_signals_standalone.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+namespace __ubsan {
+
+static void PreInitAsStandalone() {
+ InitAsStandalone();
+ InitializeDeadlySignals();
+}
+
+} // namespace __ubsan
+
+__attribute__((section(".preinit_array"), used))
+void (*__local_ubsan_preinit)(void) = __ubsan::PreInitAsStandalone;
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
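
A standalone illustration of the mechanism this file relies on (not part of the patch): on ELF targets, .preinit_array entries in the main executable run before any dynamic initializers or constructors, which is early enough to install the signal handlers before user code can run:

    #include <cstdio>

    static void early() { std::puts("before constructors, before main"); }

    __attribute__((section(".preinit_array"), used))
    void (*preinit_hook)() = early;

    int main() { return 0; }
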
--- /dev/null
+//===-- ubsan_interface.inc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Ubsan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_builtin_unreachable)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_bad_type)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail_abort)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss)
+INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_abort)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort)
+INTERFACE_FUNCTION(__ubsan_handle_missing_return)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort)
+INTERFACE_WEAK_FUNCTION(__ubsan_default_options)
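
The list is an X-macro: every consumer defines the two INTERFACE_* macros before including it, as the Windows thunk files added below do. The DLL thunk, for instance, expands each entry into an interception stub:

    #define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
    #define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
    #include "ubsan_interface.inc"
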
#ifndef CAN_SANITIZE_UB
// Other platforms should be easy to add, and probably work as-is.
-#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) && \
- (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \
- defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__) || \
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
+ defined(__NetBSD__)) && \
+ (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \
+ defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__) || \
defined(__s390__))
# define CAN_SANITIZE_UB 1
-#elif defined(_WIN32)
+#elif defined(_WIN32) || defined(__Fuchsia__)
# define CAN_SANITIZE_UB 1
#else
# define CAN_SANITIZE_UB 0
--- /dev/null
+//===-- ubsan_signals_standalone.cc ---------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Installs signal handlers and related interceptors for UBSan standalone.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "ubsan_diag.h"
+#include "ubsan_init.h"
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+namespace __ubsan {
+
+#if SANITIZER_FUCHSIA
+void InitializeDeadlySignals() {}
+#else
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
+ sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void UBsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+
+static bool is_initialized = false;
+
+void InitializeDeadlySignals() {
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ InitializeSignalInterceptors();
+ InstallDeadlySignalHandlers(&UBsanOnDeadlySignal);
+}
+#endif
+
+} // namespace __ubsan
+
+#endif // CAN_SANITIZE_UB
--- /dev/null
+//===-- ubsan_signals_standalone.h ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Installs signal handlers and related interceptors for UBSan standalone.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UBSAN_SIGNALS_STANDALONE_H
+#define UBSAN_SIGNALS_STANDALONE_H
+
+namespace __ubsan {
+
+// Initializes signal handlers and interceptors.
+void InitializeDeadlySignals();
+
+} // namespace __ubsan
+
+#endif // UBSAN_SIGNALS_STANDALONE_H
};
VtablePrefix *getVtablePrefix(void *Vtable) {
VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
- if (!Vptr)
- return nullptr;
VtablePrefix *Prefix = Vptr - 1;
+ if (!IsAccessibleMemoryRange((uptr)Prefix, sizeof(VtablePrefix)))
+ return nullptr;
if (!Prefix->TypeInfo)
// This can't possibly be a valid vtable.
return nullptr;
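
Context for the change: a non-null vptr says nothing about whether the bytes immediately before the vtable are mapped, and the old check went on to read Prefix->TypeInfo regardless. The memory being probed (a sketch mirroring the VtablePrefix struct defined in this file):

    #include <typeinfo>

    struct VtablePrefixSketch {
      signed long Offset;        // offset-to-top
      std::type_info *TypeInfo;  // RTTI for the dynamic type
    };
    // An object's vptr points just past the prefix, at the first virtual
    // function entry; the prefix therefore lives at
    // ((VtablePrefixSketch *)vptr) - 1, and IsAccessibleMemoryRange probes
    // those bytes before they are dereferenced.
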
--- /dev/null
+//===-- ubsan_win_dll_thunk.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
+// Ubsan interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DLL_THUNK
--- /dev/null
+//===-- ubsan_win_dynamic_runtime_thunk.cc --------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Ubsan when it is included in a DLL.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from ubsan.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
--- /dev/null
+//===-- ubsan_win_weak_interception.cc ------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Ubsan when it is implemented as a shared
+// library on Windows (DLL), in order to delegate calls to weak functions to
+// the implementation in the main executable when a strong definition is
+// provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "ubsan_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC