+2014-09-23 Kostya Serebryany <kcc@google.com>
+
+ Update to match the changed asan API.
+ * asan.c (asan_global_struct): Update the __asan_global definition
+ to match the new API.
+ (asan_add_global): Ditto.
+ * sanitizer.def (BUILT_IN_ASAN_INIT): Rename __asan_init_v3
+ to __asan_init_v4.
+
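For reference, the v4 record that instrumented objects now hand to __asan_register_globals, assembled as a sketch from the field lists in the hunks below (the uptr typedef is an assumption standing in for the sanitizer's pointer-sized integer):

    typedef unsigned long uptr;  // stand-in for the run-time's pointer-sized int

    struct __asan_global_source_location {
      const char *filename;     // new in v4
      int line_no;
      int column_no;
    };

    struct __asan_global {
      uptr beg;                 // address of the global variable
      uptr size;                // size of the original variable
      uptr size_with_redzone;   // size including the redzone
      const char *name;         // symbol name as a C string
      const char *module_name;  // unique identifier of the defining module
      uptr has_dynamic_init;    // non-zero if it has a dynamic initializer
      __asan_global_source_location *location;  // new in v4; NULL if unknown
    };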
2014-09-23 Michael Meissner <meissner@linux.vnet.ibm.com>
* config/rs6000/rs6000.md (f32_vsx): New mode attributes to
// 1 if it has dynamic initialization, 0 otherwise.
uptr __has_dynamic_init;
+
+ // A pointer to a struct that contains the source location; may be NULL.
+ __asan_global_source_location *__location;
}
A destructor function that calls the runtime asan library function
const void *__name;
const void *__module_name;
uptr __has_dynamic_init;
+ __asan_global_source_location *__location;
} type. */
static tree
asan_global_struct (void)
{
- static const char *field_names[6]
+ static const char *field_names[7]
= { "__beg", "__size", "__size_with_redzone",
- "__name", "__module_name", "__has_dynamic_init" };
- tree fields[6], ret;
+ "__name", "__module_name", "__has_dynamic_init", "__location"};
+ tree fields[7], ret;
int i;
ret = make_node (RECORD_TYPE);
- for (i = 0; i < 6; i++)
+ for (i = 0; i < 7; i++)
{
fields[i]
= build_decl (UNKNOWN_LOCATION, FIELD_DECL,
int has_dynamic_init = vnode ? vnode->dynamically_initialized : 0;
CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
build_int_cst (uptr, has_dynamic_init));
+ CONSTRUCTOR_APPEND_ELT (vinner, NULL_TREE,
+ build_int_cst (uptr, 0));
init = build_constructor (type, vinner);
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, init);
}
for other FEs by asan.c. */
/* Address Sanitizer */
-DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v3",
+DEF_SANITIZER_BUILTIN(BUILT_IN_ASAN_INIT, "__asan_init_v4",
BT_FN_VOID, ATTR_NOTHROW_LEAF_LIST)
/* Do not reorder the BUILT_IN_ASAN_{REPORT,CHECK}* builtins, e.g. cfgcleanup.c
relies on this order. */
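The rename is what turns the ABI change into a link-time error: each instrumented translation unit calls the versioned init symbol from its module constructor, so an object built against the v3 ABI cannot resolve against a v4 run-time. Conceptually (a sketch, not the exact trees GCC builds; signatures simplified):

    extern "C" void __asan_init_v4(void);
    extern "C" void __asan_register_globals(void *globals, unsigned long n);

    static void __attribute__((constructor)) asan_module_ctor(void) {
      __asan_init_v4();  // BUILT_IN_ASAN_INIT after this patch
      __asan_register_globals(/* per-module __asan_global array */ 0, 0);
    }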
+2014-09-19 Kostya Serebryany <kcc@google.com>
+
+ * All source files: Merge from upstream r218156.
+ * asan/Makefile.am (asan_files): Added new files.
+ * asan/Makefile.in: Regenerate.
+ * ubsan/Makefile.am (ubsan_files): Added new files.
+ * ubsan/Makefile.in: Regenerate.
+ * tsan/Makefile.am (tsan_files): Added new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Added new
+ files.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * asan/libtool-version: Bump the libasan SONAME.
+
2014-09-10 Jakub Jelinek <jakub@redhat.com>
* ubsan/ubsan_handlers.cc, ubsan/ubsan_handlers.h: Cherry pick
-209283
+218156
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/lsan/libsanitizer_lsan.la
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_activation.lo asan_allocator2.lo \
- asan_dll_thunk.lo asan_fake_stack.lo asan_globals.lo \
+ asan_debugging.lo asan_fake_stack.lo asan_globals.lo \
asan_interceptors.lo asan_linux.lo asan_mac.lo \
asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
- asan_thread.lo asan_win.lo
+ asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \
+ asan_win_dynamic_runtime_thunk.lo
am_libasan_la_OBJECTS = $(am__objects_1)
libasan_la_OBJECTS = $(am_libasan_la_OBJECTS)
libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
asan_files = \
asan_activation.cc \
asan_allocator2.cc \
- asan_dll_thunk.cc \
+ asan_debugging.cc \
asan_fake_stack.cc \
asan_globals.cc \
asan_interceptors.cc \
asan_stack.cc \
asan_stats.cc \
asan_thread.cc \
- asan_win.cc
+ asan_win.cc \
+ asan_win_dll_thunk.cc \
+ asan_win_dynamic_runtime_thunk.cc
libasan_la_SOURCES = $(asan_files)
libasan_la_LIBADD = \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dynamic_runtime_thunk.Plo@am__quote@
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type);
+void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
+ AllocType alloc_type);
void *asan_malloc(uptr size, StackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack);
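asan_sized_free backs C++14 sized deallocation: the sized operator delete forwards the compiler-known size so the run-time can compare it against the allocation's recorded size. A minimal sketch of the wiring (the real definitions live in asan_new_delete.cc; FROM_NEW is the alloc type used for operator new/delete):

    void operator delete(void *ptr, size_t size) throw() {
      GET_STACK_TRACE_FREE;
      asan_sized_free(ptr, size, &stack, FROM_NEW);
    }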
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
}
return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
}
- // If we don't use stack depot, we store the alloc/free stack traces
- // in the chunk itself.
- u32 *AllocStackBeg() {
- return (u32*)(Beg() - RZLog2Size(rz_log));
- }
- uptr AllocStackSize() {
- CHECK_LE(RZLog2Size(rz_log), kChunkHeaderSize);
- return (RZLog2Size(rz_log) - kChunkHeaderSize) / sizeof(u32);
- }
- u32 *FreeStackBeg() {
- return (u32*)(Beg() + kChunkHeader2Size);
- }
- uptr FreeStackSize() {
- if (user_requested_size < kChunkHeader2Size) return 0;
- uptr available = RoundUpTo(user_requested_size, SHADOW_GRANULARITY);
- return (available - kChunkHeader2Size) / sizeof(u32);
- }
bool AddrIsInside(uptr addr, bool locked_version = false) {
return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
}
}
}
-static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
+static void Deallocate(void *ptr, uptr delete_size, StackTrace *stack,
+ AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ if (delete_size && flags()->new_delete_type_mismatch &&
+ delete_size != m->UsedSize()) {
+ ReportNewDeleteSizeMismatch(p, delete_size, stack);
+ }
ASAN_FREE_HOOK(ptr);
// Must mark the chunk as quarantined before any changes to its metadata.
AtomicallySetQuarantineFlag(m, ptr, stack);
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
- Deallocate(old_ptr, stack, FROM_MALLOC);
+ Deallocate(old_ptr, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
}
void asan_free(void *ptr, StackTrace *stack, AllocType alloc_type) {
- Deallocate(ptr, stack, alloc_type);
+ Deallocate(ptr, 0, stack, alloc_type);
+}
+
+void asan_sized_free(void *ptr, uptr size, StackTrace *stack,
+ AllocType alloc_type) {
+ Deallocate(ptr, size, stack, alloc_type);
}
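With the new new_delete_type_mismatch check (see the flags hunk further below), a delete whose static type disagrees with the allocated type is now reported. A sketch of a trigger, assuming the program is built with C++14 sized deallocation so the compiler forwards the static type's size:

    struct Base { int x; };
    struct Derived : Base { int more[8]; };

    int main() {
      Base *p = new Derived;
      delete p;  // sized delete receives sizeof(Base), but UsedSize()
                 // recorded sizeof(Derived) -> ReportNewDeleteSizeMismatch
    }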
void *asan_malloc(uptr size, StackTrace *stack) {
if (p == 0)
return Allocate(size, 8, stack, FROM_MALLOC, true);
if (size == 0) {
- Deallocate(p, stack, FROM_MALLOC);
+ Deallocate(p, 0, stack, FROM_MALLOC);
return 0;
}
return Reallocate(p, size, stack);
// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size". We don't want to expose our redzone sizes, etc here.
-uptr __asan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-int __asan_get_ownership(const void *p) {
+int __sanitizer_get_ownership(const void *p) {
uptr ptr = reinterpret_cast<uptr>(p);
return (AllocationSize(ptr) > 0);
}
-uptr __asan_get_allocated_size(const void *p) {
+uptr __sanitizer_get_allocated_size(const void *p) {
if (p == 0) return 0;
uptr ptr = reinterpret_cast<uptr>(p);
uptr allocated_size = AllocationSize(ptr);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
GET_STACK_TRACE_FATAL_HERE;
- ReportAsanGetAllocatedSizeNotOwned(ptr, &stack);
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
}
return allocated_size;
}
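The renamed entry points form the tool-agnostic allocator introspection interface; a usage sketch, assuming the public declarations from compiler-rt's sanitizer/allocator_interface.h header:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sanitizer/allocator_interface.h>

    int main() {
      void *p = malloc(100);
      if (__sanitizer_get_ownership(p))
        printf("allocated: %zu bytes\n", __sanitizer_get_allocated_size(p));
      free(p);
      return 0;
    }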
// Provide default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_malloc_hook(void *ptr, uptr size) {
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-void __asan_free_hook(void *ptr) {
+void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
} // extern "C"
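Because the hooks are weak, a program can observe every heap event by providing strong definitions under the new names; a sketch (size_t stands in for the run-time's uptr, and real hooks should avoid allocating, since they run inside the allocator):

    #include <stddef.h>
    #include <unistd.h>

    extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
      // write(2) rather than printf: no allocation inside the hook.
      write(2, "alloc\n", 6);
      (void)ptr; (void)size;
    }
    extern "C" void __sanitizer_free_hook(void *ptr) {
      write(2, "free\n", 5);
      (void)ptr;
    }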
+++ /dev/null
-// This file was generated by gen_asm_instrumentation.sh. Please, do not edit
-.section .text
-#if defined(__x86_64__) || defined(__i386__)
-.globl __asan_report_store1
-.globl __asan_report_load1
-.globl __asan_report_store2
-.globl __asan_report_load2
-.globl __asan_report_store4
-.globl __asan_report_load4
-.globl __asan_report_store8
-.globl __asan_report_load8
-.globl __asan_report_store16
-.globl __asan_report_load16
-#endif // defined(__x86_64__) || defined(__i386__)
-#if defined(__i386__)
-// Sanitize 1-byte store. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_store1
-.type __sanitizer_sanitize_store1, @function
-__sanitizer_sanitize_store1:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_store1_done
- movl %eax, %edx
- andl $0x7, %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_store1_done
- pushl %eax
- cld
- emms
- call __asan_report_store1@PLT
-.sanitize_store1_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 1-byte load. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_load1
-.type __sanitizer_sanitize_load1, @function
-__sanitizer_sanitize_load1:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_load1_done
- movl %eax, %edx
- andl $0x7, %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_load1_done
- pushl %eax
- cld
- emms
- call __asan_report_load1@PLT
-.sanitize_load1_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 2-byte store. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_store2
-.type __sanitizer_sanitize_store2, @function
-__sanitizer_sanitize_store2:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_store2_done
- movl %eax, %edx
- andl $0x7, %edx
- incl %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_store2_done
- pushl %eax
- cld
- emms
- call __asan_report_store2@PLT
-.sanitize_store2_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 2-byte load. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_load2
-.type __sanitizer_sanitize_load2, @function
-__sanitizer_sanitize_load2:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_load2_done
- movl %eax, %edx
- andl $0x7, %edx
- incl %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_load2_done
- pushl %eax
- cld
- emms
- call __asan_report_load2@PLT
-.sanitize_load2_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 4-byte store. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_store4
-.type __sanitizer_sanitize_store4, @function
-__sanitizer_sanitize_store4:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_store4_done
- movl %eax, %edx
- andl $0x7, %edx
- addl $0x3, %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_store4_done
- pushl %eax
- cld
- emms
- call __asan_report_store4@PLT
-.sanitize_store4_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 4-byte load. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_load4
-.type __sanitizer_sanitize_load4, @function
-__sanitizer_sanitize_load4:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushl %edx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- movb 0x20000000(%ecx), %cl
- testb %cl, %cl
- je .sanitize_load4_done
- movl %eax, %edx
- andl $0x7, %edx
- addl $0x3, %edx
- movsbl %cl, %ecx
- cmpl %ecx, %edx
- jl .sanitize_load4_done
- pushl %eax
- cld
- emms
- call __asan_report_load4@PLT
-.sanitize_load4_done:
- popfl
- popl %edx
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 8-byte store. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_store8
-.type __sanitizer_sanitize_store8, @function
-__sanitizer_sanitize_store8:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- cmpb $0x0, 0x20000000(%ecx)
- je .sanitize_store8_done
- pushl %eax
- cld
- emms
- call __asan_report_store8@PLT
-.sanitize_store8_done:
- popfl
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 8-byte load. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_load8
-.type __sanitizer_sanitize_load8, @function
-__sanitizer_sanitize_load8:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- cmpb $0x0, 0x20000000(%ecx)
- je .sanitize_load8_done
- pushl %eax
- cld
- emms
- call __asan_report_load8@PLT
-.sanitize_load8_done:
- popfl
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 16-byte store. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_store16
-.type __sanitizer_sanitize_store16, @function
-__sanitizer_sanitize_store16:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- cmpw $0x0, 0x20000000(%ecx)
- je .sanitize_store16_done
- pushl %eax
- cld
- emms
- call __asan_report_store16@PLT
-.sanitize_store16_done:
- popfl
- popl %ecx
- popl %eax
- leave
- ret
-// Sanitize 16-byte load. Takes one 4-byte address as an argument on
-// stack, nothing is returned.
-.globl __sanitizer_sanitize_load16
-.type __sanitizer_sanitize_load16, @function
-__sanitizer_sanitize_load16:
- pushl %ebp
- movl %esp, %ebp
- pushl %eax
- pushl %ecx
- pushfl
- movl 8(%ebp), %eax
- movl %eax, %ecx
- shrl $0x3, %ecx
- cmpw $0x0, 0x20000000(%ecx)
- je .sanitize_load16_done
- pushl %eax
- cld
- emms
- call __asan_report_load16@PLT
-.sanitize_load16_done:
- popfl
- popl %ecx
- popl %eax
- leave
- ret
-#endif // defined(__i386__)
-#if defined(__x86_64__)
-// Sanitize 1-byte store. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_store1
-.type __sanitizer_sanitize_store1, @function
-__sanitizer_sanitize_store1:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_store1_done
- movl %edi, %ecx
- andl $0x7, %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_store1_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_store1@PLT
-.sanitize_store1_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 1-byte load. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_load1
-.type __sanitizer_sanitize_load1, @function
-__sanitizer_sanitize_load1:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_load1_done
- movl %edi, %ecx
- andl $0x7, %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_load1_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_load1@PLT
-.sanitize_load1_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 2-byte store. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_store2
-.type __sanitizer_sanitize_store2, @function
-__sanitizer_sanitize_store2:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_store2_done
- movl %edi, %ecx
- andl $0x7, %ecx
- incl %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_store2_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_store2@PLT
-.sanitize_store2_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 2-byte load. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_load2
-.type __sanitizer_sanitize_load2, @function
-__sanitizer_sanitize_load2:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_load2_done
- movl %edi, %ecx
- andl $0x7, %ecx
- incl %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_load2_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_load2@PLT
-.sanitize_load2_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 4-byte store. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_store4
-.type __sanitizer_sanitize_store4, @function
-__sanitizer_sanitize_store4:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_store4_done
- movl %edi, %ecx
- andl $0x7, %ecx
- addl $0x3, %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_store4_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_store4@PLT
-.sanitize_store4_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 4-byte load. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_load4
-.type __sanitizer_sanitize_load4, @function
-__sanitizer_sanitize_load4:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushq %rcx
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- movb 0x7fff8000(%rax), %al
- test %al, %al
- je .sanitize_load4_done
- movl %edi, %ecx
- andl $0x7, %ecx
- addl $0x3, %ecx
- movsbl %al, %eax
- cmpl %eax, %ecx
- jl .sanitize_load4_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_load4@PLT
-.sanitize_load4_done:
- popfq
- popq %rcx
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 8-byte store. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_store8
-.type __sanitizer_sanitize_store8, @function
-__sanitizer_sanitize_store8:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- cmpb $0x0, 0x7fff8000(%rax)
- je .sanitize_store8_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_store8@PLT
-.sanitize_store8_done:
- popfq
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 8-byte load. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_load8
-.type __sanitizer_sanitize_load8, @function
-__sanitizer_sanitize_load8:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- cmpb $0x0, 0x7fff8000(%rax)
- je .sanitize_load8_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_load8@PLT
-.sanitize_load8_done:
- popfq
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 16-byte store. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_store16
-.type __sanitizer_sanitize_store16, @function
-__sanitizer_sanitize_store16:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- cmpw $0x0, 0x7fff8000(%rax)
- je .sanitize_store16_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_store16@PLT
-.sanitize_store16_done:
- popfq
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-// Sanitize 16-byte load. Takes one 8-byte address as an argument in %rdi,
-// nothing is returned.
-.globl __sanitizer_sanitize_load16
-.type __sanitizer_sanitize_load16, @function
-__sanitizer_sanitize_load16:
- leaq -128(%rsp), %rsp
- pushq %rax
- pushfq
- movq %rdi, %rax
- shrq $0x3, %rax
- cmpw $0x0, 0x7fff8000(%rax)
- je .sanitize_load16_done
- subq $8, %rsp
- andq $-16, %rsp
- cld
- emms
- call __asan_report_load16@PLT
-.sanitize_load16_done:
- popfq
- popq %rax
- leaq 128(%rsp), %rsp
- ret
-#endif // defined(__x86_64__)
-/* We do not need executable stack. */
-#if defined(__arm__)
- .section .note.GNU-stack,"",%progbits
-#else
- .section .note.GNU-stack,"",@progbits
-#endif // defined(__arm__)
-#endif // __linux__
--- /dev/null
+//===-- asan_debugging.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file contains various functions that are generally useful to call when
+// using a debugger (LLDB, GDB).
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id,
+ bool alloc_stack) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) return 0;
+
+ StackTrace stack;
+ if (alloc_stack) {
+ if (chunk.AllocTid() == kInvalidTid) return 0;
+ chunk.GetAllocStack(&stack);
+ if (thread_id) *thread_id = chunk.AllocTid();
+ } else {
+ if (chunk.FreeTid() == kInvalidTid) return 0;
+ chunk.GetFreeStack(&stack);
+ if (thread_id) *thread_id = chunk.FreeTid();
+ }
+
+ if (trace && size) {
+ if (size > kStackTraceMax)
+ size = kStackTraceMax;
+ if (size > stack.size)
+ size = stack.size;
+ for (uptr i = 0; i < size; i++)
+ trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
+
+ return size;
+ }
+
+ return 0;
+}
+
+} // namespace __asan
+
+using namespace __asan;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
+ if (shadow_scale)
+ *shadow_scale = SHADOW_SCALE;
+ if (shadow_offset)
+ *shadow_offset = SHADOW_OFFSET;
+}
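These are meant to be called from a debugger, but they can be exercised in-process as well; a sketch against the declarations added to asan_interface_internal.h below (the typedefs are assumptions standing in for the RTL's types):

    #include <stdio.h>
    typedef unsigned long uptr;
    typedef unsigned int u32;

    extern "C" uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
                                           u32 *thread_id);

    void dump_alloc_stack(void *p) {
      uptr trace[16];
      u32 tid = 0;
      uptr n = __asan_get_alloc_stack((uptr)p, trace, 16, &tid);
      for (uptr i = 0; i < n; i++)
        printf("  #%lu pc %p (allocated by T%u)\n", (unsigned long)i,
               (void *)trace[i], (unsigned)tid);
    }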
+++ /dev/null
-//===-- asan_dll_thunk.cc -------------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer, an address sanity checker.
-//
-// This file defines a family of thunks that should be statically linked into
-// the DLLs that have ASan instrumentation in order to delegate the calls to the
-// shared runtime that lives in the main binary.
-// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
-// details.
-//===----------------------------------------------------------------------===//
-
-// Only compile this code when buidling asan_dll_thunk.lib
-// Using #ifdef rather than relying on Makefiles etc.
-// simplifies the build procedure.
-#ifdef ASAN_DLL_THUNK
-#include "sanitizer_common/sanitizer_interception.h"
-
-// ----------------- Helper functions and macros --------------------- {{{1
-extern "C" {
-void *__stdcall GetModuleHandleA(const char *module_name);
-void *__stdcall GetProcAddress(void *module, const char *proc_name);
-void abort();
-}
-
-static void *getRealProcAddressOrDie(const char *name) {
- void *ret = GetProcAddress(GetModuleHandleA(0), name);
- if (!ret)
- abort();
- return ret;
-}
-
-#define WRAP_V_V(name) \
- extern "C" void name() { \
- typedef void (*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(); \
- }
-
-#define WRAP_V_W(name) \
- extern "C" void name(void *arg) { \
- typedef void (*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg); \
- }
-
-#define WRAP_V_WW(name) \
- extern "C" void name(void *arg1, void *arg2) { \
- typedef void (*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2); \
- }
-
-#define WRAP_V_WWW(name) \
- extern "C" void name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_V(name) \
- extern "C" void *name() { \
- typedef void *(*fntype)(); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(); \
- }
-
-#define WRAP_W_W(name) \
- extern "C" void *name(void *arg) { \
- typedef void *(*fntype)(void *arg); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg); \
- }
-
-#define WRAP_W_WW(name) \
- extern "C" void *name(void *arg1, void *arg2) { \
- typedef void *(*fntype)(void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2); \
- }
-
-#define WRAP_W_WWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
- typedef void *(*fntype)(void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3); \
- }
-
-#define WRAP_W_WWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
- typedef void *(*fntype)(void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4); \
- }
-
-#define WRAP_W_WWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5); \
- }
-
-#define WRAP_W_WWWWWW(name) \
- extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
- void *arg5, void *arg6) { \
- typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
- static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
- return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
- }
-// }}}
-
-// --------- Interface interception helper functions and macros ----------- {{{1
-// We need to intercept the ASan interface exported by the DLL thunk and forward
-// all the functions to the runtime in the main module.
-// However, we don't want to keep two lists of interface functions.
-// To avoid that, the list of interface functions should be defined using the
-// INTERFACE_FUNCTION macro. Then, all the interface can be intercepted at once
-// by calling INTERCEPT_ASAN_INTERFACE().
-
-// Use macro+template magic to automatically generate the list of interface
-// functions. Each interface function at line LINE defines a template class
-// with a static InterfaceInteceptor<LINE>::Execute() method intercepting the
-// function. The default implementation of InterfaceInteceptor<LINE> is to call
-// the Execute() method corresponding to the previous line.
-template<int LINE>
-struct InterfaceInteceptor {
- static void Execute() { InterfaceInteceptor<LINE-1>::Execute(); }
-};
-
-// There shouldn't be any interface function with negative line number.
-template<>
-struct InterfaceInteceptor<0> {
- static void Execute() {}
-};
-
-#define INTERFACE_FUNCTION(name) \
- extern "C" void name() { __debugbreak(); } \
- template<> struct InterfaceInteceptor<__LINE__> { \
- static void Execute() { \
- void *wrapper = getRealProcAddressOrDie(#name); \
- if (!__interception::OverrideFunction((uptr)name, (uptr)wrapper, 0)) \
- abort(); \
- InterfaceInteceptor<__LINE__-1>::Execute(); \
- } \
- };
-
-// INTERCEPT_ASAN_INTERFACE must be used after the last INTERFACE_FUNCTION.
-#define INTERCEPT_ASAN_INTERFACE InterfaceInteceptor<__LINE__>::Execute
-
-static void InterceptASanInterface();
-// }}}
-
-// ----------------- ASan own interface functions --------------------
-// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
-// want to call it in the __asan_init interceptor.
-WRAP_W_V(__asan_should_detect_stack_use_after_return)
-
-extern "C" {
- int __asan_option_detect_stack_use_after_return;
-
- // Manually wrap __asan_init as we need to initialize
- // __asan_option_detect_stack_use_after_return afterwards.
- void __asan_init_v3() {
- typedef void (*fntype)();
- static fntype fn = 0;
- if (fn) return;
-
- fn = (fntype)getRealProcAddressOrDie("__asan_init_v3");
- fn();
- __asan_option_detect_stack_use_after_return =
- (__asan_should_detect_stack_use_after_return() != 0);
-
- InterceptASanInterface();
- }
-}
-
-INTERFACE_FUNCTION(__asan_handle_no_return)
-
-INTERFACE_FUNCTION(__asan_report_store1)
-INTERFACE_FUNCTION(__asan_report_store2)
-INTERFACE_FUNCTION(__asan_report_store4)
-INTERFACE_FUNCTION(__asan_report_store8)
-INTERFACE_FUNCTION(__asan_report_store16)
-INTERFACE_FUNCTION(__asan_report_store_n)
-
-INTERFACE_FUNCTION(__asan_report_load1)
-INTERFACE_FUNCTION(__asan_report_load2)
-INTERFACE_FUNCTION(__asan_report_load4)
-INTERFACE_FUNCTION(__asan_report_load8)
-INTERFACE_FUNCTION(__asan_report_load16)
-INTERFACE_FUNCTION(__asan_report_load_n)
-
-INTERFACE_FUNCTION(__asan_memcpy);
-INTERFACE_FUNCTION(__asan_memset);
-INTERFACE_FUNCTION(__asan_memmove);
-
-INTERFACE_FUNCTION(__asan_register_globals)
-INTERFACE_FUNCTION(__asan_unregister_globals)
-
-INTERFACE_FUNCTION(__asan_before_dynamic_init)
-INTERFACE_FUNCTION(__asan_after_dynamic_init)
-
-INTERFACE_FUNCTION(__asan_poison_stack_memory)
-INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
-
-INTERFACE_FUNCTION(__asan_poison_memory_region)
-INTERFACE_FUNCTION(__asan_unpoison_memory_region)
-
-INTERFACE_FUNCTION(__asan_get_current_fake_stack)
-INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
-
-INTERFACE_FUNCTION(__asan_stack_malloc_0)
-INTERFACE_FUNCTION(__asan_stack_malloc_1)
-INTERFACE_FUNCTION(__asan_stack_malloc_2)
-INTERFACE_FUNCTION(__asan_stack_malloc_3)
-INTERFACE_FUNCTION(__asan_stack_malloc_4)
-INTERFACE_FUNCTION(__asan_stack_malloc_5)
-INTERFACE_FUNCTION(__asan_stack_malloc_6)
-INTERFACE_FUNCTION(__asan_stack_malloc_7)
-INTERFACE_FUNCTION(__asan_stack_malloc_8)
-INTERFACE_FUNCTION(__asan_stack_malloc_9)
-INTERFACE_FUNCTION(__asan_stack_malloc_10)
-
-INTERFACE_FUNCTION(__asan_stack_free_0)
-INTERFACE_FUNCTION(__asan_stack_free_1)
-INTERFACE_FUNCTION(__asan_stack_free_2)
-INTERFACE_FUNCTION(__asan_stack_free_4)
-INTERFACE_FUNCTION(__asan_stack_free_5)
-INTERFACE_FUNCTION(__asan_stack_free_6)
-INTERFACE_FUNCTION(__asan_stack_free_7)
-INTERFACE_FUNCTION(__asan_stack_free_8)
-INTERFACE_FUNCTION(__asan_stack_free_9)
-INTERFACE_FUNCTION(__asan_stack_free_10)
-
-// TODO(timurrrr): Add more interface functions on the as-needed basis.
-
-// ----------------- Memory allocation functions ---------------------
-WRAP_V_W(free)
-WRAP_V_WW(_free_dbg)
-
-WRAP_W_W(malloc)
-WRAP_W_WWWW(_malloc_dbg)
-
-WRAP_W_WW(calloc)
-WRAP_W_WWWWW(_calloc_dbg)
-WRAP_W_WWW(_calloc_impl)
-
-WRAP_W_WW(realloc)
-WRAP_W_WWW(_realloc_dbg)
-WRAP_W_WWW(_recalloc)
-
-WRAP_W_W(_msize)
-WRAP_W_W(_expand)
-WRAP_W_W(_expand_dbg)
-
-// TODO(timurrrr): Might want to add support for _aligned_* allocation
-// functions to detect a bit more bugs. Those functions seem to wrap malloc().
-
-// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
-
-void InterceptASanInterface() {
- INTERCEPT_ASAN_INTERFACE();
-}
-
-#endif // ASAN_DLL_THUNK
bool print_stats;
bool print_legend;
bool atexit;
- bool disable_core;
bool allow_reexec;
bool print_full_thread_history;
bool poison_heap;
bool poison_partial;
+ bool poison_array_cookie;
bool alloc_dealloc_mismatch;
+ bool new_delete_type_mismatch;
bool strict_memcmp;
bool strict_init_order;
bool start_deactivated;
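Both additions are run-time flags, so they can be toggled through ASAN_OPTIONS or baked into a binary via the existing __asan_default_options hook; a sketch:

    extern "C" const char *__asan_default_options() {
      return "new_delete_type_mismatch=1:poison_array_cookie=1";
    }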
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
// Lazy-initialized and never deleted.
static VectorOfGlobals *dynamic_init_globals;
+// We want to remember where a certain range of globals was registered.
+struct GlobalRegistrationSite {
+ u32 stack_id;
+ Global *g_first, *g_last;
+};
+typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
+static GlobalRegistrationSiteVector *global_registration_site_vector;
+
ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
FastPoisonShadow(g->beg, g->size_with_redzone, value);
}
}
static void ReportGlobal(const Global &g, const char *prefix) {
- Report("%s Global: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
- prefix, (void*)g.beg, g.size, g.size_with_redzone, g.name,
+ Report("%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu\n",
+ prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
g.module_name, g.has_dynamic_init);
+ if (g.location) {
+ Report(" location (%p): name=%s[%p], %d %d\n", g.location,
+ g.location->filename, g.location->filename, g.location->line_no,
+ g.location->column_no);
+ }
}
bool DescribeAddressIfGlobal(uptr addr, uptr size) {
return res;
}
+u32 FindRegistrationSite(const Global *g) {
+ CHECK(global_registration_site_vector);
+ for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
+ GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
+ if (g >= grs.g_first && g <= grs.g_last)
+ return grs.stack_id;
+ }
+ return 0;
+}
+
// Register a global variable.
// This function may be called more than once for every global
// so we store the globals in a map.
for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
if (g->beg == l->g->beg &&
(flags()->detect_odr_violation >= 2 || g->size != l->g->size))
- ReportODRViolation(g, l->g);
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
}
}
}
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
+ GET_STACK_TRACE_FATAL_HERE;
+ u32 stack_id = StackDepotPut(stack.trace, stack.size);
BlockingMutexLock lock(&mu_for_globals);
+ if (!global_registration_site_vector)
+ global_registration_site_vector =
+ new(allocator_for_globals) GlobalRegistrationSiteVector(128);
+ GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
+ global_registration_site_vector->push_back(site);
+ if (flags()->report_globals >= 2) {
+ PRINT_CURRENT_STACK();
+ Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+ }
for (uptr i = 0; i < n; i++) {
RegisterGlobal(&globals[i]);
}
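Recording a stack id per registered range makes ODR reports actionable: when two modules register a global at the same address, ReportODRViolation can now show where each copy was registered. A sketch of a typical trigger:

    // shared.h, compiled into both liba.so and libb.so:
    int g_state[10];  // ELF interposition gives both copies one address at
                      // load time; the second __asan_register_globals call
                      // sees the duplicate and reports the ODR violation,
                      // now with the registration stack of each module.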
--- /dev/null
+//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header defines a versioned __asan_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INIT_VERSION_H
+#define ASAN_INIT_VERSION_H
+
+extern "C" {
+ // Every time the ASan ABI changes we also change the version number in the
+ // __asan_init function name. Objects built with incompatible ASan ABI
+ // versions will not link with the run-time.
+ // Changes between ABI versions:
+ // v1=>v2: added 'module_name' to __asan_global
+ // v2=>v3: stack frame description (created by the compiler)
+ // contains the function PC as the 3-rd field (see
+ // DescribeAddressIfStack).
+ // v3=>v4: added '__asan_global_source_location' to __asan_global.
+ #define __asan_init __asan_init_v4
+ #define __asan_init_name "__asan_init_v4"
+}
+
+#endif // ASAN_INIT_VERSION_H
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping()
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s)
}
#endif
-// intercept mlock and friends.
-// Since asan maps 16T of RAM, mlock is completely unfriendly to asan.
-// All functions return 0 (success).
-static void MlockIsUnsupported() {
- static bool printed = false;
- if (printed) return;
- printed = true;
- VPrintf(1,
- "INFO: AddressSanitizer ignores "
- "mlock/mlockall/munlock/munlockall\n");
-}
-
-INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
+#if SANITIZER_WINDOWS
+INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(RaiseException));
+ __asan_handle_no_return();
+ REAL(RaiseException)(a, b, c, d);
}
-INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
+INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler3));
+ __asan_handle_no_return();
+ return REAL(_except_handler3)(a, b, c, d);
}
-INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
+#if ASAN_DYNAMIC
+// This handler is named differently in -MT and -MD CRTs.
+#define _except_handler4 _except_handler4_common
+#endif
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler4));
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
}
+#endif
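The __asan_handle_no_return call keeps SEH unwinding from leaving stale poison behind: frames skipped by the unwinder never execute their epilogues, so their redzone and fake-stack poison must be cleared up front. A sketch of the situation (MSVC-style SEH):

    #include <windows.h>

    void f() {
      __try {
        char local[32];              // redzones around 'local' are poisoned
        local[0] = 1;
        RaiseException(1, 0, 0, nullptr);  // interceptor unpoisons first
      } __except (EXCEPTION_EXECUTE_HANDLER) {
        // without the interceptor, stack reused here could still look
        // poisoned and produce false positives
      }
    }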
static inline int CharCmp(unsigned char c1, unsigned char c2) {
return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
}
#endif
-INTERCEPTOR(uptr, strlen, const char *s) {
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
if (UNLIKELY(!asan_inited)) return internal_strlen(s);
// strlen is called from malloc_default_purgeable_zone()
// in __asan::ReplaceSystemAlloc() on Mac.
return REAL(strlen)(s);
}
ENSURE_ASAN_INITED();
- uptr length = REAL(strlen)(s);
+ SIZE_T length = REAL(strlen)(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(s, length + 1);
}
return length;
}
-INTERCEPTOR(uptr, wcslen, const wchar_t *s) {
- uptr length = REAL(wcslen)(s);
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ SIZE_T length = REAL(wcslen)(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t));
}
#endif // ASAN_INTERCEPT___CXA_ATEXIT
+#if ASAN_INTERCEPT_FORK
+INTERCEPTOR(int, fork, void) {
+ ENSURE_ASAN_INITED();
+ if (common_flags()->coverage) CovBeforeFork();
+ int pid = REAL(fork)();
+ if (common_flags()->coverage) CovAfterFork(pid);
+ return pid;
+}
+#endif // ASAN_INTERCEPT_FORK
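Bracketing fork() with the coverage machinery keeps parent and child coverage separate; with ASAN_OPTIONS=coverage=1 a forking program then produces a dump per process rather than a shared, clobbered one. Sketch:

    #include <unistd.h>

    int main() {
      if (fork() == 0) {
        // child: the interceptor above ran CovAfterFork(0), so coverage
        // recorded here lands in the child's own dump
      }
      return 0;
    }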
+
#if SANITIZER_WINDOWS
INTERCEPTOR_WINAPI(DWORD, CreateThread,
void* security, uptr stack_size,
namespace __asan {
void InitializeWindowsInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(RaiseException);
+ ASAN_INTERCEPT_FUNC(_except_handler3);
+ ASAN_INTERCEPT_FUNC(_except_handler4);
}
} // namespace __asan
ASAN_INTERCEPT_FUNC(strtoll);
#endif
-#if ASAN_INTERCEPT_MLOCKX
- // Intercept mlock/munlock.
- ASAN_INTERCEPT_FUNC(mlock);
- ASAN_INTERCEPT_FUNC(munlock);
- ASAN_INTERCEPT_FUNC(mlockall);
- ASAN_INTERCEPT_FUNC(munlockall);
-#endif
-
// Intecept signal- and jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
// Intercept exception handling functions.
#if ASAN_INTERCEPT___CXA_THROW
- INTERCEPT_FUNCTION(__cxa_throw);
+ ASAN_INTERCEPT_FUNC(__cxa_throw);
#endif
// Intercept threading-related functions
ASAN_INTERCEPT_FUNC(__cxa_atexit);
#endif
+#if ASAN_INTERCEPT_FORK
+ ASAN_INTERCEPT_FUNC(fork);
+#endif
+
// Some Windows-specific interceptors.
#if SANITIZER_WINDOWS
InitializeWindowsInterceptors();
# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
-# define ASAN_INTERCEPT_MLOCKX 1
+# define ASAN_INTERCEPT_FORK 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
-# define ASAN_INTERCEPT_MLOCKX 0
+# define ASAN_INTERCEPT_FORK 0
#endif
#if SANITIZER_FREEBSD || SANITIZER_LINUX
# define ASAN_INTERCEPT_SIGLONGJMP 0
#endif
-#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS
+// Android bug: https://code.google.com/p/android/issues/detail?id=61799
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
+ !(SANITIZER_ANDROID && defined(__i386))
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
DECLARE_REAL(char*, strchr, const char *str, int c)
-DECLARE_REAL(uptr, strlen, const char *s)
+DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "asan_init_version.h"
+
using __sanitizer::uptr;
extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
- // Every time the asan ABI changes we also change the version number in this
- // name. Objects build with incompatible asan ABI version
- // will not link with run-time.
- // Changes between ABI versions:
- // v1=>v2: added 'module_name' to __asan_global
- // v2=>v3: stack frame description (created by the compiler)
- // contains the function PC as the 3-rd field (see
- // DescribeAddressIfStack).
- SANITIZER_INTERFACE_ATTRIBUTE void __asan_init_v3();
- #define __asan_init __asan_init_v3
+ // Please note that __asan_init is a macro that is replaced with
+ // __asan_init_vXXX at compile-time.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
+
+ // This structure is used to describe the source location of a place where
+ // global was defined.
+ struct __asan_global_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+ };
// This structure describes an instrumented global variable.
struct __asan_global {
const char *module_name; // Module name as a C string. This pointer is a
// unique identifier of a module.
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
+ __asan_global_source_location *location; // Source location of a global,
+ // or NULL if it is unknown.
};
// These two functions should be called by the instrumented code.
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_describe_address(uptr addr);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset);
+
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_report_error(uptr pc, uptr bp, uptr sp,
uptr addr, int is_write, uptr access_size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __asan_on_error();
- SANITIZER_INTERFACE_ATTRIBUTE
- uptr __asan_get_estimated_allocated_size(uptr size);
-
- SANITIZER_INTERFACE_ATTRIBUTE int __asan_get_ownership(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_allocated_size(const void *p);
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_current_allocated_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_heap_size();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_free_bytes();
- SANITIZER_INTERFACE_ATTRIBUTE uptr __asan_get_unmapped_bytes();
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_malloc_hook(void *ptr, uptr size);
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- /* OPTIONAL */ void __asan_free_hook(void *ptr);
-
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
void* __asan_memset(void *s, int c, uptr n);
SANITIZER_INTERFACE_ATTRIBUTE
void* __asan_memmove(void* dest, const void* src, uptr n);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_cxx_array_cookie(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_load_cxx_array_cookie(uptr *p);
} // extern "C"
#endif // ASAN_INTERFACE_INTERNAL_H
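The two array-cookie entry points let instrumented code poison the new[] element-count cookie (shadow value kAsanArrayCookieMagic, 0xac, in the hunk below) while still reading it back legitimately from delete[]. Conceptually (a sketch, not actual compiler output; uptr assumed pointer-sized):

    typedef unsigned long uptr;
    extern "C" void __asan_poison_cxx_array_cookie(uptr p);
    extern "C" uptr __asan_load_cxx_array_cookie(uptr *p);

    void cookie_sketch(void *raw, uptr n) {
      uptr *cookie = (uptr *)raw;   // new Foo[n] stores the count here
      *cookie = n;
      __asan_poison_cxx_array_cookie((uptr)cookie);  // direct loads now trap
      // ... delete[] later retrieves the count via the sanctioned load:
      uptr count = __asan_load_cxx_array_cookie(cookie);
      (void)count;
    }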
# endif
#endif
-#ifndef ASAN_USE_PREINIT_ARRAY
-# define ASAN_USE_PREINIT_ARRAY (SANITIZER_LINUX && !SANITIZER_ANDROID)
-#endif
-
#ifndef ASAN_DYNAMIC
# ifdef PIC
# define ASAN_DYNAMIC 1
void ParseExtraActivationFlags();
+void *AsanDlSymNext(const char *sym);
+
// Platform-specific options.
#if SANITIZER_MAC
bool PlatformHasDifferentMemcpyAndMemmove();
// Add convenient macro for interface functions that may be represented as
// weak hooks.
#define ASAN_MALLOC_HOOK(ptr, size) \
- if (&__asan_malloc_hook) __asan_malloc_hook(ptr, size)
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
#define ASAN_FREE_HOOK(ptr) \
- if (&__asan_free_hook) __asan_free_hook(ptr)
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()
const int kAsanStackUseAfterScopeMagic = 0xf8;
const int kAsanGlobalRedzoneMagic = 0xf9;
const int kAsanInternalHeapMagic = 0xfe;
+const int kAsanArrayCookieMagic = 0xac;
static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
static const uptr kRetiredStackFrameMagic = 0x45E0360E;
#include "asan_internal.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
+#include <dlfcn.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
extern "C" void* _DYNAMIC;
#else
#include <sys/ucontext.h>
-#include <dlfcn.h>
#include <link.h>
#endif
-// x86_64 FreeBSD 9.2 and older define 64-bit register names in both 64-bit
-// and 32-bit modes.
-#if SANITIZER_FREEBSD
-#include <sys/param.h>
-# if __FreeBSD_version <= 902001 // v9.2
-# define mc_eip mc_rip
-# define mc_ebp mc_rbp
-# define mc_esp mc_rsp
-# endif
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+ __FreeBSD_version <= 902001 // v9.2
+#define ucontext_t xucontext_t
#endif
typedef enum {
}
#endif
+void *AsanDlSymNext(const char *sym) {
+ return dlsym(RTLD_NEXT, sym);
+}
+
} // namespace __asan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
work(); \
}
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ } while (0)
+
INTERCEPTOR(void, dispatch_async,
dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_async)(dq, asan_block);
}
INTERCEPTOR(void, dispatch_group_async,
dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_group_async)(dg, dq, asan_block);
}
INTERCEPTOR(void, dispatch_after,
dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_after)(when, queue, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_cancel_handler,
dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
}
INTERCEPTOR(void, dispatch_source_set_event_handler,
dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
GET_ASAN_BLOCK(work);
REAL(dispatch_source_set_event_handler)(ds, asan_block);
}
#include "asan_internal.h"
#include "asan_stack.h"
-#if SANITIZER_ANDROID
-DECLARE_REAL_AND_INTERCEPTOR(void*, malloc, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
-DECLARE_REAL_AND_INTERCEPTOR(void*, calloc, uptr nmemb, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, realloc, void *ptr, uptr size)
-DECLARE_REAL_AND_INTERCEPTOR(void*, memalign, uptr boundary, uptr size)
-
-struct MallocDebug {
- void* (*malloc)(uptr bytes);
- void (*free)(void* mem);
- void* (*calloc)(uptr n_elements, uptr elem_size);
- void* (*realloc)(void* oldMem, uptr bytes);
- void* (*memalign)(uptr alignment, uptr bytes);
-};
-
-const MallocDebug asan_malloc_dispatch ALIGNED(32) = {
- WRAP(malloc), WRAP(free), WRAP(calloc), WRAP(realloc), WRAP(memalign)
-};
-
-extern "C" const MallocDebug* __libc_malloc_dispatch;
-
-namespace __asan {
-void ReplaceSystemMalloc() {
- __libc_malloc_dispatch = &asan_malloc_dispatch;
-}
-} // namespace __asan
-
-#else // ANDROID
-
-namespace __asan {
-void ReplaceSystemMalloc() {
-}
-} // namespace __asan
-#endif // ANDROID
-
// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
return asan_memalign(boundary, size, &stack, FROM_MALLOC);
}
+INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+}
+
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
__asan_print_accumulated_stats();
}
+#if SANITIZER_ANDROID
+// Format of __libc_malloc_dispatch has changed in Android L.
+// While we are moving towards a solution that does not depend on bionic
+// internals, here is something to support both K* and L releases.
+struct MallocDebugK {
+ void *(*malloc)(uptr bytes);
+ void (*free)(void *mem);
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+};
+
+struct MallocDebugL {
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void (*free)(void *mem);
+ fake_mallinfo (*mallinfo)(void);
+ void *(*malloc)(uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ int (*posix_memalign)(void **memptr, uptr alignment, uptr size);
+ void* (*pvalloc)(uptr size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void* (*valloc)(uptr size);
+};
+
+ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = {
+ WRAP(malloc), WRAP(free), WRAP(calloc),
+ WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)};
+
+ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = {
+ WRAP(calloc), WRAP(free), WRAP(mallinfo),
+ WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign),
+ WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc),
+ WRAP(valloc)};
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+ void **__libc_malloc_dispatch_p =
+ (void **)AsanDlSymNext("__libc_malloc_dispatch");
+ if (__libc_malloc_dispatch_p) {
+ // Decide on K vs L dispatch format by the presence of
+ // __libc_malloc_default_dispatch export in libc.
+ void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch");
+ if (default_dispatch_p)
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k;
+ else
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l;
+ }
+}
+} // namespace __asan
+
+#else // SANITIZER_ANDROID
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+}
+} // namespace __asan
+#endif // SANITIZER_ANDROID
+
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#include <stddef.h>
-// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan; // NOLINT
-// FIXME: Simply defining functions with the same signature in *.obj
-// files overrides the standard functions in *.lib
-// This works well for simple helloworld-like tests but might need to be
-// revisited in the future.
+// MT: Simply defining functions with the same signature in *.obj
+// files overrides the standard functions in the CRT.
+// MD: Memory allocation functions are defined in the CRT .dll,
+// so we have to intercept them before they are called for the first time.
+
+#if ASAN_DYNAMIC
+# define ALLOCATION_FUNCTION_ATTRIBUTE
+#else
+# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#endif
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void free(void *ptr) {
GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack, FROM_MALLOC);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void _free_dbg(void* ptr, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_dbg(void *ptr, int) {
free(ptr);
}
+ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
- CHECK(!"cfree() should not be used on Windows?");
+ CHECK(!"cfree() should not be used on Windows");
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *malloc(size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _malloc_dbg(size_t size, int , const char*, int) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *calloc(size_t nmemb, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _calloc_dbg(size_t n, size_t size, int, const char*, int) {
- return calloc(n, size);
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
+ return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
return calloc(nmemb, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *realloc(void *ptr, size_t size) {
GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_realloc_dbg(void *ptr, size_t size, int) {
CHECK(!"_realloc_dbg should not exist!");
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void* _recalloc(void* p, size_t n, size_t elem_size) {
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
const size_t size = n * elem_size;
return realloc(p, size);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
size_t _msize(void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_expand(void *memblock, size_t size) {
// _expand is used in realloc-like functions to resize the buffer if possible.
// We don't want memory to stand still while resizing buffers, so return 0.
return 0;
}
-SANITIZER_INTERFACE_ATTRIBUTE
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_expand_dbg(void *memblock, size_t size) {
- return 0;
+ return _expand(memblock, size);
}
// TODO(timurrrr): Might want to add support for _aligned_* allocation
}
} // extern "C"
-using __interception::GetRealFunctionAddress;
-
-// We don't want to include "windows.h" in this file to avoid extra attributes
-// set on malloc/free etc (e.g. dllimport), so declare a few things manually:
-extern "C" int __stdcall VirtualProtect(void* addr, size_t size,
- DWORD prot, DWORD *old_prot);
-const int PAGE_EXECUTE_READWRITE = 0x40;
-
namespace __asan {
void ReplaceSystemMalloc() {
-#if defined(_DLL)
-# ifdef _WIN64
-# error ReplaceSystemMalloc was not tested on x64
-# endif
- char *crt_malloc;
- if (GetRealFunctionAddress("malloc", (void**)&crt_malloc)) {
- // Replace malloc in the CRT dll with a jump to our malloc.
- DWORD old_prot, unused;
- CHECK(VirtualProtect(crt_malloc, 16, PAGE_EXECUTE_READWRITE, &old_prot));
- REAL(memset)(crt_malloc, 0xCC /* int 3 */, 16); // just in case.
-
- ptrdiff_t jmp_offset = (char*)malloc - (char*)crt_malloc - 5;
- crt_malloc[0] = 0xE9; // jmp, should be followed by an offset.
- REAL(memcpy)(crt_malloc + 1, &jmp_offset, sizeof(jmp_offset));
-
- CHECK(VirtualProtect(crt_malloc, 16, old_prot, &unused));
-
- // FYI: FlushInstructionCache is needed on Itanium etc but not on x86/x64.
- }
-
- // FIXME: investigate whether anything else is needed.
+#if defined(ASAN_DYNAMIC)
+ // We don't check the result because CRT might not be used in the process.
+ __interception::OverrideFunction("free", (uptr)free);
+ __interception::OverrideFunction("malloc", (uptr)malloc);
+ __interception::OverrideFunction("_malloc_crt", (uptr)malloc);
+ __interception::OverrideFunction("calloc", (uptr)calloc);
+ __interception::OverrideFunction("_calloc_crt", (uptr)calloc);
+ __interception::OverrideFunction("realloc", (uptr)realloc);
+ __interception::OverrideFunction("_realloc_crt", (uptr)realloc);
+ __interception::OverrideFunction("_recalloc", (uptr)_recalloc);
+ __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
+ __interception::OverrideFunction("_msize", (uptr)_msize);
+ __interception::OverrideFunction("_expand", (uptr)_expand);
+
+ // Override different versions of 'operator new' and 'operator delete'.
+ // No need to override the nothrow versions as they just wrap the throw
+ // versions.
+ // FIXME: Unfortunately, MSVC miscompiles the statements that take the
+ // addresses of the array versions of these operators,
+ // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
+ // We might want to try to work around this by [inline] assembly or compiling
+ // parts of the RTL with Clang.
+ void *(*op_new)(size_t sz) = operator new;
+ void (*op_delete)(void *p) = operator delete;
+ void *(*op_array_new)(size_t sz) = operator new[];
+ void (*op_array_delete)(void *p) = operator delete[];
+ __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
+ __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
+ __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
+ __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
#endif
}
} // namespace __asan
#include <stddef.h>
+// C++ operators can't have visibility attributes on Windows.
+#if SANITIZER_WINDOWS
+# define CXX_OPERATOR_ATTRIBUTE
+#else
+# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#endif
+
using namespace __asan; // NOLINT
// This code has issues on OSX.
#endif // __FreeBSD_version
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size) { OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size) { OPERATOR_NEW_BODY(FROM_NEW_BR); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new(size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW); }
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&)
{ OPERATOR_NEW_BODY(FROM_NEW_BR); }
asan_free(ptr, &stack, type);
#if !SANITIZER_MAC
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW);
}
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) throw() {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
-INTERCEPTOR_ATTRIBUTE
+CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) throw() {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) throw() {
+ GET_STACK_TRACE_FREE;
+ asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
+}
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
*p = x;
}
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_cxx_array_cookie(uptr p) {
+ if (SANITIZER_WORDSIZE != 64) return;
+ if (!flags()->poison_array_cookie) return;
+ uptr s = MEM_TO_SHADOW(p);
+ *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ if (SANITIZER_WORDSIZE != 64) return *p;
+ if (!flags()->poison_array_cookie) return *p;
+ uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
+ u8 sval = *reinterpret_cast<u8*>(s);
+ if (sval == kAsanArrayCookieMagic) return *p;
+ // If sval is not kAsanArrayCookieMagic it can only be freed memory,
+  // which means that we are going to get a double-free. So, return 0 to avoid
+  // an infinite loop of destructors. We don't want to report a double-free here
+ // though, so print a warning just in case.
+ // CHECK_EQ(sval, kAsanHeapFreeMagic);
+ if (sval == kAsanHeapFreeMagic) {
+ Report("AddressSanitizer: loaded array cookie from free-d memory; "
+ "expect a double-free report\n");
+ return 0;
+ }
+ // FIXME: apparently it can be something else; need to find a reproducer.
+ return *p;
+}
+
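For orientation, a minimal sketch of the layout these two hooks guard. Assuming the Itanium-style array-cookie convention (an assumption for illustration; MSVC uses a different scheme), new T[n] for a T with a non-trivial destructor stores n in a word just before the first element, and on 64-bit targets ASan poisons that word's shadow with kAsanArrayCookieMagic:

#include <cstdio>
#include <cstddef>

struct T {
  ~T() {}  // a non-trivial destructor forces an array cookie
  char c;
};

int main() {
  T *a = new T[3];  // heap layout: [cookie = 3][T[0]][T[1]][T[2]]
  // This direct read of the cookie is exactly the access the poisoning is
  // meant to flag: under ASan it is reported as heap-buffer-overflow, while
  // delete[] itself goes through __asan_load_cxx_array_cookie and is fine.
  size_t n = reinterpret_cast<size_t *>(a)[-1];
  printf("cookie = %zu\n", n);  // prints 3 on Itanium-ABI 64-bit targets
  delete[] a;
  return 0;
}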
// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that left border of region to be poisoned is properly aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
u8 value) {
DCHECK(flags()->poison_heap);
- uptr PageSize = GetPageSizeCached();
uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
uptr shadow_end = MEM_TO_SHADOW(
aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {
- uptr page_beg = RoundUpTo(shadow_beg, PageSize);
- uptr page_end = RoundDownTo(shadow_end, PageSize);
+ uptr page_size = GetPageSizeCached();
+ uptr page_beg = RoundUpTo(shadow_beg, page_size);
+ uptr page_end = RoundDownTo(shadow_end, page_size);
if (page_beg >= page_end) {
REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
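RoundUpTo and RoundDownTo above are the usual power-of-two rounding helpers from sanitizer_common; a standalone sketch of the behavior the page-splitting logic relies on (with hypothetical local definitions of the helpers):

#include <cassert>
#include <cstdint>

// Stand-ins for the sanitizer_common helpers; both assume 'boundary'
// is a power of two.
static uint64_t RoundUpTo(uint64_t x, uint64_t boundary) {
  return (x + boundary - 1) & ~(boundary - 1);
}
static uint64_t RoundDownTo(uint64_t x, uint64_t boundary) {
  return x & ~(boundary - 1);
}

int main() {
  // With 4K pages, [page_beg, page_end) is the page-aligned middle of the
  // shadow range: the partial head and tail are memset, while the middle
  // can be dropped and re-mapped as zero pages instead of being written.
  const uint64_t page_size = 4096;
  assert(RoundUpTo(4097, page_size) == 8192);    // page_beg
  assert(RoundDownTo(8191, page_size) == 4096);  // page_end
  return 0;
}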
(code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(pc, sp, bp, context, addr);
else
- ReportSIGSEGV(pc, sp, bp, context, addr);
+ ReportSIGSEGV("SEGV", pc, sp, bp, context, addr);
}
// ---------------------- TSD ---------------- {{{1
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Call __asan_init at the very early stage of process startup.
-// On Linux we use .preinit_array section (unless PIC macro is defined).
//===----------------------------------------------------------------------===//
#include "asan_internal.h"
-#if ASAN_USE_PREINIT_ARRAY && !defined(PIC)
- // On Linux, we force __asan_init to be called before anyone else
- // by placing it into .preinit_array section.
- // FIXME: do we have anything like this on Mac?
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// The symbol is called __local_asan_preinit, because it's not intended to be
// exported.
__attribute__((section(".preinit_array"), used))
void (*__local_asan_preinit)(void) = __asan_init;
-#elif SANITIZER_WINDOWS && defined(_DLL)
- // On Windows, when using dynamic CRT (/MD), we can put a pointer
- // to __asan_init into the global list of C initializers.
- // See crt0dat.c in the CRT sources for the details.
- #pragma section(".CRT$XIB", long, read) // NOLINT
- __declspec(allocate(".CRT$XIB")) void (*__asan_preinit)() = __asan_init;
#endif
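For context, a standalone sketch of the .preinit_array idiom used above (ELF/glibc only; entries actually receive (argc, argv, envp), which a void() function harmlessly ignores on SysV ABIs): the dynamic loader invokes these pointers before any shared-object or main-executable constructors, which is what lets __asan_init win the race.

#include <cstdio>

static void early_init() {
  // Runs before every C++ global constructor in the process.
  fprintf(stderr, "preinit\n");
}

__attribute__((section(".preinit_array"), used))
static void (*early_init_ptr)() = early_init;

int main() { return 0; }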
switch (byte) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
return Red();
case kAsanHeapFreeMagic:
return Magenta();
kAsanUserPoisonedMemoryMagic);
PrintShadowByte(str, " Container overflow: ",
kAsanContiguousContainerOOBMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
}
else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
should_demangle = true;
- return should_demangle ? Symbolizer::Get()->Demangle(name) : name;
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
}
// Check if the global is a zero-terminated ASCII string. If so, print it.
(char *)g.beg);
}
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+  // Prefer the filename from the source location, if it is available.
+ if (g.location)
+ res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+static void PrintGlobalLocation(InternalScopedString *str,
+ const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location)
+ return;
+ if (g.location->line_no)
+ str->append(":%d", g.location->line_no);
+ if (g.location->column_no)
+ str->append(":%d", g.location->column_no);
+}
+
bool DescribeAddressRelativeToGlobal(uptr addr, uptr size,
const __asan_global &g) {
static const uptr kMinimalDistanceFromAnotherGlobal = 64;
// Can it happen?
str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
}
- str.append(" of global variable '%s' from '%s' (0x%zx) of size %zu\n",
- MaybeDemangleGlobalName(g.name), g.module_name, g.beg, g.size);
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
str.append("%s", d.EndLocation());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
Printf("%s", str.data());
}
-struct StackVarDescr {
- uptr beg;
- uptr size;
- const char *name_pos;
- uptr name_len;
-};
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars) {
+ char *p;
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
+ CHECK_GT(n_objects, 0);
+
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
+ if (beg == 0 || size == 0 || *p != ' ') {
+ return false;
+ }
+ p++;
+ StackVarDescr var = {beg, size, p, len};
+ vars->push_back(var);
+ p += len;
+ }
+
+ return true;
+}
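The frame descriptor consumed here is a flat string emitted by the instrumentation: the object count, then for each object its frame offset, size, name length, and name. A standalone sketch of the same parse with a made-up descriptor (the literal is hypothetical; the format is the one ParseFrameDescription expects):

#include <cstdio>
#include <cstdlib>

int main() {
  // Two objects: 'buf' at frame offset 32, 8 bytes; 'x' at 64, 4 bytes.
  const char *frame_descr = "2 32 8 3 buf 64 4 1 x";
  char *p;
  long n_objects = strtol(frame_descr, &p, 10);
  for (long i = 0; i < n_objects; i++) {
    long beg = strtol(p, &p, 10);
    long size = strtol(p, &p, 10);
    long name_len = strtol(p, &p, 10);
    p++;  // skip the single space before the name
    printf("object %ld: [%ld,%ld) '%.*s'\n",
           i, beg, beg + size, (int)name_len, p);
    p += name_len;
  }
  return 0;
}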
bool DescribeAddressIfStack(uptr addr, uptr access_size) {
AsanThread *t = FindThreadByStackAddress(addr);
alloca_stack.size = 1;
Printf("%s", d.EndLocation());
alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ Printf("AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n", frame_descr);
+    // 'addr' is a stack address, so return true even if we can't parse the frame.
+ return true;
+ }
+ uptr n_objects = vars.size();
// Report the number of stack objects.
- char *p;
- uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
- CHECK_GT(n_objects, 0);
Printf(" This frame has %zu object(s):\n", n_objects);
// Report all objects in this frame.
- InternalScopedBuffer<StackVarDescr> vars(n_objects);
- for (uptr i = 0; i < n_objects; i++) {
- uptr beg, size;
- uptr len;
- beg = (uptr)internal_simple_strtoll(p, &p, 10);
- size = (uptr)internal_simple_strtoll(p, &p, 10);
- len = (uptr)internal_simple_strtoll(p, &p, 10);
- if (beg == 0 || size == 0 || *p != ' ') {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", frame_descr);
- break;
- }
- p++;
- vars[i].beg = beg;
- vars[i].size = size;
- vars[i].name_pos = p;
- vars[i].name_len = len;
- p += len;
- }
for (uptr i = 0; i < n_objects; i++) {
buf[0] = 0;
internal_strncat(buf, vars[i].name_pos,
prev_var_end, next_var_beg);
}
Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n"
- " (longjmp and C++ exceptions *are* supported)\n");
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
DescribeThread(t);
return true;
}
// Do not print more than one report, otherwise they will mix up.
// Error reporting functions shouldn't return at this situation, as
// they are defined as no-return.
- Report("AddressSanitizer: while reporting a bug found another one."
+ Report("AddressSanitizer: while reporting a bug found another one. "
"Ignoring.\n");
u32 current_tid = GetCurrentTidOrInvalid();
if (current_tid != reporting_thread_tid) {
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: stack-overflow on address %p"
- " (pc %p sp %p bp %p T%d)\n",
- (void *)addr, (void *)pc, (void *)sp, (void *)bp,
+ " (pc %p bp %p sp %p T%d)\n",
+ (void *)addr, (void *)pc, (void *)bp, (void *)sp,
GetCurrentTidOrInvalid());
Printf("%s", d.EndWarning());
GET_STACK_TRACE_SIGNAL(pc, bp, context);
ReportErrorSummary("stack-overflow", &stack);
}
-void ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr) {
+void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
+ void *context, uptr addr) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report(
- "ERROR: AddressSanitizer: SEGV on unknown address %p"
- " (pc %p sp %p bp %p T%d)\n",
- (void *)addr, (void *)pc, (void *)sp, (void *)bp,
+ "ERROR: AddressSanitizer: %s on unknown address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ description, (void *)addr, (void *)pc, (void *)bp, (void *)sp,
GetCurrentTidOrInvalid());
+ if (pc < GetPageSizeCached()) {
+ Report("Hint: pc points to the zero page.\n");
+ }
Printf("%s", d.EndWarning());
GET_STACK_TRACE_SIGNAL(pc, bp, context);
stack.Print();
ReportErrorSummary("double-free", &stack);
}
+void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ StackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ u32 curr_tid = GetCurrentTidOrInvalid();
+ Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
+ "thread T%d%s:\n",
+ addr, curr_tid,
+ ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(" size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ asan_mz_size(reinterpret_cast<void*>(addr)), delete_size);
+ CHECK_GT(free_stack->size, 0);
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ DescribeHeapAddress(addr, 1);
+ ReportErrorSummary("new-delete-type-mismatch", &stack);
+ Report("HINT: if you don't care about these warnings you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
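For reference, the canonical trigger for this report is deleting a derived object through a base pointer without a virtual destructor while sized deallocation is in effect (a sketch; needs C++14 sized deallocation, e.g. Clang's -fsized-deallocation, and is undefined behavior by design):

struct Base {
  int x;
  ~Base() {}  // non-virtual on purpose
};
struct Derived : Base {
  int more[8];
};

int main() {
  Base *p = new Derived;
  // The compiler emits ::operator delete(p, sizeof(Base)), but the chunk
  // was allocated with sizeof(Derived) bytes: new-delete-type-mismatch.
  delete p;
  return 0;
}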
void ReportFreeNotMalloced(uptr addr, StackTrace *free_stack) {
ScopedInErrorReport in_report;
Decorator d;
ReportErrorSummary("bad-malloc_usable_size", stack);
}
-void ReportAsanGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: attempting to call "
- "__asan_get_allocated_size() for pointer which is "
+ "__sanitizer_get_allocated_size() for pointer which is "
"not owned: %p\n", addr);
Printf("%s", d.EndWarning());
stack->Print();
DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__asan_get_allocated_size", stack);
+ ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
}
void ReportStringFunctionMemoryRangesOverlap(
ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
}
-void ReportODRViolation(const __asan_global *g1, const __asan_global *g2) {
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
Printf("%s", d.EndWarning());
- Printf(" [1] size=%zd %s %s\n", g1->size, g1->name, g1->module_name);
- Printf(" [2] size=%zd %s %s\n", g2->size, g2->name, g2->module_name);
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, *g1);
+ PrintGlobalLocation(&g2_loc, *g2);
+ Printf(" [1] size=%zd '%s' %s\n", g1->size,
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", g2->size,
+ MaybeDemangleGlobalName(g2->name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ uptr stack_size;
+ const uptr *stack_trace = StackDepotGet(stack_id1, &stack_size);
+ StackTrace::PrintStack(stack_trace, stack_size);
+ Printf(" [2]:\n");
+ stack_trace = StackDepotGet(stack_id2, &stack_size);
+ StackTrace::PrintStack(stack_trace, stack_size);
+ }
Report("HINT: if you don't care about these warnings you may set "
"ASAN_OPTIONS=detect_odr_violation=0\n");
- ReportErrorSummary("odr-violation", g1->module_name, 0, g1->name);
+ InternalScopedString error_msg(256);
+ error_msg.append("odr-violation: global '%s' at %s",
+ MaybeDemangleGlobalName(g1->name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
}
// ----------------------- CheckForInvalidPointerPair ----------- {{{1
switch (*shadow_addr) {
case kAsanHeapLeftRedzoneMagic:
case kAsanHeapRightRedzoneMagic:
+ case kAsanArrayCookieMagic:
bug_descr = "heap-buffer-overflow";
break;
case kAsanHeapFreeMagic:
Decorator d;
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc 0x%zx bp 0x%zx sp 0x%zx\n",
+ "%p at pc %p bp %p sp %p\n",
bug_descr, (void*)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
}
void __asan_describe_address(uptr addr) {
+ // Thread registry must be locked while we're describing an address.
+ asanThreadRegistry().Lock();
DescribeAddress(addr, 1);
+ asanThreadRegistry().Unlock();
}
extern "C" {
namespace __asan {
+struct StackVarDescr {
+ uptr beg;
+ uptr size;
+ const char *name_pos;
+ uptr name_len;
+};
+
// The following functions print the address description depending
// on the memory type (shadow/heap/stack/global).
void DescribeHeapAddress(uptr addr, uptr access_size);
bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
const __asan_global &g);
bool DescribeAddressIfShadow(uptr addr);
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars);
bool DescribeAddressIfStack(uptr addr, uptr access_size);
// Determines memory type on its own.
void DescribeAddress(uptr addr, uptr access_size);
// Different kinds of error reports.
void NORETURN
ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
-void NORETURN
- ReportSIGSEGV(uptr pc, uptr sp, uptr bp, void *context, uptr addr);
+void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp,
+ void *context, uptr addr);
+void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
+ StackTrace *free_stack);
void NORETURN ReportDoubleFree(uptr addr, StackTrace *free_stack);
void NORETURN ReportFreeNotMalloced(uptr addr, StackTrace *free_stack);
void NORETURN ReportAllocTypeMismatch(uptr addr, StackTrace *free_stack,
AllocType dealloc_type);
void NORETURN ReportMallocUsableSizeNotOwned(uptr addr,
StackTrace *stack);
-void NORETURN ReportAsanGetAllocatedSizeNotOwned(uptr addr,
- StackTrace *stack);
+void NORETURN
+ReportSanitizerGetAllocatedSizeNotOwned(uptr addr, StackTrace *stack);
void NORETURN ReportStringFunctionMemoryRangesOverlap(
const char *function, const char *offset1, uptr length1,
const char *offset2, uptr length2, StackTrace *stack);
uptr new_mid, StackTrace *stack);
void NORETURN
-ReportODRViolation(const __asan_global *g1, const __asan_global *g2);
+ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
void WarnMacFreeUnallocated(
"If set, prints ASan exit stats even after program terminates "
"successfully.");
- ParseFlag(str, &f->disable_core, "disable_core",
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. "
- "Ignored on OSes that don't dump core by default.");
-
ParseFlag(str, &f->allow_reexec, "allow_reexec",
"Allow the tool to re-exec the program. This may interfere badly with "
"the debugger.");
"Poison (or not) the heap memory on [de]allocation. Zero value is useful "
"for benchmarking the allocator or instrumentator.");
+ ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie",
+ "Poison (or not) the array cookie after operator new[].");
+
ParseFlag(str, &f->poison_partial, "poison_partial",
"If true, poison partially addressable 8-byte aligned words "
"(default=true). This flag affects heap and global buffers, but not "
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch",
"Report errors on malloc/delete, new/free, new/delete[], etc.");
+
+ ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch",
+ "Report errors on mismatch betwen size of new and delete.");
+
ParseFlag(str, &f->strict_memcmp, "strict_memcmp",
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.");
f->print_stats = false;
f->print_legend = true;
f->atexit = false;
- f->disable_core = (SANITIZER_WORDSIZE == 64);
f->allow_reexec = true;
f->print_full_thread_history = true;
f->poison_heap = true;
+ f->poison_array_cookie = true;
f->poison_partial = true;
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
// https://code.google.com/p/address-sanitizer/issues/detail?id=131
// https://code.google.com/p/address-sanitizer/issues/detail?id=309
// TODO(glider,timurrrr): Fix known issues and enable this back.
f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0);
+ f->new_delete_type_mismatch = true;
f->strict_memcmp = true;
f->strict_init_order = false;
f->start_deactivated = false;
f->detect_invalid_pointer_pairs = 0;
f->detect_container_overflow = true;
+ f->detect_odr_violation = 2;
// Override from compile definition.
ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition());
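These defaults can also be overridden per binary: besides the ASAN_OPTIONS environment variable, an application may define __asan_default_options (declared in the public interface header touched later in this patch), which the runtime parses with the same machinery. A sketch:

// Hypothetical application-side override of the defaults above; the
// returned string is parsed exactly like ASAN_OPTIONS.
extern "C" const char *__asan_default_options() {
  return "new_delete_type_mismatch=0:poison_array_cookie=0";
}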
case 15: __asan_set_error_report_callback(0); break;
case 16: __asan_handle_no_return(); break;
case 17: __asan_address_is_poisoned(0); break;
- case 18: __asan_get_allocated_size(0); break;
- case 19: __asan_get_current_allocated_bytes(); break;
- case 20: __asan_get_estimated_allocated_size(0); break;
- case 21: __asan_get_free_bytes(); break;
- case 22: __asan_get_heap_size(); break;
- case 23: __asan_get_ownership(0); break;
- case 24: __asan_get_unmapped_bytes(); break;
case 25: __asan_poison_memory_region(0, 0); break;
case 26: __asan_unpoison_memory_region(0, 0); break;
case 27: __asan_set_error_exit_code(0); break;
InitializeAsanInterceptors();
+ // Enable system log ("adb logcat") on Android.
+ // Doing this before interceptors are initialized crashes in:
+ // AsanInitInternal -> android_log_write -> __interceptor_strcmp
+ AndroidLogInit();
+
ReplaceSystemMalloc();
uptr shadow_start = kLowShadowBeg;
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
-#if SANITIZER_LINUX && defined(__x86_64__) && !ASAN_FIXED_MAPPING
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
if (common_flags()->verbosity)
PrintAddressSpaceLayout();
- if (flags()->disable_core) {
- DisableCoreDumper();
- }
+ DisableCoreDumperIfNecessary();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
- // Allocator should be initialized before starting external symbolizer, as
- // fork() on Mac locks the allocator.
InitializeAllocator();
- Symbolizer::Init(common_flags()->external_symbolizer_path);
-
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
asan_inited = 1;
SanitizerInitializeUnwinder();
#if CAN_SANITIZE_LEAKS
- __lsan::InitCommonLsan();
+ __lsan::InitCommonLsan(false);
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
}
#include "asan_internal.h"
#include "asan_stats.h"
#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
- Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
PrintInternalAllocatorStats();
}
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
-uptr __asan_get_current_allocated_bytes() {
+uptr __sanitizer_get_current_allocated_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr malloced = stats.malloced;
return (malloced > freed) ? malloced - freed : 1;
}
-uptr __asan_get_heap_size() {
+uptr __sanitizer_get_heap_size() {
AsanStats stats;
GetAccumulatedStats(&stats);
return stats.mmaped - stats.munmaped;
}
-uptr __asan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
AsanStats stats;
GetAccumulatedStats(&stats);
uptr total_free = stats.mmaped
return (total_free > total_used) ? total_free - total_used : 1;
}
-uptr __asan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 0;
}
}
void AsanThread::Init() {
+ fake_stack_ = 0; // Will be initialized lazily if needed.
+ CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
+ CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
ClearShadowForThreadStackAndTLS();
VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
(void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
&local);
- fake_stack_ = 0; // Will be initialized lazily if needed.
AsanPlatformThreadInit();
}
#include "asan_interceptors.h"
#include "asan_internal.h"
+#include "asan_report.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
return 0;
}
-void AsanCheckDynamicRTPrereqs() { UNIMPLEMENTED(); }
+void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
UNIMPLEMENTED();
}
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+ uptr pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp;
+#else
+ uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp;
+#endif
+
+ if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
+ exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
+ const char *description =
+ (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ ? "access-violation"
+ : "in-page-error";
+ uptr access_addr = exception_record->ExceptionInformation[1];
+ ReportSIGSEGV(description, pc, sp, bp, context, access_addr);
+ }
+
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ return default_seh_handler(info);
+}
+
+// We want to install our own exception handler (EH) to print helpful reports
+// on access violations and whatnot. Unfortunately, the CRT initializers assume
+// they are run before any user code and drop any previously-installed EHs on
+// the floor, so we can't install our handler inside __asan_init.
+// (See crt0dat.c in the CRT sources for the details)
+//
+// Things get even more complicated with the dynamic runtime, as it finishes its
+// initialization before the .exe module CRT begins to initialize.
+//
+// For the static runtime (-MT), it's enough to put a callback to
+// __asan_set_seh_filter in the last section for C initializers.
+//
+// For the dynamic runtime (-MD), we want to link the same
+// asan_dynamic_runtime_thunk.lib to all the modules, thus __asan_set_seh_filter
+// will be called for each instrumented module. This ensures that at least one
+// __asan_set_seh_filter call happens after the .exe module CRT is initialized.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_set_seh_filter() {
+ // We should only store the previous handler if it's not our own handler in
+ // order to avoid loops in the EH chain.
+ auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler);
+ if (prev_seh_handler != &SEHHandler)
+ default_seh_handler = prev_seh_handler;
+ return 0;
+}
+
+#if !ASAN_DYNAMIC
+// Put a pointer to __asan_set_seh_filter at the end of the global list
+// of C initializers, after the default EH is set by the CRT.
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+static __declspec(allocate(".CRT$XIZ"))
+ int (*__intercept_seh)() = __asan_set_seh_filter;
+#endif
+
} // namespace __asan
#endif // _WIN32
--- /dev/null
+//===-- asan_win_dll_thunk.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
+// details.
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dll_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DLL_THUNK
+#include "asan_init_version.h"
+#include "sanitizer_common/sanitizer_interception.h"
+
+// ---------- Function interception helper functions and macros ----------- {{{1
+extern "C" {
+void *__stdcall GetModuleHandleA(const char *module_name);
+void *__stdcall GetProcAddress(void *module, const char *proc_name);
+void abort();
+}
+
+static void *getRealProcAddressOrDie(const char *name) {
+ void *ret = GetProcAddress(GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+// We need to intercept some functions (e.g. ASan interface, memory allocator --
+// let's call them "hooks") exported by the DLL thunk and forward the hooks to
+// the runtime in the main module.
+// However, we don't want to keep two lists of these hooks.
+// To avoid that, the list of hooks should be defined using the
+// INTERCEPT_WHEN_POSSIBLE macro. Then, all these hooks can be intercepted
+// at once by calling INTERCEPT_HOOKS().
+
+// Use macro+template magic to automatically generate the list of hooks.
+// Each hook at line LINE defines a template class with a static
+// FunctionInterceptor<LINE>::Execute() method intercepting the hook.
+// The default implementation of FunctionInterceptor<LINE> is to call
+// the Execute() method corresponding to the previous line.
+template<int LINE>
+struct FunctionInterceptor {
+ static void Execute() { FunctionInterceptor<LINE-1>::Execute(); }
+};
+
+// There shouldn't be any hooks with a negative definition line number.
+template<>
+struct FunctionInterceptor<0> {
+ static void Execute() {}
+};
+
+#define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \
+ template<> struct FunctionInterceptor<__LINE__> { \
+ static void Execute() { \
+ void *wrapper = getRealProcAddressOrDie(main_function); \
+ if (!__interception::OverrideFunction((uptr)dll_function, \
+ (uptr)wrapper, 0)) \
+ abort(); \
+ FunctionInterceptor<__LINE__-1>::Execute(); \
+ } \
+ };
+
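A stripped-down model of this chain, with hypothetical line numbers 10 and 20 standing in for the __LINE__ values that the macro bakes in:

#include <cstdio>

template <int LINE> struct FunctionInterceptor {
  static void Execute() { FunctionInterceptor<LINE - 1>::Execute(); }
};
template <> struct FunctionInterceptor<0> {
  static void Execute() {}
};
// Two "hooks", as if INTERCEPT_WHEN_POSSIBLE had been used at lines 10 and 20.
template <> struct FunctionInterceptor<10> {
  static void Execute() {
    puts("hook registered at line 10");
    FunctionInterceptor<9>::Execute();
  }
};
template <> struct FunctionInterceptor<20> {
  static void Execute() {
    puts("hook registered at line 20");
    FunctionInterceptor<19>::Execute();
  }
};

int main() {
  // As if INTERCEPT_HOOKS() were expanded at line 30: the call walks
  // 30 -> 20 (hook) -> 19 -> ... -> 10 (hook) -> ... -> 0 (base case).
  FunctionInterceptor<30>::Execute();
  return 0;
}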
+// Special case of hooks -- ASan's own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERFACE_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8); (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name)
+
+// INTERCEPT_HOOKS must be used after the last INTERCEPT_WHEN_POSSIBLE.
+#define INTERCEPT_HOOKS FunctionInterceptor<__LINE__>::Execute
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to
+// these functions, since they can be called before __asan_init, but that only
+// leads to false negatives in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_WHEN_POSSIBLE(WRAPPER_NAME(name), name)
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+static void InterceptHooks();
+// }}}
+
+// ---------- Function wrapping helpers ----------------------------------- {{{1
+#define WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef void (*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef void (*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef void (*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+    typedef void (*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef void *(*fntype)(); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef void *(*fntype)(void *arg); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef void *(*fntype)(void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef void *(*fntype)(void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef void *(*fntype)(void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+
+#define WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef void *(*fntype)(void *, void *, void *, void *, void *, void *); \
+ static fntype fn = (fntype)getRealProcAddressOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, name);
+// }}}
+
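For reference, a single expansion of these macros: WRAP_W_W(malloc) from the allocation section below produces (modulo whitespace) a forwarder that lazily resolves the real export from the main module, plus its interception registration. Not standalone; shown only to make the token pasting concrete:

extern "C" void *malloc(void *arg) {
  typedef void *(*fntype)(void *arg);
  static fntype fn = (fntype)getRealProcAddressOrDie("malloc");
  return fn(arg);
}
INTERCEPT_WHEN_POSSIBLE("malloc", malloc);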
+// ----------------- ASan's own interface functions ------------------
+// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
+// want to call it in the __asan_init interceptor.
+WRAP_W_V(__asan_should_detect_stack_use_after_return)
+
+extern "C" {
+ int __asan_option_detect_stack_use_after_return;
+
+ // Manually wrap __asan_init as we need to initialize
+ // __asan_option_detect_stack_use_after_return afterwards.
+ void __asan_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // __asan_init is expected to be called by only one thread.
+ if (fn) return;
+
+ fn = (fntype)getRealProcAddressOrDie(__asan_init_name);
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+
+ InterceptHooks();
+ }
+}
+
+INTERFACE_FUNCTION(__asan_handle_no_return)
+
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+
+INTERFACE_FUNCTION(__asan_memcpy);
+INTERFACE_FUNCTION(__asan_memset);
+INTERFACE_FUNCTION(__asan_memmove);
+
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_3)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+
+INTERFACE_FUNCTION(__sanitizer_cov_module_init)
+
+// TODO(timurrrr): Add more interface functions on an as-needed basis.
+
+// ----------------- Memory allocation functions ---------------------
+WRAP_V_W(free)
+WRAP_V_WW(_free_dbg)
+
+WRAP_W_W(malloc)
+WRAP_W_WWWW(_malloc_dbg)
+
+WRAP_W_WW(calloc)
+WRAP_W_WWWWW(_calloc_dbg)
+WRAP_W_WWW(_calloc_impl)
+
+WRAP_W_WW(realloc)
+WRAP_W_WWW(_realloc_dbg)
+WRAP_W_WWW(_recalloc)
+
+WRAP_W_W(_msize)
+WRAP_W_W(_expand)
+WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+
+INTERCEPT_LIBRARY_FUNCTION(atoi);
+INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+
+// _except_handler4 checks the -GS security cookie, which is different for
+// each module, so we can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+
+INTERCEPT_LIBRARY_FUNCTION(frexp);
+INTERCEPT_LIBRARY_FUNCTION(longjmp);
+INTERCEPT_LIBRARY_FUNCTION(memchr);
+INTERCEPT_LIBRARY_FUNCTION(memcmp);
+INTERCEPT_LIBRARY_FUNCTION(memcpy);
+INTERCEPT_LIBRARY_FUNCTION(memmove);
+INTERCEPT_LIBRARY_FUNCTION(memset);
+INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strchr);
+INTERCEPT_LIBRARY_FUNCTION(strcmp);
+INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strlen);
+INTERCEPT_LIBRARY_FUNCTION(strncat);
+INTERCEPT_LIBRARY_FUNCTION(strncmp);
+INTERCEPT_LIBRARY_FUNCTION(strncpy);
+INTERCEPT_LIBRARY_FUNCTION(strnlen);
+INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(wcslen);
+
+// Must be after all the interceptor declarations due to the way INTERCEPT_HOOKS
+// is defined.
+void InterceptHooks() {
+ INTERCEPT_HOOKS();
+ INTERCEPT_FUNCTION(_except_handler4);
+}
+
+// We want to call __asan_init before C/C++ initializers/constructors are
+// executed, otherwise functions like memset might be invoked.
+// For some strange reason, merely linking in asan_preinit.cc doesn't work
+// as the callback is never called... Is link.exe doing something too smart?
+
+// In DLLs, the callbacks are expected to return 0,
+// otherwise CRT initialization fails.
+static int call_asan_init() {
+ __asan_init();
+ return 0;
+}
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = call_asan_init;
+
+#endif // ASAN_DLL_THUNK
--- /dev/null
+//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines things that need to be present in the application modules
+// to interact with the ASan DLL runtime correctly and can't be implemented
+// using the default "import library" generated when linking the DLL RTL.
+//
+// This includes:
+// - forwarding the detect_stack_use_after_return runtime option
+// - installing a custom SEH handler
+//
+//===----------------------------------------------------------------------===//
+
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
+// Using #ifdef rather than relying on Makefiles etc.
+// simplifies the build procedure.
+#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+
+// Define a copy of __asan_option_detect_stack_use_after_return that should be
+// used when linking an MD runtime with a set of object files on Windows.
+//
+// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
+// so normally we would just dllimport it. Unfortunately, the dllimport
+// attribute adds an __imp_ prefix to the symbol name of a variable.
+// Since in general we don't know whether a given TU is going to be used
+// with an MT or MD runtime, and we don't want ugly __imp_ names on Windows
+// just to work around this issue, let's clone the variable, which is
+// constant after initialization anyway.
+int __asan_option_detect_stack_use_after_return =
+ __asan_should_detect_stack_use_after_return();
+
+// Set the ASan-specific SEH handler at the end of CRT initialization of each
+// module (see asan_win.cc for the details).
+//
+// Unfortunately, putting a pointer to __asan_set_seh_filter into
+// __asan_intercept_seh gets optimized out, so we have to use an extra function.
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+#pragma section(".CRT$XIZ", long, read) // NOLINT
+__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
+}
+#endif // ASAN_DYNAMIC_RUNTIME_THUNK
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-1:0:0
+2:0:0
--- /dev/null
+//===-- allocator_interface.h ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface header for allocator used in sanitizers (ASan/TSan/MSan).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  /* Returns the estimated number of bytes that will be reserved by the
+     allocator for a request of "size" bytes. If the allocator can't allocate
+     that much memory, returns the maximal possible allocation size; otherwise
+     returns "size". */
+ size_t __sanitizer_get_estimated_allocated_size(size_t size);
+
+ /* Returns true if p was returned by the allocator and
+ is not yet freed. */
+ int __sanitizer_get_ownership(const volatile void *p);
+
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+ size_t __sanitizer_get_allocated_size(const volatile void *p);
+
+  /* Number of bytes allocated and not yet freed by the application. */
+ size_t __sanitizer_get_current_allocated_bytes();
+
+  /* Number of bytes mmaped by the allocator to fulfill allocation requests.
+     Generally, for a request of X bytes, the allocator can reserve and add to
+     free lists a large number of chunks of size X to use them for future
+     requests. All these chunks count toward the heap size. Currently, the
+     allocator never releases memory to the OS (instead, it just puts freed
+     chunks on free lists). */
+ size_t __sanitizer_get_heap_size();
+
+  /* Number of bytes mmaped by the allocator that can be used to fulfill
+     allocation requests. When a user program frees a memory chunk, it can
+     first fall into quarantine and will count toward
+     __sanitizer_get_free_bytes() later. */
+ size_t __sanitizer_get_free_bytes();
+
+  /* Number of bytes in unmapped pages that are released to the OS.
+     Currently, always returns 0. */
+ size_t __sanitizer_get_unmapped_bytes();
+
+  /* Malloc hooks that may be optionally provided by the user.
+ __sanitizer_malloc_hook(ptr, size) is called immediately after
+ allocation of "size" bytes, which returned "ptr".
+ __sanitizer_free_hook(ptr) is called immediately before
+ deallocation of "ptr". */
+ void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
+ void __sanitizer_free_hook(const volatile void *ptr);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
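A usage sketch for this interface, assuming the header installs as <sanitizer/allocator_interface.h> and the program is built with an allocator-replacing sanitizer such as -fsanitize=address:

#include <sanitizer/allocator_interface.h>
#include <cstdio>
#include <cstdlib>

// Optional user-provided hooks; the runtime calls them around each
// allocation and deallocation. Writing to stderr is usually safe because
// it is unbuffered, but a production hook should avoid allocating.
extern "C" void __sanitizer_malloc_hook(const volatile void *ptr,
                                        size_t size) {
  fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}
extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
  fprintf(stderr, "free  %p\n", ptr);
}

int main() {
  void *p = malloc(16);
  fprintf(stderr, "owned=%d allocated=%zu heap=%zu\n",
          __sanitizer_get_ownership(p),
          __sanitizer_get_allocated_size(p),
          __sanitizer_get_heap_size());
  free(p);
  return 0;
}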
// Print the description of addr (useful when debugging in gdb).
void __asan_describe_address(void *addr);
+ // Useful for calling from the debugger to get the allocation stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the free stack trace
+ // and thread ID for a heap address. Stores up to 'size' frames into 'trace',
+ // returns the number of stored frames or 0 on error.
+ size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+ // Useful for calling from the debugger to get the current shadow memory
+ // mapping.
+ void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
+
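A sketch of exercising these from test code rather than a debugger, assuming the declarations are exposed via <sanitizer/asan_interface.h> and the binary is built with -fsanitize=address:

#include <sanitizer/asan_interface.h>
#include <cstdio>
#include <cstdlib>

int main() {
  void *p = malloc(32);
  void *trace[16];
  int thread_id;
  size_t frames = __asan_get_alloc_stack(p, trace, 16, &thread_id);
  printf("allocated on T%d; %zu frames; first pc %p\n",
         thread_id, frames, frames ? trace[0] : nullptr);

  size_t shadow_scale, shadow_offset;
  __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
  printf("shadow(addr) = (addr >> %zu) + 0x%zx\n",
         shadow_scale, shadow_offset);

  free(p);
  return 0;
}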
// This is an internal function that is called to report an error.
// However it is still a part of the interface because users may want to
// set a breakpoint on this function in a debugger.
// the program crashes before ASan report is printed.
void __asan_on_error();
- // Returns the estimated number of bytes that will be reserved by allocator
- // for request of "size" bytes. If ASan allocator can't allocate that much
- // memory, returns the maximal possible allocation size, otherwise returns
- // "size".
- size_t __asan_get_estimated_allocated_size(size_t size);
- // Returns 1 if p was returned by the ASan allocator and is not yet freed.
- // Otherwise returns 0.
- int __asan_get_ownership(const void *p);
- // Returns the number of bytes reserved for the pointer p.
- // Requires (get_ownership(p) == true) or (p == 0).
- size_t __asan_get_allocated_size(const void *p);
- // Number of bytes, allocated and not yet freed by the application.
- size_t __asan_get_current_allocated_bytes();
- // Number of bytes, mmaped by asan allocator to fulfill allocation requests.
- // Generally, for request of X bytes, allocator can reserve and add to free
- // lists a large number of chunks of size X to use them for future requests.
- // All these chunks count toward the heap size. Currently, allocator never
- // releases memory to OS (instead, it just puts freed chunks to free lists).
- size_t __asan_get_heap_size();
- // Number of bytes, mmaped by asan allocator, which can be used to fulfill
- // allocation requests. When a user program frees memory chunk, it can first
- // fall into quarantine and will count toward __asan_get_free_bytes() later.
- size_t __asan_get_free_bytes();
- // Number of bytes in unmapped pages, that are released to OS. Currently,
- // always returns 0.
- size_t __asan_get_unmapped_bytes();
// Prints accumulated stats to stderr. Used for debugging.
void __asan_print_accumulated_stats();
// a string containing ASan runtime options. See asan_flags.h for details.
const char* __asan_default_options();
- // Malloc hooks that may be optionally provided by user.
- // __asan_malloc_hook(ptr, size) is called immediately after
- // allocation of "size" bytes, which returned "ptr".
- // __asan_free_hook(ptr) is called immediately before
- // deallocation of "ptr".
- void __asan_malloc_hook(void *ptr, size_t size);
- void __asan_free_hook(void *ptr);
-
// The following 2 functions facilitate garbage collection in presence of
// asan's fake stack.
void __sanitizer_cov_init();
// Record and dump coverage info.
void __sanitizer_cov_dump();
+ // Open <name>.sancov.packed in the coverage directory and return the file
+ // descriptor. Returns -1 on failure, or if coverage dumping is disabled.
+ // This is intended for use by sandboxing code.
+ intptr_t __sanitizer_maybe_open_cov_file(const char *name);
// Annotate the current state of a contiguous container, such as
// std::vector, std::string or similar.
#ifdef __cplusplus
extern "C" {
#endif
- /* Returns a string describing a stack origin.
- Return NULL if the origin is invalid, or is not a stack origin. */
- const char *__msan_get_origin_descr_if_stack(uint32_t id);
-
/* Set raw origin for the memory range. */
void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
a string containing Msan runtime options. See msan_flags.h for details. */
const char* __msan_default_options();
- // Sets the callback to be called right before death on error.
- // Passing 0 will unset the callback.
+ /* Sets the callback to be called right before death on error.
+ Passing 0 will unset the callback. */
void __msan_set_death_callback(void (*callback)(void));
- /***********************************/
- /* Allocator statistics interface. */
-
- /* Returns the estimated number of bytes that will be reserved by allocator
- for request of "size" bytes. If Msan allocator can't allocate that much
- memory, returns the maximal possible allocation size, otherwise returns
- "size". */
- size_t __msan_get_estimated_allocated_size(size_t size);
-
- /* Returns true if p was returned by the Msan allocator and
- is not yet freed. */
- int __msan_get_ownership(const volatile void *p);
-
- /* Returns the number of bytes reserved for the pointer p.
- Requires (get_ownership(p) == true) or (p == 0). */
- size_t __msan_get_allocated_size(const volatile void *p);
-
- /* Number of bytes, allocated and not yet freed by the application. */
- size_t __msan_get_current_allocated_bytes();
-
- /* Number of bytes, mmaped by msan allocator to fulfill allocation requests.
- Generally, for request of X bytes, allocator can reserve and add to free
- lists a large number of chunks of size X to use them for future requests.
- All these chunks count toward the heap size. Currently, allocator never
- releases memory to OS (instead, it just puts freed chunks to free
- lists). */
- size_t __msan_get_heap_size();
-
- /* Number of bytes, mmaped by msan allocator, which can be used to fulfill
- allocation requests. When a user program frees memory chunk, it can first
- fall into quarantine and will count toward __msan_get_free_bytes()
- later. */
- size_t __msan_get_free_bytes();
-
- /* Number of bytes in unmapped pages, that are released to OS. Currently,
- always returns 0. */
- size_t __msan_get_unmapped_bytes();
-
- /* Malloc hooks that may be optionally provided by user.
- __msan_malloc_hook(ptr, size) is called immediately after
- allocation of "size" bytes, which returned "ptr".
- __msan_free_hook(ptr) is called immediately before
- deallocation of "ptr". */
- void __msan_malloc_hook(const volatile void *ptr, size_t size);
- void __msan_free_hook(const volatile void *ptr);
#ifdef __cplusplus
} // extern "C"
#endif
# define DECLARE_WRAPPER(ret_type, func, ...)
#elif defined(_WIN32)
-# if defined(_DLL) // DLL CRT
-# define WRAP(x) x
-# define WRAPPER_NAME(x) #x
-# define INTERCEPTOR_ATTRIBUTE
-# else // Static CRT
-# define WRAP(x) wrap_##x
-# define WRAPPER_NAME(x) "wrap_"#x
-# define INTERCEPTOR_ATTRIBUTE
-# endif
+# define WRAP(x) __asan_wrap_##x
+# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
# define DECLARE_WRAPPER(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif defined(__FreeBSD__)
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// FreeBSD's dynamic linker (non-compliantly) gives non-weak symbols higher
+// priority than weak ones, so weak aliases won't work for indirect calls
+// in position-independent (-fPIC / -fPIE) mode.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
#else
# define WRAP(x) __interceptor_ ## x
# define WRAPPER_NAME(x) "__interceptor_" #x
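For a concrete feel of the FreeBSD special case above, a hypothetical DECLARE_WRAPPER(int, foo, int x) expands roughly to the following, making the public symbol a strong, default-visibility alias of the interceptor so that even indirect calls in -fPIC/-fPIE code land in the wrapper:

// Expansion sketch for DECLARE_WRAPPER(int, foo, int x); "foo" is a
// hypothetical interceptee.
extern "C" int foo(int x)
    __attribute__((alias("__interceptor_foo"), visibility("default")));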
namespace __interception {
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr) {
- const char *DLLS[] = {
- "msvcr80.dll",
- "msvcr90.dll",
- "kernel32.dll",
- NULL
- };
- *func_addr = 0;
- for (size_t i = 0; *func_addr == 0 && DLLS[i]; ++i) {
- *func_addr = (uptr)GetProcAddress(GetModuleHandleA(DLLS[i]), func_name);
- }
- return (*func_addr != 0);
-}
-
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
case 0xEC83: // 83 EC XX = sub esp, XX
+ case 0x75FF: // FF 75 XX = push dword ptr [ebp+XXh]
cursor += 3;
continue;
case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
+ case 0x25FF: // FF 25 XX YY ZZ WW = jmp dword ptr ds:[WWZZYYXX]
cursor += 6;
continue;
case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
}
switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
case 0x24448A: // 8A 44 24 XX = mov al, byte ptr [esp+XXh]
+ case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
// FIXME: Unknown instruction failures might happen when we add a new
// interceptor or a new compiler version. In either case, they should result
// in visible and readable error messages. However, merely calling abort()
- // or __debugbreak() leads to an infinite recursion in CheckFailed.
+ // leads to an infinite recursion in CheckFailed.
// Do we have a good way to abort with an error message here?
+ __debugbreak();
return 0;
}
return true;
}
+static const void **InterestingDLLsAvailable() {
+ const char *InterestingDLLs[] = { "kernel32.dll", "msvcr120.dll", NULL };
+ static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
+ if (!result[0]) {
+ for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
+ if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
+ result[j++] = (void *)h;
+ }
+ }
+ return (const void **)&result[0];
+}
+
+static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
+ *func_addr = 0;
+ const void **DLLs = InterestingDLLsAvailable();
+ for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
+ *func_addr = (uptr)GetProcAddress((HMODULE)DLLs[i], func_name);
+ return (*func_addr != 0);
+}
+
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
+ uptr orig_func;
+ if (!GetFunctionAddressInDLLs(name, &orig_func))
+ return false;
+ return OverrideFunction(orig_func, new_func, orig_old_func);
+}
+
} // namespace __interception
#endif // _WIN32
#define INTERCEPTION_WIN_H
namespace __interception {
-// returns true if a function with the given name was found.
-bool GetRealFunctionAddress(const char *func_name, uptr *func_addr);
+// All the functions in the OverrideFunction() family return true on success,
+// false on failure (including "couldn't find the function").
-// returns true if the old function existed, false on failure.
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func);
+// Overrides a function by its address.
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
+
+// Overrides a function in a system DLL or DLL CRT by its exported name.
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
} // namespace __interception
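A hedged usage sketch of the name-based overload: the replacement function and variables are hypothetical, and "memset" is one of the exports the DLL probe above can find.

// Hook an exported CRT function and keep the original for pass-through.
extern "C" void *fake_memset(void *p, int v, __interception::uptr n);
static __interception::uptr real_memset;

static bool HookMemset() {
  return __interception::OverrideFunction(
      "memset", (__interception::uptr)fake_memset, &real_memset);
}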
-#if defined(_DLL)
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::GetRealFunctionAddress( \
- #func, (::__interception::uptr*)&REAL(func))
+#if defined(INTERCEPTION_DYNAMIC_CRT)
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction(#func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#else
-# define INTERCEPT_FUNCTION_WIN(func) \
- ::__interception::OverrideFunction( \
- (::__interception::uptr)func, \
- (::__interception::uptr)WRAP(func), \
- (::__interception::uptr*)&REAL(func))
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((::__interception::uptr)func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
#endif
-#define INTERCEPT_FUNCTION_VER_WIN(func, symver) \
- INTERCEPT_FUNCTION_WIN(func)
+#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
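Either branch is driven by the same one-liner; only the way the target address is found differs. A sketch, assuming an INTERCEPTOR(int, atoi, const char *) definition already provides WRAP(atoi) and REAL(atoi):

static bool RegisterAtoi() {
  // With INTERCEPTION_DYNAMIC_CRT this resolves "atoi" by export name in
  // the loaded CRT DLL; otherwise it hot-patches the statically linked
  // copy at its link-time address.
  return INTERCEPT_FUNCTION_WIN(atoi);
}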
namespace __lsan {
-static void InitializeCommonFlags() {
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
- cf->malloc_context_size = 30;
- cf->detect_leaks = true;
-
- ParseCommonFlagsFromString(cf, GetEnv("LSAN_OPTIONS"));
-}
-
///// Interface to the common LSan module. /////
bool WordIsPoisoned(uptr addr) {
return false;
return;
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
- InitializeCommonFlags();
+ InitCommonLsan(true);
InitializeAllocator();
InitTlsSize();
InitializeInterceptors();
ThreadStart(tid, GetTid());
SetCurrentThread(tid);
- Symbolizer::Init(common_flags()->external_symbolizer_path);
-
- InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
Atexit(DoLeakCheck);
lsan_inited = true;
lsan_init_is_running = false;
}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
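A hedged example of this new entry point from client code; the declaration is written out here as an assumption (compiler-rt ships it in the public headers, e.g. sanitizer/common_interface_defs.h):

extern "C" void __sanitizer_print_stack_trace();

static void WhereAmI(void) {
  __sanitizer_print_stack_trace();  // symbolized stack of the caller
}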
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#define GET_STACK_TRACE(max_size, fast) \
+ StackTrace stack; \
+ { \
+ uptr stack_top = 0, stack_bottom = 0; \
+ ThreadContext *t; \
+ if (fast && (t = CurrentThreadContext())) { \
+ stack_top = t->stack_end(); \
+ stack_bottom = t->stack_begin(); \
+ } \
+ stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ /* context */ 0, stack_top, stack_bottom, fast); \
+ }
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
namespace __lsan {
void InitializeInterceptors();
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
allocator.SwallowCache(&cache);
}
-static ChunkMetadata *Metadata(void *p) {
+static ChunkMetadata *Metadata(const void *p) {
return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
RegisterAllocation(stack, p, size);
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
return p;
}
void Deallocate(void *p) {
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
*end = *begin + sizeof(cache);
}
-uptr GetMallocUsableSize(void *p) {
+uptr GetMallocUsableSize(const void *p) {
ChunkMetadata *m = Metadata(p);
if (!m) return 0;
return m->requested_size;
}
}
} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+} // extern "C"
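A hedged client-side sketch of the interface implemented above; the extern declarations are assumptions standing in for the public allocator-interface header, and size_t stands in for uptr, which it matches on the supported targets:

#include <stdio.h>
extern "C" size_t __sanitizer_get_current_allocated_bytes();
extern "C" size_t __sanitizer_get_heap_size();

static void PrintHeapStats(void) {
  printf("live: %zu bytes, heap: %zu bytes\n",
         __sanitizer_get_current_allocated_bytes(),
         __sanitizer_get_heap_size());
}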
void Deallocate(void *p);
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
uptr alignment);
-uptr GetMallocUsableSize(void *p);
+uptr GetMallocUsableSize(const void *p);
template<typename Callable>
void ForEachChunk(const Callable &callback);
Flags lsan_flags;
-static void InitializeFlags() {
+static void InitializeFlags(bool standalone) {
Flags *f = flags();
// Default values.
f->report_objects = false;
f->resolution = 0;
f->max_leaks = 0;
f->exitcode = 23;
- f->print_suppressions = true;
- f->suppressions="";
f->use_registers = true;
f->use_globals = true;
f->use_stacks = true;
ParseFlag(options, &f->log_pointers, "log_pointers", "");
ParseFlag(options, &f->log_threads, "log_threads", "");
ParseFlag(options, &f->exitcode, "exitcode", "");
- ParseFlag(options, &f->print_suppressions, "print_suppressions", "");
- ParseFlag(options, &f->suppressions, "suppressions", "");
}
+
+ // Set defaults for common flags (only in standalone mode) and parse
+ // them from LSAN_OPTIONS.
+ CommonFlags *cf = common_flags();
+ if (standalone) {
+ SetCommonFlagsDefaults(cf);
+ cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf->malloc_context_size = 30;
+ cf->detect_leaks = true;
+ }
+ ParseCommonFlagsFromString(cf, options);
}
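In standalone mode the defaults above are seeded first and then overridden from the environment, so a run can be tuned without rebuilding, for example: LSAN_OPTIONS=malloc_context_size=16:print_suppressions=0 ./app (flag names as parsed above; the binary name is illustrative).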
#define LOG_POINTERS(...) \
if (flags()->log_threads) Report(__VA_ARGS__); \
} while (0);
-SuppressionContext *suppression_ctx;
+static bool suppressions_inited = false;
void InitializeSuppressions() {
- CHECK(!suppression_ctx);
- ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
- suppression_ctx = new(placeholder) SuppressionContext;
- char *suppressions_from_file;
- uptr buffer_size;
- if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
- &buffer_size, 1 << 26 /* max_len */))
- suppression_ctx->Parse(suppressions_from_file);
- if (flags()->suppressions[0] && !buffer_size) {
- Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
- flags()->suppressions);
- Die();
- }
+ CHECK(!suppressions_inited);
+ SuppressionContext::InitIfNecessary();
if (&__lsan_default_suppressions)
- suppression_ctx->Parse(__lsan_default_suppressions());
+ SuppressionContext::Get()->Parse(__lsan_default_suppressions());
+ suppressions_inited = true;
}
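Since the runtime only parses __lsan_default_suppressions() when the (weak) symbol is present, a program can bake in defaults by defining it. A sketch with hypothetical patterns, using the usual leak:<pattern> suppression syntax:

extern "C" const char *__lsan_default_suppressions(void) {
  return "leak:libsomething_leaky.so\n"
         "leak:KnownLeakyFunction\n";
}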
struct RootRegion {
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
-void InitCommonLsan() {
- InitializeFlags();
+void InitCommonLsan(bool standalone) {
+ InitializeFlags(standalone);
InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Initialization which can fail or print warnings should only be done if
}
}
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
const char *End() { return Default(); }
static void PrintMatchedSuppressions() {
InternalMmapVector<Suppression *> matched(1);
- suppression_ctx->GetMatched(&matched);
+ SuppressionContext::Get()->GetMatched(&matched);
if (!matched.size())
return;
const char *line = "-----------------------------------------------------";
Printf("%s", d.End());
param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
- if (flags()->print_suppressions)
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
if (unsuppressed_count > 0) {
param.leak_report.PrintSummary();
// Suppress by module name.
const char *module_name;
uptr module_offset;
- if (Symbolizer::Get()->GetModuleNameAndOffsetForPC(addr, &module_name,
- &module_offset) &&
- suppression_ctx->Match(module_name, SuppressionLeak, &s))
+ if (Symbolizer::GetOrInit()
+ ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) &&
+ SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s))
return s;
// Suppress by file or function name.
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
for (uptr i = 0; i < addr_frames_num; i++) {
- if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
- suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s))
+ if (SuppressionContext::Get()->Match(addr_frames[i].function,
+ SuppressionLeak, &s) ||
+ SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak,
+ &s))
return s;
}
return 0;
int max_leaks;
// If nonzero kill the process with this exit code upon finding leaks.
int exitcode;
- // Print matched suppressions after leak checking.
- bool print_suppressions;
- // Suppressions file name.
- const char* suppressions;
// Flags controlling the root set of reachable memory.
// Global variables (.data and .bss).
};
// Functions called from the parent tool.
-void InitCommonLsan();
+void InitCommonLsan(bool standalone);
void DoLeakCheck();
bool DisabledInThisThread();
int pthread_setspecific(unsigned key, const void *v);
}
-#define GET_STACK_TRACE \
- StackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- bool fast = common_flags()->fast_unwind_on_malloc; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- stack.Unwind(__sanitizer::common_flags()->malloc_context_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), 0, \
- stack_top, stack_bottom, fast); \
- }
-
#define ENSURE_LSAN_INITED do { \
CHECK(!lsan_init_is_running); \
if (!lsan_inited) \
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, 1, kAlwaysClearMemory);
}
}
if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0;
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
size *= nmemb;
return Allocate(stack, size, 1, true);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
return Reallocate(stack, q, size, 1);
}
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
+ return Allocate(stack, size, alignment, kAlwaysClearMemory);
+}
+
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
return Allocate(stack, size, alignment, kAlwaysClearMemory);
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
*memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
// FIXME: Return ENOMEM if user requested more than max alloc size.
return 0;
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
- GET_STACK_TRACE;
+ GET_STACK_TRACE_MALLOC;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
#define OPERATOR_NEW_BODY \
ENSURE_LSAN_INITED; \
- GET_STACK_TRACE; \
+ GET_STACK_TRACE_MALLOC; \
return Allocate(stack, size, 1, kAlwaysClearMemory);
INTERCEPTOR_ATTRIBUTE
#include "lsan.h"
-#ifndef LSAN_USE_PREINIT_ARRAY
-#define LSAN_USE_PREINIT_ARRAY 1
-#endif
-
-#if LSAN_USE_PREINIT_ARRAY && !defined(PIC)
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
// We force __lsan_init to be called before anyone else by placing it into
// .preinit_array section.
__attribute__((section(".preinit_array"), used))
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_posix_libcdep.cc \
sanitizer_win.cc
+
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
LTLIBRARIES = $(noinst_LTLIBRARIES)
libsanitizer_common_la_LIBADD =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
- sanitizer_common_libcdep.lo sanitizer_coverage.lo \
+ sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
+ sanitizer_coverage_mapping_libcdep.lo \
sanitizer_deadlock_detector1.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \
sanitizer_linux_libcdep.lo sanitizer_mac.lo \
+ sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
+ sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \
sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \
sanitizer_stackdepot.lo sanitizer_stacktrace.lo \
sanitizer_stacktrace_libcdep.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
- sanitizer_tls_get_addr.lo sanitizer_win.lo
+ sanitizer_tls_get_addr.lo sanitizer_unwind_posix_libcdep.lo \
+ sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
sanitizer_allocator.cc \
sanitizer_common.cc \
sanitizer_common_libcdep.cc \
- sanitizer_coverage.cc \
+ sanitizer_coverage_libcdep.cc \
+ sanitizer_coverage_mapping_libcdep.cc \
sanitizer_deadlock_detector1.cc \
sanitizer_deadlock_detector2.cc \
sanitizer_flags.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
sanitizer_mac.cc \
+ sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
sanitizer_platform_limits_posix.cc \
sanitizer_posix.cc \
sanitizer_posix_libcdep.cc \
sanitizer_printf.cc \
+ sanitizer_procmaps_common.cc \
+ sanitizer_procmaps_freebsd.cc \
sanitizer_procmaps_linux.cc \
sanitizer_procmaps_mac.cc \
sanitizer_stackdepot.cc \
sanitizer_symbolizer_win.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
+ sanitizer_unwind_posix_libcdep.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_common_libcdep.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_printf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_common.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_freebsd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_procmaps_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_stackdepot.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
.cc.o:
if (stats == this)
break;
}
- // All stats must be positive.
+ // All stats must be non-negative.
for (int i = 0; i < AllocatorStatCount; i++)
- s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
}
private:
--- /dev/null
+//===-- sanitizer_allocator_interface.h ------------------------- C++ -----===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Re-declaration of functions from public sanitizer allocator interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include "sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ /* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+} // extern "C"
+
+#endif // SANITIZER_ALLOCATOR_INTERFACE_H
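Because the two hooks are declared weak, a client may define them and the runtime calls them around every allocation and deallocation (each call site guards with an address-of check, as in the LSan allocator above). A sketch; size_t stands in for uptr:

#include <stddef.h>
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  // e.g. record (ptr, size) in a custom profiler
}
extern "C" void __sanitizer_free_hook(void *ptr) {
  // e.g. mark ptr as released
}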
static const uptr kInternalAllocatorSpace = 0;
static const u64 kInternalAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
-#if SANITIZER_WORDSIZE == 32
static const uptr kInternalAllocatorRegionSizeLog = 20;
+#if SANITIZER_WORDSIZE == 32
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef FlatByteMap<kInternalAllocatorNumRegions> ByteMap;
#else
-static const uptr kInternalAllocatorRegionSizeLog = 24;
static const uptr kInternalAllocatorNumRegions =
kInternalAllocatorSize >> kInternalAllocatorRegionSizeLog;
typedef TwoLevelByteMap<(kInternalAllocatorNumRegions >> 12), 1 << 12> ByteMap;
#endif
typedef SizeClassAllocator32<
- kInternalAllocatorSpace, kInternalAllocatorSize, 16, InternalSizeClassMap,
+ kInternalAllocatorSpace, kInternalAllocatorSize, 0, InternalSizeClassMap,
kInternalAllocatorRegionSizeLog, ByteMap> PrimaryInternalAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
-// We don't want our internal allocator to do any map/unmap operations from
-// LargeMmapAllocator.
-struct CrashOnMapUnmap {
- void OnMap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected mmap in InternalAllocator!\n");
- }
- void OnUnmap(uptr p, uptr size) const {
- RAW_CHECK_MSG(0, "Unexpected munmap in InternalAllocator!\n");
- }
-};
-
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<CrashOnMapUnmap> >
- InternalAllocator;
+ LargeMmapAllocator<> > InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0);
void InternalFree(void *p, InternalAllocatorCache *cache = 0);
long long volatile *Destination, // NOLINT
long long Exchange, long long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange64)
-
-#ifdef _WIN64
-extern "C" long long _InterlockedExchangeAdd64( // NOLINT
- long long volatile * Addend, long long Value); // NOLINT
-#pragma intrinsic(_InterlockedExchangeAdd64)
extern "C" void *_InterlockedCompareExchangePointer(
void *volatile *Destination,
void *Exchange, void *Comparand);
#pragma intrinsic(_InterlockedCompareExchangePointer)
-#else
-// There's no _InterlockedCompareExchangePointer intrinsic on x86,
-// so call _InterlockedCompareExchange instead.
extern "C"
long __cdecl _InterlockedCompareExchange( // NOLINT
long volatile *Destination, // NOLINT
long Exchange, long Comparand); // NOLINT
#pragma intrinsic(_InterlockedCompareExchange)
-inline static void *_InterlockedCompareExchangePointer(
- void *volatile *Destination,
- void *Exchange, void *Comparand) {
- return reinterpret_cast<void*>(
- _InterlockedCompareExchange(
- reinterpret_cast<long volatile*>(Destination), // NOLINT
- reinterpret_cast<long>(Exchange), // NOLINT
- reinterpret_cast<long>(Comparand))); // NOLINT
-}
+#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64( // NOLINT
+ long long volatile * Addend, long long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd64)
#endif
namespace __sanitizer {
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_symbolizer.h"
namespace __sanitizer {
ReportErrorSummary(buff.data());
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
- if (!common_flags()->print_summary)
- return;
- AddressInfo ai;
-#if !SANITIZER_GO
- if (stack->size > 0 && Symbolizer::Get()->CanReturnFileLineInfo()) {
- // Currently, we include the first stack frame into the report summary.
- // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
- uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
- Symbolizer::Get()->SymbolizePC(pc, &ai, 1);
- }
-#endif
- ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
-}
-
LoadedModule::LoadedModule(const char *module_name, uptr base_address) {
full_name_ = internal_strdup(module_name);
base_address_ = base_address;
n_ranges_ = 0;
}
-void LoadedModule::addAddressRange(uptr beg, uptr end) {
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges);
ranges_[n_ranges_].beg = beg;
ranges_[n_ranges_].end = end;
+ exec_[n_ranges_] = executable;
n_ranges_++;
}
atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
}
-static void (*sandboxing_callback)();
-void SetSandboxingCallback(void (*f)()) {
- sandboxing_callback = f;
-}
-
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
}
}
-void NOINLINE
-__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
- PrepareForSandboxing(args);
- if (sandboxing_callback)
- sandboxing_callback();
-}
-
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
// (or NULL if the mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset);
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
const char *module, uptr offset);
// OS
-void DisableCoreDumper();
+void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
+bool AddressSpaceIsUnlimited();
+void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());
+void CovUpdateMapping(uptr caller_pc = 0);
+void CovBeforeFork();
+void CovAfterFork(int child_pid);
+
void InitTlsSize();
uptr GetTlsSize();
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
-// error_type: file:line function
+// error_type file:line function
void ReportErrorSummary(const char *error_type, const char *file,
int line, const char *function);
void ReportErrorSummary(const char *error_type, StackTrace *trace);
class LoadedModule {
public:
LoadedModule(const char *module_name, uptr base_address);
- void addAddressRange(uptr beg, uptr end);
+ void addAddressRange(uptr beg, uptr end, bool executable);
bool containsAddress(uptr address) const;
const char *full_name() const { return full_name_; }
uptr base_address() const { return base_address_; }
+ uptr n_ranges() const { return n_ranges_; }
+ uptr address_range_start(int i) const { return ranges_[i].beg; }
+ uptr address_range_end(int i) const { return ranges_[i].end; }
+ bool address_range_executable(int i) const { return exec_[i]; }
+
private:
struct AddressRange {
uptr beg;
uptr base_address_;
static const uptr kMaxNumberOfAddressRanges = 6;
AddressRange ranges_[kMaxNumberOfAddressRanges];
+ bool exec_[kMaxNumberOfAddressRanges];
uptr n_ranges_;
};
#endif
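A runtime-internal sketch of the new accessors on the class above (the helper name is ours; Printf is sanitizer_common's formatter):

static void DumpExecRanges(const LoadedModule &m) {
  for (int i = 0; i < (int)m.n_ranges(); i++)
    if (m.address_range_executable(i))
      Printf("%s: executable range %zx-%zx\n", m.full_name(),
             m.address_range_start(i), m.address_range_end(i));
}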
#if SANITIZER_ANDROID
+// Initialize Android logging. Any writes before this are silently lost.
+void AndroidLogInit();
void AndroidLogWrite(const char *buffer);
void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder();
#else
+INLINE void AndroidLogInit() {}
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
return alloc.Allocate(size);
}
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
#endif // SANITIZER_COMMON_H
// This file should be included into the tool's interceptor file,
// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_ENTER_NOIGNORE
// COMMON_INTERCEPTOR_READ_RANGE
// COMMON_INTERCEPTOR_WRITE_RANGE
// COMMON_INTERCEPTOR_INITIALIZE_RANGE
// COMMON_INTERCEPTOR_MUTEX_REPAIR
// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
// COMMON_INTERCEPTOR_HANDLE_RECVMSG
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
//===----------------------------------------------------------------------===//
#include "interception/interception.h"
#include "sanitizer_addrhashmap.h"
#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
#endif
+#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
+#endif
+
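A hypothetical minimal tool illustrates the contract: define the hooks this file expects, then include it. A real tool supplies many more of the COMMON_INTERCEPTOR_* macros listed above; MyToolOnRead/MyToolOnWrite are assumptions:

#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
  do { ctx = 0; (void)ctx; } while (0)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
  MyToolOnRead((const void *)(ptr), (uptr)(size))
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
  MyToolOnWrite((void *)(ptr), (uptr)(size))
#include "sanitizer_common_interceptors.inc"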
struct FileMetadata {
// For open_memstream().
char **addr;
}
INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strncmp(s1, s2, size);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
unsigned char c1 = 0, c2 = 0;
INTERCEPTOR(double, frexp, double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
- double res = REAL(frexp)(x, exp);
+ // Assuming frexp() always writes to |exp|.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ double res = REAL(frexp)(x, exp);
return res;
}
INTERCEPTOR(float, frexpf, float x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
INTERCEPTOR(long double, frexpl, long double x, int *exp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
INTERCEPTOR(unsigned long, time, unsigned long *t) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, time, t);
- unsigned long res = REAL(time)(t);
+ unsigned long local_t;
+ unsigned long res = REAL(time)(&local_t);
if (t && res != (unsigned long)-1) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ *t = local_t;
}
return res;
}
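The rewrite above routes libc's store through a local so the tool's WRITE_RANGE check runs against the user pointer before anything is written to it. The same copy-through-local shape applies to any out-parameter interceptor; a sketch for a hypothetical int get_value(int *out):

INTERCEPTOR(int, get_value, int *out) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, get_value, out);
  int local;
  int res = REAL(get_value)(&local);  // real call writes only to our local
  if (out && res == 0) {
    COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
    *out = local;                     // copy out after annotating
  }
  return res;
}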
INTERCEPTOR(char *, ctime, unsigned long *timep) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
if (format)
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strptime)(s, format, tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, res - s);
return res; \
}
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
return res; \
}
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
return res; \
}
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
#endif
#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS || \
- SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT
+ SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT || \
+ SANITIZER_INTERCEPT_GETPWENT_R || SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
if (pwd) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
}
}
#endif // SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS ||
- // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT
+ // SANITIZER_INTERCEPT_GETPWENT || SANITIZER_INTERCEPT_FGETPWENT ||
+ // SANITIZER_INTERCEPT_GETPWENT_R ||
+ // SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_passwd(ctx, *result);
SIZE_T buflen, __sanitizer_passwd **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_passwd(ctx, *result);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_group(ctx, *result);
SIZE_T buflen, __sanitizer_group **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_group(ctx, *result);
SIZE_T buflen, __sanitizer_passwd **pwbufp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
SIZE_T buflen, __sanitizer_passwd **pwbufp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
__sanitizer_group **pwbufp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
SIZE_T buflen, __sanitizer_group **pwbufp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_getres)(clk_id, tp);
if (!res && tp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(clock_gettime)(clk_id, tp);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getitimer)(which, curr_value);
if (!res && curr_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
if (new_value)
COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(setitimer)(which, new_value, old_value);
if (!res && old_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait)(status);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
return res;
}
+// On FreeBSD id_t is always 64-bit wide.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,
+ int options) {
+#else
INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
int options) {
+#endif
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitid)(idtype, id, infop, options);
if (res != -1 && infop)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(waitpid)(pid, status, options);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait3)(status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(__wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
uptr sz = __sanitizer_in_addr_sz(af);
if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(inet_ntop)(af, src, dst, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
// FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_pton)(af, src, dst);
if (res == 1) {
uptr sz = __sanitizer_in_addr_sz(af);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(inet_aton)(cp, dst);
if (res != 0) {
uptr sz = __sanitizer_in_addr_sz(af_inet);
INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_getschedparam)(thread, policy, param);
if (res == 0) {
if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
if (hints)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getaddrinfo)(node, service, hints, out);
if (res == 0 && out) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
serv, servlen, flags);
// FIXME: consider adding READ_RANGE(sockaddr, salen)
// There is padding in in_addr that may make this too noisy
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
int addrlen_in = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
if (res == 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
#endif
#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTENT_R
INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
if (result) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTENT_R \
+ COMMON_INTERCEPT_FUNCTION(gethostent_r);
+#else
+#define INIT_GETHOSTENT_R
+#endif
+#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
buflen, result, h_errnop);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
h_errnop);
if (result) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
+#define INIT_GETHOSTBYADDR_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
+#else
+#define INIT_GETHOSTBYADDR_R
+#endif
-INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
- char *buf, SIZE_T buflen, __sanitizer_hostent **result,
- int *h_errnop) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
- h_errnop);
- int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
- if (result) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
- if (res == 0 && *result) write_hostent(ctx, *result);
- }
- if (h_errnop)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
- return res;
-}
-
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
__sanitizer_hostent **result, int *h_errnop) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
result, h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
if (result) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
return res;
}
-#define INIT_GETHOSTBYNAME_R \
- COMMON_INTERCEPT_FUNCTION(gethostent_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r); \
- COMMON_INTERCEPT_FUNCTION(gethostbyname_r); \
+#define INIT_GETHOSTBYNAME2_R \
COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
#else
-#define INIT_GETHOSTBYNAME_R
+#define INIT_GETHOSTBYNAME2_R
#endif
#if SANITIZER_INTERCEPT_GETSOCKOPT
COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
optlen);
if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
if (res == 0)
if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
addrlen0 = *addrlen;
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int fd2 = REAL(accept4)(fd, addr, addrlen, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
INTERCEPTOR(double, modf, double x, double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(modf)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
INTERCEPTOR(float, modff, float x, float *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(modff)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(modfl)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
int flags) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
unsigned addr_sz;
if (addrlen) addr_sz = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
if (!res && addr && addrlen)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
#if SANITIZER_INTERCEPT_SYSINFO
INTERCEPTOR(int, sysinfo, void *info) {
void *ctx;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
int res = REAL(sysinfo)(info);
if (!res && info)
INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
__sanitizer_dirent **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
__sanitizer_dirent64 **result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(readdir64_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
}
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
uptr res = REAL(ptrace)(request, pid, addr, data);
if (!res && data) {
- // Note that PEEK* requests assing different meaning to the return value.
+ // Note that PEEK* requests assign different meaning to the return value.
// This function does not handle them (nor does it need to).
if (request == ptrace_getregs)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
else if (request == ptrace_getsiginfo)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
+ else if (request == ptrace_geteventmsg)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
else if (request == ptrace_getregset) {
__sanitizer_iovec *iov = (__sanitizer_iovec *)data;
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iov->iov_base, iov->iov_len);
INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(getcwd)(buf, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
INTERCEPTOR(char *, get_current_dir_name, int fake) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(get_current_dir_name)(fake);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
INTMAX_T res = REAL(strtoimax)(nptr, endptr, base);
if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
return res;
INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
INTMAX_T res = REAL(strtoumax)(nptr, endptr, base);
if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
return res;
INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbstowcs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
// This function, and several others, may or may not write the terminating
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcstombs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
}
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(tcgetattr)(fd, termios_p);
if (!res && termios_p)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(confstr)(name, buf, len);
if (buf && res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
return res;
INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
// There are 2 versions of strerror_r:
// * POSIX version returns 0 on success, negative error code on failure,
INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
scandir_filter = filter;
scandir_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(scandir)(dirp, namelist, filter ? wrapped_scandir_filter : 0,
compar ? wrapped_scandir_compar : 0);
scandir_filter = 0;
if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
scandir64_filter = filter;
scandir64_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res =
REAL(scandir64)(dirp, namelist, filter ? wrapped_scandir64_filter : 0,
compar ? wrapped_scandir64_compar : 0);
INTERCEPTOR(int, getgroups, int size, u32 *lst) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getgroups)(size, lst);
if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigemptyset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigfillset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigpending)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(sigprocmask)(how, set, oldset);
if (!res && oldset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
INTERCEPTOR(int, backtrace, void **buffer, int size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(backtrace)(buffer, size);
if (res && buffer)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
if (buffer && size)
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
INTERCEPTOR(int, fstatfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(statvfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(fstatvfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
#define INIT_INITGROUPS
#endif
-#if SANITIZER_INTERCEPT_ETHER
+#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
return res;
}
+#define INIT_ETHER_NTOA_ATON \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton);
+#else
+#define INIT_ETHER_NTOA_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_HOST
INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
if (hostname)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_hostton)(hostname, addr);
if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ether_line)(line, addr, hostname);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
}
return res;
}
-#define INIT_ETHER \
- COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
- COMMON_INTERCEPT_FUNCTION(ether_aton); \
+#define INIT_ETHER_HOST \
COMMON_INTERCEPT_FUNCTION(ether_ntohost); \
COMMON_INTERCEPT_FUNCTION(ether_hostton); \
COMMON_INTERCEPT_FUNCTION(ether_line);
#else
-#define INIT_ETHER
+#define INIT_ETHER_HOST
#endif
#if SANITIZER_INTERCEPT_ETHER_R
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(ether_ntoa_r)(addr, buf);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
return res;
INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(shmctl)(shmid, cmd, buf);
if (res >= 0) {
unsigned sz = 0;
INTERCEPTOR(int, random_r, void *buf, u32 *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(random_r)(buf, result);
if (!res && result)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
#define INIT_RANDOM_R
#endif
-#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
- SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED
-#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
- INTERCEPTOR(int, pthread_attr_get##what, void *attr, void *r) { \
- void *ctx; \
- COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_get##what, attr, r); \
- int res = REAL(pthread_attr_get##what)(attr, r); \
- if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
- return res; \
+// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET
+#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \
+ INTERCEPTOR(int, fn, void *attr, void *r) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \
+ int res = REAL(fn)(attr, r); \
+ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
+ return res; \
}
+#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)
#endif
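
For clarity, here is (approximately) what one instantiation of the generator above expands to, using INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int)) as an example:

// Approximate expansion of INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int)),
// i.e. INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_gettype, sizeof(int)).
INTERCEPTOR(int, pthread_mutexattr_gettype, void *attr, void *r) {
  void *ctx;
  COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutexattr_gettype, attr, r);
  int res = REAL(pthread_mutexattr_gettype)(attr, r);
  // On success, the int written through r is marked as written/initialized.
  if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sizeof(int));
  return res;
}
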
#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getstack)(attr, addr, size);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,
cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
if (!res && cpusetsize && cpuset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
#endif
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);
+#else
+#define INIT_PTHREAD_CONDATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETCLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);
+#else
+#define INIT_PTHREAD_CONDATTR_GETCLOCK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int))  // not on Mac/Android
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);
+#else
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED
+#endif
+
#if SANITIZER_INTERCEPT_TMPNAM
INTERCEPTOR(char *, tmpnam, char *s) {
void *ctx;
char *res = REAL(tmpnam)(s);
if (res) {
if (s)
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
else
COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
INTERCEPTOR(char *, tmpnam_r, char *s) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(tmpnam_r)(s);
if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
return res;
INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincos)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosf)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(sincosl)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
INTERCEPTOR(double, remquo, double x, double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(remquo)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
INTERCEPTOR(float, remquof, float x, float y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(remquof)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(remquol)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
INTERCEPTOR(double, lgamma_r, double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
double res = REAL(lgamma_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
float res = REAL(lgammaf_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
+#define INIT_LGAMMA_R \
+ COMMON_INTERCEPT_FUNCTION(lgamma_r); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf_r);
+#else
+#define INIT_LGAMMA_R
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL_R
INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
long double res = REAL(lgammal_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
-#define INIT_LGAMMA_R \
- COMMON_INTERCEPT_FUNCTION(lgamma_r); \
- COMMON_INTERCEPT_FUNCTION(lgammaf_r); \
- COMMON_INTERCEPT_FUNCTION(lgammal_r);
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION(lgammal_r);
#else
-#define INIT_LGAMMA_R
+#define INIT_LGAMMAL_R
#endif
#if SANITIZER_INTERCEPT_DRAND48_R
INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(drand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(lrand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(getline)(lineptr, n, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
}
return res;
}
-INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,
void *stream) {
void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getdelim, lineptr, n, delim, stream);
- SSIZE_T res = REAL(getdelim)(lineptr, n, delim, stream);
+ COMMON_INTERCEPTOR_ENTER(ctx, __getdelim, lineptr, n, delim, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ SSIZE_T res = REAL(__getdelim)(lineptr, n, delim, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
}
return res;
}
-#define INIT_GETLINE \
- COMMON_INTERCEPT_FUNCTION(getline); \
+INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream) {
+ return __getdelim(lineptr, n, delim, stream);
+}
+#define INIT_GETLINE \
+ COMMON_INTERCEPT_FUNCTION(getline); \
+ COMMON_INTERCEPT_FUNCTION(__getdelim); \
COMMON_INTERCEPT_FUNCTION(getdelim);
#else
#define INIT_GETLINE
if (outbytesleft)
COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));
void *outbuf_orig = outbuf ? *outbuf : 0;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_clock_t res = REAL(times)(tms);
if (res != (__sanitizer_clock_t)-1 && tms)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
void *res = REAL(__tls_get_addr)(arg);
- DTLS_on_tls_get_addr(arg, res);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
return res;
}
#else
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(listxattr)(path, list, size);
// Here and below, size == 0 is a special case where nothing is written to the
// buffer, and res contains the desired buffer size.
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(llistxattr)(path, list, size);
if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
return res;
INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(flistxattr)(fd, list, size);
if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
return res;
COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(getxattr)(path, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getresuid)(ruid, euid, suid);
if (res >= 0) {
if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getresgid)(rgid, egid, sgid);
if (res >= 0) {
if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(getifaddrs)(ifap);
if (res == 0 && ifap) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
char *res = REAL(if_indextoname)(ifindex, ifname);
if (res && ifname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
if (hdrp)
COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(capget)(hdrp, datap);
if (res == 0 && datap)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(ftime)(tp);
if (tp)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
unsigned size, int op) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(xdrmem_create)(xdrs, addr, size, op);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
if (op == __sanitizer_XDR_ENCODE) {
INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
REAL(xdrstdio_create)(xdrs, file, op);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
}
+// FIXME: under ASan the call below may write to freed memory and corrupt
+// its metadata. See
+// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
#define XDR_INTERCEPTOR(F, T) \
INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
void *ctx; \
COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
}
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
int res = REAL(xdr_string)(xdrs, p, maxsize);
if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
int (*compar)(const void *, const void *)) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
void *res = REAL(tsearch)(key, rootp, compar);
if (res && *(void **)res == key)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
if (res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
__sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
if (res) unpoison_file(res);
return res;
#define INIT_FCLOSE
#endif
+#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ void *res = REAL(dlopen)(filename, flag);
+ COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
+ return res;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
+ int res = REAL(dlclose)(handle);
+ COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
+ return res;
+}
+#define INIT_DLOPEN_DLCLOSE \
+ COMMON_INTERCEPT_FUNCTION(dlopen); \
+ COMMON_INTERCEPT_FUNCTION(dlclose);
+#else
+#define INIT_DLOPEN_DLCLOSE
+#endif
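
The _NOIGNORE variant of the ENTER macro is used above, presumably so that interceptor-ignore machinery cannot suppress load/unload notifications. The tool is expected to supply the two LIBRARY_* hooks; a hypothetical tool-side definition, for illustration only (RescanModuleGlobals is an invented name, not a real sanitizer function):

// Hypothetical hook (illustration only; RescanModuleGlobals is invented):
// a tool could use the load notification to register state for the new module.
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  do { if (handle) RescanModuleGlobals(filename); } while (0)
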
+
+#if SANITIZER_INTERCEPT_GETPASS
+INTERCEPTOR(char *, getpass, const char *prompt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
+ if (prompt)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ char *res = REAL(getpass)(prompt);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ return res;
+}
+
+#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);
+#else
+#define INIT_GETPASS
+#endif
+
+#if SANITIZER_INTERCEPT_TIMERFD
+INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,
+ void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,
+ old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);
+ int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);
+ if (res != -1 && old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);
+ return res;
+}
+
+INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);
+ int res = REAL(timerfd_gettime)(fd, curr_value);
+ if (res != -1 && curr_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);
+ return res;
+}
+#define INIT_TIMERFD \
+ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \
+ COMMON_INTERCEPT_FUNCTION(timerfd_gettime);
+#else
+#define INIT_TIMERFD
+#endif
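
The timerfd interceptors mirror the kernel contract: new_value is read unconditionally, old_value is written only when the call succeeds. A small sketch of a call they would observe (standard Linux timerfd API):

// Sketch: arm a one-second timer; the interceptor checks &its as a read
// and, on success, marks &old as written.
#include <string.h>
#include <sys/timerfd.h>
int arm_one_second(int fd) {
  struct itimerspec its, old;
  memset(&its, 0, sizeof(its));
  its.it_value.tv_sec = 1;
  return timerfd_settime(fd, 0, &its, &old);
}
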
+
+#if SANITIZER_INTERCEPT_MLOCKX
+// The Linux kernel has a bug that can lead to a kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ VPrintf(1, "INFO: %s ignores mlock/mlockall/munlock/munlockall\n",
+ SanitizerToolName);
+}
+
+INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+#define INIT_MLOCKX \
+ COMMON_INTERCEPT_FUNCTION(mlock); \
+ COMMON_INTERCEPT_FUNCTION(munlock); \
+ COMMON_INTERCEPT_FUNCTION(mlockall); \
+ COMMON_INTERCEPT_FUNCTION(munlockall);
+
+#else
+#define INIT_MLOCKX
+#endif // SANITIZER_INTERCEPT_MLOCKX
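
Under these interceptors the locking calls succeed without doing anything; a minimal sketch of the observable behavior, assuming the interceptors are active:

// Sketch: mlock() is intercepted and becomes a successful no-op; at
// verbosity >= 1 a one-time INFO message is printed instead.
#include <sys/mman.h>
int main() {
  static char buf[4096];
  return mlock(buf, sizeof(buf));  // returns 0 without locking anything
}
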
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
INIT_GETSOCKNAME;
INIT_GETHOSTBYNAME;
INIT_GETHOSTBYNAME_R;
+ INIT_GETHOSTBYNAME2_R;
+ INIT_GETHOSTBYADDR_R;
+ INIT_GETHOSTENT_R;
INIT_GETSOCKOPT;
INIT_ACCEPT;
INIT_ACCEPT4;
INIT_STATVFS;
INIT_STATVFS64;
INIT_INITGROUPS;
- INIT_ETHER;
+ INIT_ETHER_NTOA_ATON;
+ INIT_ETHER_HOST;
INIT_ETHER_R;
INIT_SHMCTL;
INIT_RANDOM_R;
INIT_PTHREAD_ATTR_GET;
INIT_PTHREAD_ATTR_GETINHERITSCHED;
INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_MUTEXATTR_GETPSHARED;
+ INIT_PTHREAD_MUTEXATTR_GETTYPE;
+ INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
+ INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;
+ INIT_PTHREAD_RWLOCKATTR_GETPSHARED;
+ INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;
+ INIT_PTHREAD_CONDATTR_GETPSHARED;
+ INIT_PTHREAD_CONDATTR_GETCLOCK;
+ INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
INIT_TEMPNAM;
INIT_REMQUO;
INIT_LGAMMA;
INIT_LGAMMA_R;
+ INIT_LGAMMAL_R;
INIT_DRAND48_R;
INIT_RAND_R;
INIT_GETLINE;
INIT_OBSTACK;
INIT_FFLUSH;
INIT_FCLOSE;
+ INIT_DLOPEN_DLCLOSE;
+ INIT_GETPASS;
+ INIT_TIMERFD;
+ INIT_MLOCKX;
}
case 8: \
va_arg(*aq, double); \
break; \
+ case 12: \
+ va_arg(*aq, long double); \
+ break; \
case 16: \
va_arg(*aq, long double); \
break; \
desc->name = "<DECODED_IOCTL>";
desc->size = IOC_SIZE(req);
// Sanity check.
- if (desc->size > 1024) return false;
+ if (desc->size > 0xFFFF) return false;
unsigned dir = IOC_DIR(req);
switch (dir) {
case IOC_NONE:
default:
return false;
}
- if (desc->type != IOC_NONE && desc->size == 0) return false;
- char id = IOC_TYPE(req);
+ // Size can be 0 iff type is NONE.
+ if ((desc->type == IOC_NONE) != (desc->size == 0)) return false;
// Sanity check.
- if (!(id >= 'a' && id <= 'z') && !(id >= 'A' && id <= 'Z')) return false;
+ if (IOC_TYPE(req) == 0) return false;
return true;
}
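
For context, the IOC_* accessors used above decompose the conventional Linux ioctl request encoding. A minimal sketch of that layout, assuming the common field widths (powerpc, mips and sparc use different widths):

// Common Linux _IOC layout (x86/x86_64 and most targets):
//   nr: bits 0..7, type: bits 8..15, size: bits 16..29, dir: bits 30..31.
static inline unsigned ioc_nr(unsigned req)   { return req & 0xFF; }
static inline unsigned ioc_type(unsigned req) { return (req >> 8) & 0xFF; }
static inline unsigned ioc_size(unsigned req) { return (req >> 16) & 0x3FFF; }
static inline unsigned ioc_dir(unsigned req)  { return req >> 30; }
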
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
return internal_strcmp(flag, "always") == 0 ||
(internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached());
}
+
+static void (*sandboxing_callback)();
+void SetSandboxingCallback(void (*f)()) {
+ sandboxing_callback = f;
+}
+
+void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+ if (!common_flags()->print_summary)
+ return;
+ AddressInfo ai;
+#if !SANITIZER_GO
+ if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) {
+ // Currently, we include the first stack frame in the report summary.
+ // At some point we may want to choose a different frame (e.g. to skip
+ // memcpy and similar functions).
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
+ Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1);
+ }
+#endif
+ ReportErrorSummary(error_type, ai.file, ai.line, ai.function);
+}
+
} // namespace __sanitizer
+
+void NOINLINE
+__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
+ PrepareForSandboxing(args);
+ if (sandboxing_callback)
+ sandboxing_callback();
+}
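
A usage sketch, from the tool side, of the notification path added above (CovBeforeSandboxing is an invented example name):

// Illustration only: a tool registers a callback once at startup...
namespace __sanitizer {
static void CovBeforeSandboxing() {
  // e.g. pre-open output files while the filesystem is still reachable
}
static void ToolInit() { SetSandboxingCallback(CovBeforeSandboxing); }
}  // namespace __sanitizer
// ...and the client calls __sanitizer_sandbox_on_notify(args) right before
// engaging its sandbox, which runs PrepareForSandboxing(args) and then the
// registered callback.
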
}
}
+#if !SANITIZER_ANDROID
PRE_SYSCALL(statfs)(const void *path, void *buf) {
if (path)
PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
if (buf) POST_WRITE(buf, struct_statfs64_sz);
}
}
+#endif // !SANITIZER_ANDROID
PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
if (filename)
} else if (op == iocb_cmd_pread && buf && len) {
POST_WRITE(buf, len);
} else if (op == iocb_cmd_pwritev) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- PRE_READ(iovec[i].iov_base, iovec[i].iov_len);
+ PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
} else if (op == iocb_cmd_preadv) {
- __sanitizer_iovec *iovec = (__sanitizer_iovec*)iocbpp[i]->aio_buf;
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
for (uptr v = 0; v < len; v++)
- POST_WRITE(iovec[i].iov_base, iovec[i].iov_len);
+ POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
}
// See comment in io_getevents.
COMMON_SYSCALL_RELEASE(data);
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if defined(__i386) || defined (__x86_64)
+#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
+++ /dev/null
-//===-- sanitizer_coverage.cc ---------------------------------------------===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// Sanitizer Coverage.
-// This file implements run-time support for a poor man's coverage tool.
-//
-// Compiler instrumentation:
-// For every interesting basic block the compiler injects the following code:
-// if (*Guard) {
-// __sanitizer_cov();
-// *Guard = 1;
-// }
-// It's fine to call __sanitizer_cov more than once for a given block.
-//
-// Run-time:
-// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
-// - __sanitizer_cov_dump: dump the coverage data to disk.
-// For every module of the current process that has coverage data
-// this will create a file module_name.PID.sancov. The file format is simple:
-// it's just a sorted sequence of 4-byte offsets in the module.
-//
-// Eventually, this coverage implementation should be obsoleted by a more
-// powerful general purpose Clang/LLVM coverage instrumentation.
-// Consider this implementation as prototype.
-//
-// FIXME: support (or at least test with) dlclose.
-//===----------------------------------------------------------------------===//
-
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_procmaps.h"
-#include "sanitizer_stacktrace.h"
-#include "sanitizer_flags.h"
-
-atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
-
-// pc_array is the array containing the covered PCs.
-// To make the pc_array thread- and async-signal-safe it has to be large enough.
-// 128M counters "ought to be enough for anybody" (4M on 32-bit).
-// pc_array is allocated with MmapNoReserveOrDie and so it uses only as
-// much RAM as it really needs.
-static const uptr kPcArraySize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
-static uptr *pc_array;
-static atomic_uintptr_t pc_array_index;
-
-static bool cov_sandboxed = false;
-static int cov_fd = kInvalidFd;
-static unsigned int cov_max_block_size = 0;
-
-namespace __sanitizer {
-
-// Simply add the pc into the vector under lock. If the function is called more
-// than once for a given PC it will be inserted multiple times, which is fine.
-static void CovAdd(uptr pc) {
- if (!pc_array) return;
- uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
- CHECK_LT(idx, kPcArraySize);
- pc_array[idx] = pc;
-}
-
-void CovInit() {
- pc_array = reinterpret_cast<uptr *>(
- MmapNoReserveOrDie(sizeof(uptr) * kPcArraySize, "CovInit"));
-}
-
-static inline bool CompareLess(const uptr &a, const uptr &b) {
- return a < b;
-}
-
-// Block layout for packed file format: header, followed by module name (no
-// trailing zero), followed by data blob.
-struct CovHeader {
- int pid;
- unsigned int module_name_length;
- unsigned int data_length;
-};
-
-static void CovWritePacked(int pid, const char *module, const void *blob,
- unsigned int blob_size) {
- CHECK_GE(cov_fd, 0);
- unsigned module_name_length = internal_strlen(module);
- CovHeader header = {pid, module_name_length, blob_size};
-
- if (cov_max_block_size == 0) {
- // Writing to a file. Just go ahead.
- internal_write(cov_fd, &header, sizeof(header));
- internal_write(cov_fd, module, module_name_length);
- internal_write(cov_fd, blob, blob_size);
- } else {
- // Writing to a socket. We want to split the data into appropriately sized
- // blocks.
- InternalScopedBuffer<char> block(cov_max_block_size);
- CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
- uptr header_size_with_module = sizeof(header) + module_name_length;
- CHECK_LT(header_size_with_module, cov_max_block_size);
- unsigned int max_payload_size =
- cov_max_block_size - header_size_with_module;
- char *block_pos = block.data();
- internal_memcpy(block_pos, &header, sizeof(header));
- block_pos += sizeof(header);
- internal_memcpy(block_pos, module, module_name_length);
- block_pos += module_name_length;
- char *block_data_begin = block_pos;
- char *blob_pos = (char *)blob;
- while (blob_size > 0) {
- unsigned int payload_size = Min(blob_size, max_payload_size);
- blob_size -= payload_size;
- internal_memcpy(block_data_begin, blob_pos, payload_size);
- blob_pos += payload_size;
- ((CovHeader *)block.data())->data_length = payload_size;
- internal_write(cov_fd, block.data(),
- header_size_with_module + payload_size);
- }
- }
-}
-
-// Dump the coverage on disk.
-static void CovDump() {
- if (!common_flags()->coverage) return;
-#if !SANITIZER_WINDOWS
- if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
- return;
- uptr size = atomic_load(&pc_array_index, memory_order_relaxed);
- InternalSort(&pc_array, size, CompareLess);
- InternalMmapVector<u32> offsets(size);
- const uptr *vb = pc_array;
- const uptr *ve = vb + size;
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr mb, me, off, prot;
- InternalScopedBuffer<char> module(4096);
- InternalScopedBuffer<char> path(4096 * 2);
- for (int i = 0;
- proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
- i++) {
- if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
- continue;
- while (vb < ve && *vb < mb) vb++;
- if (vb >= ve) break;
- if (*vb < me) {
- offsets.clear();
- const uptr *old_vb = vb;
- CHECK_LE(off, *vb);
- for (; vb < ve && *vb < me; vb++) {
- uptr diff = *vb - (i ? mb : 0) + off;
- CHECK_LE(diff, 0xffffffffU);
- offsets.push_back(static_cast<u32>(diff));
- }
- char *module_name = StripModuleName(module.data());
- if (cov_sandboxed) {
- CovWritePacked(internal_getpid(), module_name, offsets.data(),
- offsets.size() * sizeof(u32));
- VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
- } else {
- // One file per module per process.
- internal_snprintf((char *)path.data(), path.size(), "%s.%zd.sancov",
- module_name, internal_getpid());
- uptr fd = OpenFile(path.data(), true);
- if (internal_iserror(fd)) {
- Report(" CovDump: failed to open %s for writing\n", path.data());
- } else {
- internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
- internal_close(fd);
- VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
- vb - old_vb);
- }
- }
- InternalFree(module_name);
- }
- }
- if (cov_fd >= 0)
- internal_close(cov_fd);
-#endif // !SANITIZER_WINDOWS
-}
-
-static void OpenPackedFileForWriting() {
- CHECK(cov_fd == kInvalidFd);
- InternalScopedBuffer<char> path(1024);
- internal_snprintf((char *)path.data(), path.size(), "%zd.sancov.packed",
- internal_getpid());
- uptr fd = OpenFile(path.data(), true);
- if (internal_iserror(fd)) {
- Report(" Coverage: failed to open %s for writing\n", path.data());
- Die();
- }
- cov_fd = fd;
-}
-
-void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
- if (!args) return;
- if (!common_flags()->coverage) return;
- cov_sandboxed = args->coverage_sandboxed;
- if (!cov_sandboxed) return;
- cov_fd = args->coverage_fd;
- cov_max_block_size = args->coverage_max_block_size;
- if (cov_fd < 0)
- // Pre-open the file now. The sandbox won't allow us to do it later.
- OpenPackedFileForWriting();
-}
-
-} // namespace __sanitizer
-
-extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
- CovAdd(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
-}
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
-SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { CovInit(); }
-} // extern "C"
--- /dev/null
+//===-- sanitizer_coverage.cc ---------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage.
+// This file implements run-time support for a poor man's coverage tool.
+//
+// Compiler instrumentation:
+// For every interesting basic block the compiler injects the following code:
+// if (!*Guard) {
+// __sanitizer_cov();
+// *Guard = 1;
+// }
+// It's fine to call __sanitizer_cov more than once for a given block.
+//
+// Run-time:
+// - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC).
+// - __sanitizer_cov_dump: dump the coverage data to disk.
+// For every module of the current process that has coverage data
+// this will create a file module_name.PID.sancov. The file format is simple:
+// it's just a sorted sequence of 4-byte offsets in the module.
+//
+// Eventually, this coverage implementation should be obsoleted by a more
+// powerful general purpose Clang/LLVM coverage instrumentation.
+// Consider this implementation a prototype.
+//
+// FIXME: support (or at least test with) dlclose.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_flags.h"
+
+atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
+
+// pc_array is the array containing the covered PCs.
+// To make the pc_array thread- and async-signal-safe it has to be large enough.
+// 128M counters "ought to be enough for anybody" (4M on 32-bit).
+
+// With coverage_direct=1 in ASAN_OPTIONS, pc_array memory is mapped to a file.
+// In this mode, __sanitizer_cov_dump does nothing, and CovUpdateMapping()
+// dumps the current memory layout to another file.
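+// An illustrative invocation (flag names are real, the binary name is not):
+//   ASAN_OPTIONS=coverage=1:coverage_direct=1:coverage_dir=/tmp ./a.out
+// produces /tmp/<pid>.sancov.raw and /tmp/<pid>.sancov.map.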
+
+static bool cov_sandboxed = false;
+static int cov_fd = kInvalidFd;
+static unsigned int cov_max_block_size = 0;
+
+namespace __sanitizer {
+
+class CoverageData {
+ public:
+ void Init();
+ void BeforeFork();
+ void AfterFork(int child_pid);
+ void Extend(uptr npcs);
+ void Add(uptr pc);
+
+ uptr *data();
+ uptr size();
+
+ private:
+ // Maximal size to which the pc array may ever grow.
+ // We MmapNoReserve this space to ensure that the array is contiguous.
+ static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27);
+ // The amount by which the file mapping for the pc array is grown.
+ static const uptr kPcArrayMmapSize = 64 * 1024;
+
+ // pc_array is allocated with MmapNoReserveOrDie and so it uses only as
+ // much RAM as it really needs.
+ uptr *pc_array;
+ // Index of the first available pc_array slot.
+ atomic_uintptr_t pc_array_index;
+ // Array size.
+ atomic_uintptr_t pc_array_size;
+ // Current file mapped size of the pc array.
+ uptr pc_array_mapped_size;
+ // Descriptor of the file mapped pc array.
+ int pc_fd;
+ StaticSpinMutex mu;
+
+ void DirectOpen();
+ void ReInit();
+};
+
+static CoverageData coverage_data;
+
+void CoverageData::DirectOpen() {
+ InternalScopedString path(1024);
+ internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw",
+ common_flags()->coverage_dir, internal_getpid());
+ pc_fd = OpenFile(path.data(), true);
+ if (internal_iserror(pc_fd)) {
+ Report(" Coverage: failed to open %s for writing\n", path.data());
+ Die();
+ }
+
+ pc_array_mapped_size = 0;
+ CovUpdateMapping();
+}
+
+void CoverageData::Init() {
+ pc_array = reinterpret_cast<uptr *>(
+ MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit"));
+ pc_fd = kInvalidFd;
+ if (common_flags()->coverage_direct) {
+ atomic_store(&pc_array_size, 0, memory_order_relaxed);
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ } else {
+ atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed);
+ atomic_store(&pc_array_index, 0, memory_order_relaxed);
+ }
+}
+
+void CoverageData::ReInit() {
+ internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize);
+ if (pc_fd != kInvalidFd) internal_close(pc_fd);
+ if (common_flags()->coverage_direct) {
+ // In memory-mapped mode we must extend the new file to the known array
+ // size.
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ Init();
+ if (size) Extend(size);
+ } else {
+ Init();
+ }
+}
+
+void CoverageData::BeforeFork() {
+ mu.Lock();
+}
+
+void CoverageData::AfterFork(int child_pid) {
+ // We are single-threaded so it's OK to release the lock early.
+ mu.Unlock();
+ if (child_pid == 0) ReInit();
+}
+
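+// A worked example for Extend() below, with made-up numbers: growth happens
+// in kPcArrayMmapSize (64K) steps, so once the pc array reaches a total of
+// 100000 bytes the file has been extended twice, to 131072 bytes.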
+// Extend the coverage PC array to fit npcs additional elements.
+void CoverageData::Extend(uptr npcs) {
+ if (!common_flags()->coverage_direct) return;
+ SpinMutexLock l(&mu);
+
+ if (pc_fd == kInvalidFd) DirectOpen();
+ CHECK_NE(pc_fd, kInvalidFd);
+
+ uptr size = atomic_load(&pc_array_size, memory_order_relaxed);
+ size += npcs * sizeof(uptr);
+
+ if (size > pc_array_mapped_size) {
+ uptr new_mapped_size = pc_array_mapped_size;
+ while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize;
+
+ // Extend the file and map the new space at the end of pc_array.
+ uptr res = internal_ftruncate(pc_fd, new_mapped_size);
+ int err;
+ if (internal_iserror(res, &err)) {
+ Printf("failed to extend raw coverage file: %d\n", err);
+ Die();
+ }
+ void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size,
+ new_mapped_size - pc_array_mapped_size,
+ pc_fd, pc_array_mapped_size);
+ CHECK_EQ(p, pc_array + pc_array_mapped_size);
+ pc_array_mapped_size = new_mapped_size;
+ }
+
+ atomic_store(&pc_array_size, size, memory_order_release);
+}
+
+// Add the pc to the vector by atomically bumping the index. If the function
+// is called more than once for a given PC it will be inserted multiple
+// times, which is fine.
+void CoverageData::Add(uptr pc) {
+ if (!pc_array) return;
+ uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed);
+ CHECK_LT(idx * sizeof(uptr),
+ atomic_load(&pc_array_size, memory_order_acquire));
+ pc_array[idx] = pc;
+}
+
+uptr *CoverageData::data() {
+ return pc_array;
+}
+
+uptr CoverageData::size() {
+ return atomic_load(&pc_array_index, memory_order_relaxed);
+}
+
+// Block layout for packed file format: header, followed by module name (no
+// trailing zero), followed by data blob.
+struct CovHeader {
+ int pid;
+ unsigned int module_name_length;
+ unsigned int data_length;
+};
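+// For example, a block from pid 1234 for a module "libfoo.so" (both values
+// made up) carrying an 8-byte blob is laid out as:
+//   {pid=1234, module_name_length=9, data_length=8} "libfoo.so" <8 bytes>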
+
+static void CovWritePacked(int pid, const char *module, const void *blob,
+ unsigned int blob_size) {
+ if (cov_fd < 0) return;
+ unsigned module_name_length = internal_strlen(module);
+ CovHeader header = {pid, module_name_length, blob_size};
+
+ if (cov_max_block_size == 0) {
+ // Writing to a file. Just go ahead.
+ internal_write(cov_fd, &header, sizeof(header));
+ internal_write(cov_fd, module, module_name_length);
+ internal_write(cov_fd, blob, blob_size);
+ } else {
+ // Writing to a socket. We want to split the data into appropriately sized
+ // blocks.
+ InternalScopedBuffer<char> block(cov_max_block_size);
+ CHECK_EQ((uptr)block.data(), (uptr)(CovHeader *)block.data());
+ uptr header_size_with_module = sizeof(header) + module_name_length;
+ CHECK_LT(header_size_with_module, cov_max_block_size);
+ unsigned int max_payload_size =
+ cov_max_block_size - header_size_with_module;
+ char *block_pos = block.data();
+ internal_memcpy(block_pos, &header, sizeof(header));
+ block_pos += sizeof(header);
+ internal_memcpy(block_pos, module, module_name_length);
+ block_pos += module_name_length;
+ char *block_data_begin = block_pos;
+ char *blob_pos = (char *)blob;
+ while (blob_size > 0) {
+ unsigned int payload_size = Min(blob_size, max_payload_size);
+ blob_size -= payload_size;
+ internal_memcpy(block_data_begin, blob_pos, payload_size);
+ blob_pos += payload_size;
+ ((CovHeader *)block.data())->data_length = payload_size;
+ internal_write(cov_fd, block.data(),
+ header_size_with_module + payload_size);
+ }
+ }
+}
+
+// If packed = false: <name>.<pid>.<sancov> (name = module name).
+// If packed = true and name == 0: <pid>.<sancov>.<packed>.
+// If packed = true and name != 0: <name>.<sancov>.<packed> (name is
+// user-supplied).
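+// E.g. with coverage_dir="." and pid 1234 (illustrative values),
+// CovOpenFile(false, "a.out") opens ./a.out.1234.sancov, while
+// CovOpenFile(true, 0) opens ./1234.sancov.packed.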
+static int CovOpenFile(bool packed, const char* name) {
+ InternalScopedBuffer<char> path(1024);
+ if (!packed) {
+ CHECK(name);
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
+ common_flags()->coverage_dir, name, internal_getpid());
+ } else {
+ if (!name)
+ internal_snprintf((char *)path.data(), path.size(),
+ "%s/%zd.sancov.packed", common_flags()->coverage_dir,
+ internal_getpid());
+ else
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed",
+ common_flags()->coverage_dir, name);
+ }
+ uptr fd = OpenFile(path.data(), true);
+ if (internal_iserror(fd)) {
+ Report(" SanitizerCoverage: failed to open %s for writing\n", path.data());
+ return -1;
+ }
+ return fd;
+}
+
+// Dump the coverage on disk.
+static void CovDump() {
+ if (!common_flags()->coverage || common_flags()->coverage_direct) return;
+#if !SANITIZER_WINDOWS
+ if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed))
+ return;
+ uptr size = coverage_data.size();
+ InternalMmapVector<u32> offsets(size);
+ uptr *vb = coverage_data.data();
+ uptr *ve = vb + size;
+ SortArray(vb, size);
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ uptr mb, me, off, prot;
+ InternalScopedBuffer<char> module(4096);
+ InternalScopedBuffer<char> path(4096 * 2);
+ for (int i = 0;
+ proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot);
+ i++) {
+ if ((prot & MemoryMappingLayout::kProtectionExecute) == 0)
+ continue;
+ while (vb < ve && *vb < mb) vb++;
+ if (vb >= ve) break;
+ if (*vb < me) {
+ offsets.clear();
+ const uptr *old_vb = vb;
+ CHECK_LE(off, *vb);
+ for (; vb < ve && *vb < me; vb++) {
+ uptr diff = *vb - (i ? mb : 0) + off;
+ CHECK_LE(diff, 0xffffffffU);
+ offsets.push_back(static_cast<u32>(diff));
+ }
+ char *module_name = StripModuleName(module.data());
+ if (cov_sandboxed) {
+ if (cov_fd >= 0) {
+ CovWritePacked(internal_getpid(), module_name, offsets.data(),
+ offsets.size() * sizeof(u32));
+ VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb);
+ }
+ } else {
+ // One file per module per process.
+ internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov",
+ common_flags()->coverage_dir, module_name,
+ internal_getpid());
+ int fd = CovOpenFile(false /* packed */, module_name);
+ if (fd > 0) {
+ internal_write(fd, offsets.data(), offsets.size() * sizeof(u32));
+ internal_close(fd);
+ VReport(1, " CovDump: %s: %zd PCs written\n", path.data(),
+ vb - old_vb);
+ }
+ }
+ InternalFree(module_name);
+ }
+ }
+ if (cov_fd >= 0)
+ internal_close(cov_fd);
+#endif // !SANITIZER_WINDOWS
+}
+
+void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ if (!args) return;
+ if (!common_flags()->coverage) return;
+ cov_sandboxed = args->coverage_sandboxed;
+ if (!cov_sandboxed) return;
+ cov_fd = args->coverage_fd;
+ cov_max_block_size = args->coverage_max_block_size;
+ if (cov_fd < 0)
+ // Pre-open the file now. The sandbox won't allow us to do it later.
+ cov_fd = CovOpenFile(true /* packed */, 0);
+}
+
+int MaybeOpenCovFile(const char *name) {
+ CHECK(name);
+ if (!common_flags()->coverage) return -1;
+ return CovOpenFile(true /* packed */, name);
+}
+
+void CovBeforeFork() {
+ coverage_data.BeforeFork();
+}
+
+void CovAfterFork(int child_pid) {
+ coverage_data.AfterFork(child_pid);
+}
+
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() {
+ coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()));
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); }
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
+ coverage_data.Init();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) {
+ if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+ if (SANITIZER_ANDROID) {
+ // dlopen/dlclose interceptors do not work on Android, so we rely on
+ // Extend() calls to update .sancov.map.
+ CovUpdateMapping(GET_CALLER_PC());
+ }
+ coverage_data.Extend(npcs);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __sanitizer_maybe_open_cov_file(const char *name) {
+ return MaybeOpenCovFile(name);
+}
+} // extern "C"
--- /dev/null
+//===-- sanitizer_coverage_mapping.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Mmap-based implementation of sanitizer coverage.
+//
+// This is part of the implementation of code coverage that does not require
+// __sanitizer_cov_dump() call. Data is stored in 2 files per process.
+//
+// $pid.sancov.map describes process memory layout in the following text-based
+// format:
+// <pointer size in bits> // 1 line, 32 or 64
+// <mapping start> <mapping end> <base address> <dso name> // repeated
+// ...
+// Mapping lines are NOT sorted. This file is updated every time memory layout
+// is changed (i.e. in dlopen() and dlclose() interceptors).
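+// For example (illustrative values, hex as written by CovUpdateMapping):
+//   64
+//   400000 401000 400000 a.out
+//   7f0012300000 7f0012301000 7f0012300000 libfoo.so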
+//
+// $pid.sancov.raw is a binary dump of PC values, sizeof(uptr) each. Again, not
+// sorted. This file is extended by 64Kb at a time and mapped into memory. It
+// contains one or more 0 words at the end, up to the next 64Kb aligned offset.
+//
+// To convert these 2 files to the usual .sancov format, run sancov.py rawunpack
+// $pid.sancov.raw.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static const uptr kMaxNumberOfModules = 1 << 14;
+static const uptr kMaxTextSize = 64 * 1024;
+
+struct CachedMapping {
+ public:
+ bool NeedsUpdate(uptr pc) {
+ int new_pid = internal_getpid();
+ if (last_pid == new_pid && pc && pc >= last_range_start &&
+ pc < last_range_end)
+ return false;
+ last_pid = new_pid;
+ return true;
+ }
+
+ void SetModuleRange(uptr start, uptr end) {
+ last_range_start = start;
+ last_range_end = end;
+ }
+
+ private:
+ uptr last_range_start, last_range_end;
+ int last_pid;
+};
+
+static CachedMapping cached_mapping;
+static StaticSpinMutex mapping_mu;
+
+void CovUpdateMapping(uptr caller_pc) {
+ if (!common_flags()->coverage || !common_flags()->coverage_direct) return;
+
+ SpinMutexLock l(&mapping_mu);
+
+ if (!cached_mapping.NeedsUpdate(caller_pc))
+ return;
+
+ InternalScopedString text(kMaxTextSize);
+ InternalScopedBuffer<char> modules_data(kMaxNumberOfModules *
+ sizeof(LoadedModule));
+ LoadedModule *modules = (LoadedModule *)modules_data.data();
+ CHECK(modules);
+ int n_modules = GetListOfModules(modules, kMaxNumberOfModules,
+ /* filter */ 0);
+
+ text.append("%d\n", sizeof(uptr) * 8);
+ for (int i = 0; i < n_modules; ++i) {
+ char *module_name = StripModuleName(modules[i].full_name());
+ for (unsigned j = 0; j < modules[i].n_ranges(); ++j) {
+ if (modules[i].address_range_executable(j)) {
+ uptr start = modules[i].address_range_start(j);
+ uptr end = modules[i].address_range_end(j);
+ uptr base = modules[i].base_address();
+ text.append("%zx %zx %zx %s\n", start, end, base, module_name);
+ if (caller_pc && caller_pc >= start && caller_pc < end)
+ cached_mapping.SetModuleRange(start, end);
+ }
+ }
+ InternalFree(module_name);
+ }
+
+ int err;
+ InternalScopedString tmp_path(64 +
+ internal_strlen(common_flags()->coverage_dir));
+ uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(),
+ "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir,
+ internal_getpid());
+ CHECK_LE(res, tmp_path.size());
+ uptr map_fd = OpenFile(tmp_path.data(), true);
+ if (internal_iserror(map_fd)) {
+ Report(" Coverage: failed to open %s for writing\n", tmp_path.data());
+ Die();
+ }
+
+ res = internal_write(map_fd, text.data(), text.length());
+ if (internal_iserror(res, &err)) {
+ Printf("sancov.map write failed: %d\n", err);
+ Die();
+ }
+ internal_close(map_fd);
+
+ InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir));
+ res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map",
+ common_flags()->coverage_dir, internal_getpid());
+ CHECK_LE(res, path.size());
+ res = internal_rename(tmp_path.data(), path.data());
+ if (internal_iserror(res, &err)) {
+ Printf("sancov.map rename failed: %d\n", err);
+ Die();
+ }
+}
+
+} // namespace __sanitizer
id = id_gen++;
}
CHECK_LE(id, kMaxMutex);
- VPrintf(3, "#%llu: DD::allocateId assign id %d\n",
- cb->lt->ctx, id);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
return id;
}
IntrusiveList<FlagDescription> flag_descriptions;
+// If set, the tool will install its own SEGV signal handler by default.
+#ifndef SANITIZER_NEEDS_SEGV
+# define SANITIZER_NEEDS_SEGV 1
+#endif
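+// (Because of the #ifndef guard, a tool can pre-define SANITIZER_NEEDS_SEGV=0
+// at build time to opt out of the default handler.)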
+
void SetCommonFlagsDefaults(CommonFlags *f) {
f->symbolize = true;
f->external_symbolizer_path = 0;
f->legacy_pthread_cond = false;
f->intercept_tls_get_addr = false;
f->coverage = false;
+ f->coverage_direct = SANITIZER_ANDROID;
+ f->coverage_dir = ".";
f->full_address_space = false;
+ f->suppressions = "";
+ f->print_suppressions = true;
+ f->disable_coredump = (SANITIZER_WORDSIZE == 64);
}
void ParseCommonFlagsFromString(CommonFlags *f, const char *str) {
ParseFlag(str, &f->coverage, "coverage",
"If set, coverage information will be dumped at program shutdown (if the "
"coverage instrumentation was enabled at compile time).");
+ ParseFlag(str, &f->coverage_direct, "coverage_direct",
+ "If set, coverage information will be dumped directly to a memory "
+ "mapped file. This way data is not lost even if the process is "
+ "suddenly killed.");
+ ParseFlag(str, &f->coverage_dir, "coverage_dir",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.");
ParseFlag(str, &f->full_address_space, "full_address_space",
"Sanitize complete address space; "
"by default kernel area on 32-bit platforms will not be sanitized");
+ ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name.");
+ ParseFlag(str, &f->print_suppressions, "print_suppressions",
+ "Print matched suppressions at exit.");
+ ParseFlag(str, &f->disable_coredump, "disable_coredump",
+ "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
+ "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ "default and for sanitizers that don't reserve lots of virtual memory.");
// Do a sanity check for certain flags.
if (f->malloc_context_size < 1)
pos = internal_strstr(env, name);
if (pos == 0)
return false;
- if (pos != env && ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) {
+ const char *name_end = pos + internal_strlen(name);
+ if ((pos != env &&
+ ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) ||
+ *name_end != '=') {
// Seems to be middle of another flag name or value.
env = pos + 1;
continue;
}
+ pos = name_end;
break;
}
- pos += internal_strlen(name);
const char *end;
if (pos[0] != '=') {
end = pos;
bool help;
uptr mmap_limit_mb;
bool coverage;
+ bool coverage_direct;
+ const char *coverage_dir;
bool full_address_space;
+ const char *suppressions;
+ bool print_suppressions;
+ bool disable_coredump;
};
inline CommonFlags *common_flags() {
--- /dev/null
+//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FREEBSD_H
+#define SANITIZER_FREEBSD_H
+
+#include "sanitizer_internal_defs.h"
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# include <link.h>
+# include <sys/param.h>
+# include <ucontext.h>
+
+namespace __sanitizer {
+
+typedef unsigned long long __xuint64_t;
+
+typedef __int32_t __xregister_t;
+
+typedef struct __xmcontext {
+ __xregister_t mc_onstack;
+ __xregister_t mc_gs;
+ __xregister_t mc_fs;
+ __xregister_t mc_es;
+ __xregister_t mc_ds;
+ __xregister_t mc_edi;
+ __xregister_t mc_esi;
+ __xregister_t mc_ebp;
+ __xregister_t mc_isp;
+ __xregister_t mc_ebx;
+ __xregister_t mc_edx;
+ __xregister_t mc_ecx;
+ __xregister_t mc_eax;
+ __xregister_t mc_trapno;
+ __xregister_t mc_err;
+ __xregister_t mc_eip;
+ __xregister_t mc_cs;
+ __xregister_t mc_eflags;
+ __xregister_t mc_esp;
+ __xregister_t mc_ss;
+
+ int mc_len;
+ int mc_fpformat;
+ int mc_ownedfp;
+ __xregister_t mc_flags;
+
+ int mc_fpstate[128] __aligned(16);
+ __xregister_t mc_fsbase;
+ __xregister_t mc_gsbase;
+ __xregister_t mc_xfpustate;
+ __xregister_t mc_xfpustate_len;
+
+ int mc_spare2[4];
+} xmcontext_t;
+
+typedef struct __xucontext {
+ sigset_t uc_sigmask;
+ xmcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+ int __spare__[4];
+} xucontext_t;
+
+struct xkinfo_vmentry {
+ int kve_structsize;
+ int kve_type;
+ __xuint64_t kve_start;
+ __xuint64_t kve_end;
+ __xuint64_t kve_offset;
+ __xuint64_t kve_vn_fileid;
+ __uint32_t kve_vn_fsid;
+ int kve_flags;
+ int kve_resident;
+ int kve_private_resident;
+ int kve_protection;
+ int kve_ref_count;
+ int kve_shadow_count;
+ int kve_vn_type;
+ __xuint64_t kve_vn_size;
+ __uint32_t kve_vn_rdev;
+ __uint16_t kve_vn_mode;
+ __uint16_t kve_status;
+ int _kve_ispare[12];
+ char kve_path[PATH_MAX];
+};
+
+typedef struct {
+ __uint32_t p_type;
+ __uint32_t p_offset;
+ __uint32_t p_vaddr;
+ __uint32_t p_paddr;
+ __uint32_t p_filesz;
+ __uint32_t p_memsz;
+ __uint32_t p_flags;
+ __uint32_t p_align;
+} XElf32_Phdr;
+
+struct xdl_phdr_info {
+ Elf_Addr dlpi_addr;
+ const char *dlpi_name;
+ const XElf32_Phdr *dlpi_phdr;
+ Elf_Half dlpi_phnum;
+ unsigned long long int dlpi_adds;
+ unsigned long long int dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info*, size_t, void*);
+typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void*);
+
+#define xdl_iterate_phdr(callback, param) \
+ (((xdl_iterate_phdr_t*) dl_iterate_phdr)((callback), (param)))
+
+} // namespace __sanitizer
+
+# endif // __FreeBSD_version <= 902001
+#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+
+#endif // SANITIZER_FREEBSD_H
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
#endif
-// If set, the tool will install its own SEGV signal handler.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
+// We can use .preinit_array section on Linux to call sanitizer initialization
+// functions very early in the process startup (unless PIC macro is defined).
+// FIXME: do we have anything like this on Mac?
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !defined(PIC)
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
+#else
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
#endif
// GCC does not understand __has_feature
uptr internal_read(fd_t fd, void *buf, uptr count);
uptr internal_write(fd_t fd, const void *buf, uptr count);
+uptr internal_ftruncate(fd_t fd, uptr size);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.
uptr internal_dup2(int oldfd, int newfd);
uptr internal_readlink(const char *path, char *buf, uptr bufsize);
uptr internal_unlink(const char *path);
+uptr internal_rename(const char *oldpath, const char *newpath);
void NORETURN internal__exit(int exitcode);
uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
#include "sanitizer_libignore.h"
#include "sanitizer_flags.h"
} // namespace __sanitizer
-#endif // #if SANITIZER_LINUX
+#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
-#include <unwind.h>
#if SANITIZER_FREEBSD
+#include <sys/sysctl.h>
#include <machine/atomic.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
// FreeBSD 9.2 and 10.0.
#include <sys/umtx.h>
}
+extern char **environ; // provided by crt1
#endif // SANITIZER_FREEBSD
#if !SANITIZER_ANDROID
uptr OpenFile(const char *filename, bool write) {
return internal_open(filename,
- write ? O_WRONLY | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
+ write ? O_RDWR | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660);
}
uptr internal_read(fd_t fd, void *buf, uptr count) {
return res;
}
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, size));
+ return res;
+}
+
#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
static void stat64_to_stat(struct stat64 *in, struct stat *out) {
internal_memset(out, 0, sizeof(*out));
#endif
}
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath);
+#else
+ return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
+}
+
uptr internal_sched_yield() {
return internal_syscall(SYSCALL(sched_yield));
}
return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}
-// Like getenv, but reads env directly from /proc and does not use libc.
-// This function should be called first inside __asan_init.
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on FreeBSD) and does not use libc. This function should be
+// called first inside __asan_init.
const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD
+ if (::environ != 0) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = ::environ; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return 0; // Not found.
+#elif SANITIZER_LINUX
static char *environ;
static uptr len;
static bool inited;
p = endp + 1;
}
return 0; // Not found.
+#else
+#error "Unsupported platform"
+#endif
}
extern "C" {
}
#endif // SANITIZER_GO
-void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
- // Some kinds of sandboxes may forbid filesystem access, so we won't be able
- // to read the file mappings from /proc/self/maps. Luckily, neither the
- // process will be able to load additional libraries, so it's fine to use the
- // cached mappings.
- MemoryMappingLayout::CacheMemoryMappings();
- // Same for /proc/self/exe in the symbolizer.
-#if !SANITIZER_GO
- if (Symbolizer *sym = Symbolizer::GetOrNull())
- sym->PrepareForSandboxing();
- CovPrepareForSandboxing(args);
-#endif
-}
-
enum MutexState {
MtxUnlocked = 0,
MtxLocked = 1,
}
int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
return internal_syscall(SYSCALL(fork));
+#endif
}
#if SANITIZER_LINUX
static uptr proc_self_exe_cache_len = 0;
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ if (proc_self_exe_cache_len > 0) {
+ // If available, use the cached module name.
+ uptr module_name_len =
+ internal_snprintf(buf, buf_len, "%s", proc_self_exe_cache_str);
+ CHECK_LT(module_name_len, buf_len);
+ return module_name_len;
+ }
+#if SANITIZER_FREEBSD
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+ size_t Size = buf_len;
+ bool IsErr = (sysctl(Mib, 4, buf, &Size, NULL, 0) != 0);
+ int readlink_error = IsErr ? errno : 0;
+ uptr module_name_len = Size;
+#else
uptr module_name_len = internal_readlink(
"/proc/self/exe", buf, buf_len);
int readlink_error;
- if (internal_iserror(module_name_len, &readlink_error)) {
- if (proc_self_exe_cache_len) {
- // If available, use the cached module name.
- CHECK_LE(proc_self_exe_cache_len, buf_len);
- internal_strncpy(buf, proc_self_exe_cache_str, buf_len);
- module_name_len = internal_strlen(proc_self_exe_cache_str);
- } else {
- // We can't read /proc/self/exe for some reason, assume the name of the
- // binary is unknown.
- Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
- "some stack frames may not be symbolized\n", readlink_error);
- module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
- }
+ bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif
+ if (IsErr) {
+ // We can't read /proc/self/exe for some reason, assume the name of the
+ // binary is unknown.
+ Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe");
CHECK_LT(module_name_len, buf_len);
- buf[module_name_len] = '\0';
}
return module_name_len;
}
#endif // defined(__x86_64__) && SANITIZER_LINUX
#if SANITIZER_ANDROID
+static atomic_uint8_t android_log_initialized;
+
+void AndroidLogInit() {
+ atomic_store(&android_log_initialized, 1, memory_order_release);
+}
// This thing is not, strictly speaking, async signal safe, but it does not seem
// to cause any issues. Alternative is writing to log devices directly, but
// their location and message format might change in the future, so we'd really
// like to avoid that.
void AndroidLogWrite(const char *buffer) {
+ if (!atomic_load(&android_log_initialized, memory_order_acquire))
+ return;
+
char *copy = internal_strdup(buffer);
char *p = copy;
char *q;
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_freebsd.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_atomic.h"
+#include "sanitizer_symbolizer.h"
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <dlfcn.h> // for dlsym()
+#endif
-#include <dlfcn.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
-#if SANITIZER_FREEBSD
-#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
-#endif
-#include <unwind.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
+#include <osreldate.h>
#define pthread_getattr_np pthread_attr_get_np
#endif
#endif
}
-//------------------------- SlowUnwindStack -----------------------------------
-
-typedef struct {
- uptr absolute_pc;
- uptr stack_top;
- uptr stack_size;
-} backtrace_frame_t;
-
-extern "C" {
-typedef void *(*acquire_my_map_info_list_func)();
-typedef void (*release_my_map_info_list_func)(void *map);
-typedef sptr (*unwind_backtrace_signal_arch_func)(
- void *siginfo, void *sigcontext, void *map_info_list,
- backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
-acquire_my_map_info_list_func acquire_my_map_info_list;
-release_my_map_info_list_func release_my_map_info_list;
-unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
-} // extern "C"
-
-#if SANITIZER_ANDROID
-void SanitizerInitializeUnwinder() {
- void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
- if (!p) {
- VReport(1,
- "Failed to open libcorkscrew.so. You may see broken stack traces "
- "in SEGV reports.");
- return;
- }
- acquire_my_map_info_list =
- (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
- release_my_map_info_list =
- (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
- unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
- p, "unwind_backtrace_signal_arch");
- if (!acquire_my_map_info_list || !release_my_map_info_list ||
- !unwind_backtrace_signal_arch) {
- VReport(1,
- "Failed to find one of the required symbols in libcorkscrew.so. "
- "You may see broken stack traces in SEGV reports.");
- acquire_my_map_info_list = NULL;
- unwind_backtrace_signal_arch = NULL;
- release_my_map_info_list = NULL;
- }
-}
-#endif
-
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
-#endif
-}
-
-struct UnwindTraceArg {
- StackTrace *stack;
- uptr max_depth;
-};
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
- UnwindTraceArg *arg = (UnwindTraceArg*)param;
- CHECK_LT(arg->stack->size, arg->max_depth);
- uptr pc = Unwind_GetIP(ctx);
- arg->stack->trace[arg->stack->size++] = pc;
- if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
- CHECK_GE(max_depth, 2);
- size = 0;
- UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
- _Unwind_Backtrace(Unwind_Trace, &arg);
- // We need to pop a few frames so that pc is on top.
- uptr to_pop = LocatePcInTrace(pc);
- // trace[0] belongs to the current function so we always pop it.
- if (to_pop == 0)
- to_pop = 1;
- PopStackFrames(to_pop);
- trace[0] = pc;
-}
-
-void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
- uptr max_depth) {
- CHECK_GE(max_depth, 2);
- if (!unwind_backtrace_signal_arch) {
- SlowUnwindStack(pc, max_depth);
- return;
- }
-
- void *map = acquire_my_map_info_list();
- CHECK(map);
- InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
- // siginfo argument appears to be unused.
- sptr res = unwind_backtrace_signal_arch(/* siginfo */ NULL, context, map,
- frames.data(),
- /* ignore_depth */ 0, max_depth);
- release_my_map_info_list(map);
- if (res < 0) return;
- CHECK_LE((uptr)res, kStackTraceMax);
-
- size = 0;
- // +2 compensate for libcorkscrew unwinder returning addresses of call
- // instructions instead of raw return addresses.
- for (sptr i = 0; i < res; ++i)
- trace[size++] = frames[i].absolute_pc + 2;
-}
-
#if !SANITIZER_FREEBSD
static uptr g_tls_size;
#endif
static atomic_uintptr_t kThreadDescriptorSize;
uptr ThreadDescriptorSize() {
- char buf[64];
uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed);
if (val)
return val;
#ifdef _CS_GNU_LIBC_VERSION
+ char buf[64];
uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
if (len < sizeof(buf) && internal_strncmp(buf, "glibc 2.", 8) == 0) {
char *end;
#else // SANITIZER_ANDROID
# if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
+# elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+# define Elf_Phdr XElf32_Phdr
+# define dl_phdr_info xdl_phdr_info
+# define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
# endif
struct DlIteratePhdrData {
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
- cur_module->addAddressRange(cur_beg, cur_end);
+ bool executable = phdr->p_flags & PF_X;
+ cur_module->addAddressRange(cur_beg, cur_end, executable);
}
}
return 0;
indirect_call_wrapper = wrapper;
}
+void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+ // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+ // to read the file mappings from /proc/self/maps. Luckily, neither the
+ // process will be able to load additional libraries, so it's fine to use the
+ // cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+ // Same for /proc/self/exe in the symbolizer.
+#if !SANITIZER_GO
+ Symbolizer::GetOrInit()->PrepareForSandboxing();
+ CovPrepareForSandboxing(args);
+#endif
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
return fork();
}
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ return rename(oldpath, newpath);
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
--- /dev/null
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
--- /dev/null
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that does not support free() nor realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator: current position.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+ // Optimistic lock-free allocation: essentially, try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return 0;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+ // First, try to allocate optimistically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+ // If that failed: lock, retry, and allocate a new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
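+
+// Usage sketch: `void *p = PersistentAlloc(n);` hands out n bytes that are
+// never freed; the stack depot, for instance, keeps its immortal storage here.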
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
# define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_FREEBSD
+# define SI_FREEBSD 1
+#else
+# define SI_FREEBSD 0
+#endif
+
#if SANITIZER_LINUX
# define SI_LINUX 1
#else
#define SANITIZER_INTERCEPT_STRPTIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SCANF SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
#ifndef SANITIZER_INTERCEPT_PRINTF
# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
-# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
#endif
#define SANITIZER_INTERCEPT_FREXP 1
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_GETPWENT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_GETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SETPWENT SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX
+#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
(defined(__i386) || defined (__x86_64)) // NOLINT
#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_SIGSETOPS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SIGSETOPS \
+ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_STATFS SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STATFS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
-#define SANITIZER_INTERCEPT_ETHER SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_ETHER_HOST SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SHMCTL \
(SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX
+#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID
// FIXME: getline seems to be available on OSX 10.7
#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT__EXIT SI_LINUX
+#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD
#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
+#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
#include <asm/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
#include <linux/aio_abi.h>
-#if SANITIZER_ANDROID
-#include <asm/statfs.h>
-#else
-#include <sys/statfs.h>
-#endif
-
#if !SANITIZER_ANDROID
+#include <sys/statfs.h>
#include <linux/perf_event.h>
#endif
namespace __sanitizer {
+#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif
} // namespace __sanitizer
-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+ && !defined(__mips__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
-#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#if !SANITIZER_ANDROID
#include <sys/mount.h>
+#include <sys/timeb.h>
#endif
#if SANITIZER_LINUX
unsigned struct_tms_sz = sizeof(struct tms);
unsigned struct_sigevent_sz = sizeof(struct sigevent);
unsigned struct_sched_param_sz = sizeof(struct sched_param);
- unsigned struct_statfs_sz = sizeof(struct statfs);
+
#if SANITIZER_MAC && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
#endif // SANITIZER_MAC && !SANITIZER_IOS
#if !SANITIZER_ANDROID
+ unsigned struct_statfs_sz = sizeof(struct statfs);
unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
unsigned ucontext_t_sz = sizeof(ucontext_t);
#endif // !SANITIZER_ANDROID
int ptrace_setfpregs = PTRACE_SETFPREGS;
int ptrace_getfpxregs = PTRACE_GETFPXREGS;
int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+ int ptrace_geteventmsg = PTRACE_GETEVENTMSG;
#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
(defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
int ptrace_getsiginfo = PTRACE_GETSIGINFO;
unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
#endif
- unsigned IOCTL_NOT_PRESENT = 0;
+ const unsigned IOCTL_NOT_PRESENT = 0;
unsigned IOCTL_FIOASYNC = FIOASYNC;
unsigned IOCTL_FIOCLEX = FIOCLEX;
CHECK_TYPE_SIZE(clock_t);
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(clockid_t);
+#endif
+
#if !SANITIZER_ANDROID
CHECK_TYPE_SIZE(ifaddrs);
CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
COMPILER_CHECK(sizeof(__sanitizer_mallinfo) == sizeof(struct mallinfo));
#endif
+#if !SANITIZER_ANDROID
CHECK_TYPE_SIZE(timeb);
CHECK_SIZE_AND_OFFSET(timeb, time);
CHECK_SIZE_AND_OFFSET(timeb, millitm);
CHECK_SIZE_AND_OFFSET(timeb, timezone);
CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+#endif
CHECK_TYPE_SIZE(passwd);
CHECK_SIZE_AND_OFFSET(passwd, pw_name);
extern unsigned struct_itimerspec_sz;
extern unsigned struct_sigevent_sz;
extern unsigned struct_sched_param_sz;
- extern unsigned struct_statfs_sz;
extern unsigned struct_statfs64_sz;
- extern unsigned struct_sockaddr_sz;
#if !SANITIZER_ANDROID
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_sockaddr_sz;
extern unsigned ucontext_t_sz;
#endif // !SANITIZER_ANDROID
#elif defined(__powerpc64__)
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__mips__)
+ #if SANITIZER_WORDSIZE == 64
+ const unsigned struct_kernel_stat_sz = 216;
+ #else
+ const unsigned struct_kernel_stat_sz = 144;
+ #endif
+ const unsigned struct_kernel_stat64_sz = 104;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
unsigned __seq;
u64 __unused1;
u64 __unused2;
+#elif defined(__mips__)
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
#else
unsigned short mode;
unsigned short __pad1;
u64 shm_ctime;
#else
uptr shm_atime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused1;
#endif
uptr shm_dtime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused2;
#endif
uptr shm_ctime;
- #ifndef _LP64
+ #if !defined(_LP64) && !defined(__mips__)
uptr __unused3;
#endif
#endif
typedef long __sanitizer_clock_t;
#endif
+#if SANITIZER_LINUX
+ typedef int __sanitizer_clockid_t;
+#endif
+
#if SANITIZER_LINUX || SANITIZER_FREEBSD
-#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\
+ || defined(__mips__)
typedef unsigned __sanitizer___kernel_uid_t;
typedef unsigned __sanitizer___kernel_gid_t;
#else
typedef long __sanitizer___kernel_off_t;
#endif
-#if defined(__powerpc__) || defined(__aarch64__)
+#if defined(__powerpc__) || defined(__aarch64__) || defined(__mips__)
typedef unsigned int __sanitizer___kernel_old_uid_t;
typedef unsigned int __sanitizer___kernel_old_gid_t;
#else
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
struct __sanitizer_sigaction {
+#if defined(__mips__) && !SANITIZER_FREEBSD
+ unsigned int sa_flags;
+#endif
union {
void (*sigaction)(int sig, void *siginfo, void *uctx);
void (*handler)(int sig);
__sanitizer_sigset_t sa_mask;
#else
__sanitizer_sigset_t sa_mask;
+#ifndef __mips__
int sa_flags;
#endif
+#endif
#if SANITIZER_LINUX
void (*sa_restorer)();
+#endif
+#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+ int sa_resv[1];
#endif
};
extern int ptrace_setsiginfo;
extern int ptrace_getregset;
extern int ptrace_setregset;
+ extern int ptrace_geteventmsg;
#endif
#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__)
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
- extern unsigned IOCTL_NOT_PRESENT;
+ extern const unsigned IOCTL_NOT_PRESENT;
extern unsigned IOCTL_FIOASYNC;
extern unsigned IOCTL_FIOCLEX;
return internal_iserror(map) ? 0 : (void *)map;
}
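+// Maps a writable view of `fd` at `offset`; when `addr` is non-null the view
+// is placed exactly there via MAP_FIXED (CoverageData::Extend relies on this
+// to grow the pc array file mapping in place).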
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) {
+ uptr flags = MAP_SHARED;
+ if (addr) flags |= MAP_FIXED;
+ uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
+ if (internal_iserror(p)) {
+ Printf("could not map writable file (%zd, %zu, %zu): %zd\n", fd, offset,
+ size, p);
+ return 0;
+ }
+ return (void *)p;
+}
static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
uptr start2, uptr end2) {
madvise((void*)addr, size, MADV_DONTNEED);
}
-void DisableCoreDumper() {
- struct rlimit nocore;
- nocore.rlim_cur = 0;
- nocore.rlim_max = 0;
- setrlimit(RLIMIT_CORE, &nocore);
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The 'volatile' below prevents clang from replacing the stores with memset.
+ volatile struct rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ if (setrlimit(res, (struct rlimit*)&rlim)) {
+ Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+}
+
+void DisableCoreDumperIfNecessary() {
+ if (common_flags()->disable_coredump) {
+ setlim(RLIMIT_CORE, 0);
+ }
}
bool StackSizeIsUnlimited() {
- struct rlimit rlim;
- CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
- return ((uptr)rlim.rlim_cur == (uptr)-1);
+ rlim_t stack_size = getlim(RLIMIT_STACK);
+ return (stack_size == RLIM_INFINITY);
}
void SetStackSizeLimitInBytes(uptr limit) {
- struct rlimit rlim;
- rlim.rlim_cur = limit;
- rlim.rlim_max = limit;
- if (setrlimit(RLIMIT_STACK, &rlim)) {
- Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
- Die();
- }
+ setlim(RLIMIT_STACK, (rlim_t)limit);
CHECK(!StackSizeIsUnlimited());
}
+bool AddressSpaceIsUnlimited() {
+ rlim_t as_size = getlim(RLIMIT_AS);
+ return (as_size == RLIM_INFINITY);
+}
+
+void SetAddressSpaceUnlimited() {
+ setlim(RLIMIT_AS, RLIM_INFINITY);
+ CHECK(AddressSpaceIsUnlimited());
+}
+
void SleepForSeconds(int seconds) {
sleep(seconds);
}
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
sigact.sa_sigaction = (sa_sigaction_t)handler;
- sigact.sa_flags = SA_SIGINFO;
+ // Do not block the signal from being received in that signal's handler.
+ // Clients are responsible for handling this correctly.
+ sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
CHECK_EQ(0, internal_sigaction(signum, &sigact, 0));
VReport(1, "Installed the sigaction for signal %d\n", signum);
}
#endif // SANITIZER_GO
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ uptr page_size = GetPageSizeCached();
+ // Checking very large memory ranges is slow.
+ CHECK_LT(size, page_size * 10);
+ int sock_pair[2];
+ if (pipe(sock_pair))
+ return false;
+ uptr bytes_written =
+ internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
+ int write_errno;
+ bool result;
+ if (internal_iserror(bytes_written, &write_errno)) {
+ CHECK_EQ(EFAULT, write_errno);
+ result = false;
+ } else {
+ result = (bytes_written == size);
+ }
+ internal_close(sock_pair[0]);
+ internal_close(sock_pair[1]);
+ return result;
+}
+
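A note on the probe above: IsAccessibleMemoryRange() relies on the kernel returning EFAULT when write() is handed an unreadable buffer, so no signal handling is needed. A minimal standalone sketch of the same trick outside the runtime (names hypothetical, not part of the patch):

#include <cstddef>
#include <cstdio>
#include <unistd.h>

// Probe readability of [beg, beg+size) by writing it into a pipe: the
// kernel returns -1/EFAULT from write() iff the range is not readable.
static bool ProbeReadable(const void *beg, size_t size) {
  int fds[2];
  if (pipe(fds)) return false;
  ssize_t n = write(fds[1], beg, size);
  close(fds[0]);
  close(fds[1]);
  return n == (ssize_t)size;
}

int main() {
  int x = 42;
  std::printf("stack int readable: %d\n", ProbeReadable(&x, sizeof x));
  std::printf("null page readable: %d\n", ProbeReadable(nullptr, 1));
}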
} // namespace __sanitizer
#endif // SANITIZER_POSIX
#include <stdio.h>
#include <stdarg.h>
-#if SANITIZER_WINDOWS && !defined(va_copy)
+#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
+ !defined(va_copy)
# define va_copy(dst, src) ((dst) = (src))
#endif
uptr mmaped_size;
uptr len;
};
+
+// Reads process memory map in an OS-specific way.
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
class MemoryMappingLayout {
// platform-specific files.
# if SANITIZER_FREEBSD || SANITIZER_LINUX
ProcSelfMapsBuff proc_self_maps_;
- char *current_;
+ const char *current_;
// Static mappings cache.
static ProcSelfMapsBuff cached_proc_self_maps_;
// Returns code range for the specified module.
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
+bool IsDecimal(char c);
+uptr ParseDecimal(const char **p);
+bool IsHex(char c);
+uptr ParseHex(const char **p);
+
} // namespace __sanitizer
#endif // SANITIZER_PROCMAPS_H
--- /dev/null
+//===-- sanitizer_procmaps_common.cc --------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (common parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+// Linker initialized.
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
+StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
+
+static int TranslateDigit(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+// Parse a number and advance 'p' to the first non-digit character.
+static uptr ParseNumber(const char **p, int base) {
+ uptr n = 0;
+ int d;
+ CHECK(base >= 2 && base <= 16);
+ while ((d = TranslateDigit(**p)) >= 0 && d < base) {
+ n = n * base + d;
+ (*p)++;
+ }
+ return n;
+}
+
+bool IsDecimal(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 10;
+}
+
+uptr ParseDecimal(const char **p) {
+ return ParseNumber(p, 10);
+}
+
+bool IsHex(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 16;
+}
+
+uptr ParseHex(const char **p) {
+ return ParseNumber(p, 16);
+}
+
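For reference, a hedged sketch of how these helpers are typically driven when scanning a /proc/self/maps entry (the input string and variable names here are made up for illustration):

// Hypothetical input in the usual "beg-end perms offset ..." layout.
const char *p = "7f2c30000000-7f2c30021000 r-xp 0001a000";
uptr beg = ParseHex(&p);  // consumes "7f2c30000000"; p now points at '-'
p++;                      // skip '-'
uptr end = ParseHex(&p);  // consumes "7f2c30021000"; p now points at ' '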
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ ReadProcMaps(&proc_self_maps_);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
+ }
+ Reset();
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (proc_self_maps_.data != cached_proc_self_maps_.data) {
+ UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
+ }
+}
+
+void MemoryMappingLayout::Reset() {
+ current_ = proc_self_maps_.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ SpinMutexLock l(&cache_lock_);
+ // Don't invalidate the cache if the mappings are unavailable.
+ ProcSelfMapsBuff old_proc_self_maps;
+ old_proc_self_maps = cached_proc_self_maps_;
+ ReadProcMaps(&cached_proc_self_maps_);
+ if (cached_proc_self_maps_.mmaped_size == 0) {
+ cached_proc_self_maps_ = old_proc_self_maps;
+ } else {
+ if (old_proc_self_maps.mmaped_size) {
+ UnmapOrDie(old_proc_self_maps.data,
+ old_proc_self_maps.mmaped_size);
+ }
+ }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock_);
+ if (cached_proc_self_maps_.data) {
+ proc_self_maps_ = cached_proc_self_maps_;
+ }
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+ uptr max_modules,
+ string_predicate_t filter) {
+ Reset();
+ uptr cur_beg, cur_end, cur_offset, prot;
+ InternalScopedBuffer<char> module_name(kMaxPathLength);
+ uptr n_modules = 0;
+ for (uptr i = 0; n_modules < max_modules &&
+ Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+ module_name.size(), &prot);
+ i++) {
+ const char *cur_name = module_name.data();
+ if (cur_name[0] == '\0')
+ continue;
+ if (filter && !filter(cur_name))
+ continue;
+ void *mem = &modules[n_modules];
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in the
+ // process maps is likely the binary itself (all dynamic libs are
+ // mapped higher in the address space). For such a binary, the
+ // instruction offset in the binary coincides with the actual
+ // instruction address in virtual memory (as the code section is
+ // mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are mapped
+ // high in the address space (in particular, higher than the tool's
+ // shadow memory), so the module can't be the first entry.
+ uptr base_address = (i ? cur_beg : 0) - cur_offset;
+ LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ n_modules++;
+ }
+ return n_modules;
+}
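A worked example of the base_address arithmetic above (numbers hypothetical): the non-PIE main binary is matched by i == 0 and typically has cur_offset == 0, so base_address = 0 - 0 = 0 and PCs can be looked up directly. A PIE library whose first mapping begins at cur_beg = 0x7f2c30000000 with file offset cur_offset = 0x1000 gets base_address = 0x7f2c30000000 - 0x1000 = 0x7f2c2ffff000, which the symbolizer can subtract from a PC to recover the offset within the file.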
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = 0;
+ uptr smaps_cap = 0;
+ uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
+ &smaps, &smaps_cap, 64<<20);
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ParseHex(&pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ while (!IsDecimal(*pos)) pos++;
+ uptr rss = ParseDecimal(&pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
--- /dev/null
+//===-- sanitizer_procmaps_freebsd.cc -------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (FreeBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_common.h"
+#include "sanitizer_freebsd.h"
+#include "sanitizer_procmaps.h"
+
+#include <unistd.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+
+// Fix the 'kinfo_vmentry' definition on FreeBSD prior to v9.2 in 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# define kinfo_vmentry xkinfo_vmentry
+# endif
+#endif
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+ size_t Size = 0;
+ int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+
+ proc_maps->data = (char*)VmMap;
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+ char filename[], uptr filename_size,
+ uptr *protection) {
+ char *last = proc_self_maps_.data + proc_self_maps_.len;
+ if (current_ >= last) return false;
+ uptr dummy;
+ if (!start) start = &dummy;
+ if (!end) end = &dummy;
+ if (!offset) offset = &dummy;
+ if (!protection) protection = &dummy;
+ struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
+
+ *start = (uptr)VmEntry->kve_start;
+ *end = (uptr)VmEntry->kve_end;
+ *offset = (uptr)VmEntry->kve_offset;
+
+ *protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ *protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ *protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ *protection |= kProtectionExecute;
+
+ if (filename != NULL && filename_size > 0) {
+ internal_snprintf(filename,
+ Min(filename_size, (uptr)PATH_MAX),
+ "%s", VmEntry->kve_path);
+ }
+
+ current_ += VmEntry->kve_structsize;
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD
//===----------------------------------------------------------------------===//
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#if SANITIZER_LINUX
#include "sanitizer_common.h"
-#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
-#if SANITIZER_FREEBSD
-#include <unistd.h>
-#include <sys/sysctl.h>
-#include <sys/user.h>
-#endif
-
namespace __sanitizer {
-// Linker initialized.
-ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
-StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-
-static void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
-#if SANITIZER_FREEBSD
- const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
- size_t Size = 0;
- int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
- CHECK_EQ(Err, 0);
- CHECK_GT(Size, 0);
-
- size_t MmapedSize = Size * 4 / 3;
- void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
- Size = MmapedSize;
- Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
- CHECK_EQ(Err, 0);
-
- proc_maps->data = (char*)VmMap;
- proc_maps->mmaped_size = MmapedSize;
- proc_maps->len = Size;
-#else
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
proc_maps->len = ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
&proc_maps->mmaped_size, 1 << 26);
-#endif
-}
-
-MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
- ReadProcMaps(&proc_self_maps_);
- if (cache_enabled) {
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
- }
- } else {
- CHECK_GT(proc_self_maps_.mmaped_size, 0);
- }
- Reset();
- // FIXME: in the future we may want to cache the mappings on demand only.
- if (cache_enabled)
- CacheMemoryMappings();
-}
-
-MemoryMappingLayout::~MemoryMappingLayout() {
- // Only unmap the buffer if it is different from the cached one. Otherwise
- // it will be unmapped when the cache is refreshed.
- if (proc_self_maps_.data != cached_proc_self_maps_.data) {
- UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
- }
-}
-
-void MemoryMappingLayout::Reset() {
- current_ = proc_self_maps_.data;
-}
-
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
- SpinMutexLock l(&cache_lock_);
- // Don't invalidate the cache if the mappings are unavailable.
- ProcSelfMapsBuff old_proc_self_maps;
- old_proc_self_maps = cached_proc_self_maps_;
- ReadProcMaps(&cached_proc_self_maps_);
- if (cached_proc_self_maps_.mmaped_size == 0) {
- cached_proc_self_maps_ = old_proc_self_maps;
- } else {
- if (old_proc_self_maps.mmaped_size) {
- UnmapOrDie(old_proc_self_maps.data,
- old_proc_self_maps.mmaped_size);
- }
- }
-}
-
-void MemoryMappingLayout::LoadFromCache() {
- SpinMutexLock l(&cache_lock_);
- if (cached_proc_self_maps_.data) {
- proc_self_maps_ = cached_proc_self_maps_;
- }
-}
-
-#if !SANITIZER_FREEBSD
-// Parse a hex value in str and update str.
-static uptr ParseHex(char **str) {
- uptr x = 0;
- char *s;
- for (s = *str; ; s++) {
- char c = *s;
- uptr v = 0;
- if (c >= '0' && c <= '9')
- v = c - '0';
- else if (c >= 'a' && c <= 'f')
- v = c - 'a' + 10;
- else if (c >= 'A' && c <= 'F')
- v = c - 'A' + 10;
- else
- break;
- x = x * 16 + v;
- }
- *str = s;
- return x;
}
static bool IsOneOf(char c, char c1, char c2) {
return c == c1 || c == c2;
}
-#endif
-
-static bool IsDecimal(char c) {
- return c >= '0' && c <= '9';
-}
-
-static bool IsHex(char c) {
- return (c >= '0' && c <= '9')
- || (c >= 'a' && c <= 'f');
-}
-
-static uptr ReadHex(const char *p) {
- uptr v = 0;
- for (; IsHex(p[0]); p++) {
- if (p[0] >= '0' && p[0] <= '9')
- v = v * 16 + p[0] - '0';
- else
- v = v * 16 + p[0] - 'a' + 10;
- }
- return v;
-}
-
-static uptr ReadDecimal(const char *p) {
- uptr v = 0;
- for (; IsDecimal(p[0]); p++)
- v = v * 10 + p[0] - '0';
- return v;
-}
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size,
if (!end) end = &dummy;
if (!offset) offset = &dummy;
if (!protection) protection = &dummy;
-#if SANITIZER_FREEBSD
- struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
-
- *start = (uptr)VmEntry->kve_start;
- *end = (uptr)VmEntry->kve_end;
- *offset = (uptr)VmEntry->kve_offset;
-
- *protection = 0;
- if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
- *protection |= kProtectionRead;
- if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
- *protection |= kProtectionWrite;
- if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
- *protection |= kProtectionExecute;
-
- if (filename != NULL && filename_size > 0) {
- internal_snprintf(filename,
- Min(filename_size, (uptr)PATH_MAX),
- "%s", VmEntry->kve_path);
- }
-
- current_ += VmEntry->kve_structsize;
-#else // !SANITIZER_FREEBSD
char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
if (next_line == 0)
next_line = last;
if (filename && i < filename_size)
filename[i] = 0;
current_ = next_line + 1;
-#endif // !SANITIZER_FREEBSD
return true;
}
-uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
- uptr max_modules,
- string_predicate_t filter) {
- Reset();
- uptr cur_beg, cur_end, cur_offset;
- InternalScopedBuffer<char> module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0; n_modules < max_modules &&
- Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
- module_name.size(), 0);
- i++) {
- const char *cur_name = module_name.data();
- if (cur_name[0] == '\0')
- continue;
- if (filter && !filter(cur_name))
- continue;
- void *mem = &modules[n_modules];
- // Don't subtract 'cur_beg' from the first entry:
- // * If a binary is compiled w/o -pie, then the first entry in
- // process maps is likely the binary itself (all dynamic libs
- // are mapped higher in address space). For such a binary,
- // instruction offset in binary coincides with the actual
- // instruction address in virtual memory (as code section
- // is mapped to a fixed memory range).
- // * If a binary is compiled with -pie, all the modules are
- // mapped high at address space (in particular, higher than
- // shadow memory of the tool), so the module can't be the
- // first entry.
- uptr base_address = (i ? cur_beg : 0) - cur_offset;
- LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
- cur_module->addAddressRange(cur_beg, cur_end);
- n_modules++;
- }
- return n_modules;
-}
-
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
- char *smaps = 0;
- uptr smaps_cap = 0;
- uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
- &smaps, &smaps_cap, 64<<20);
- uptr start = 0;
- bool file = false;
- const char *pos = smaps;
- while (pos < smaps + smaps_len) {
- if (IsHex(pos[0])) {
- start = ReadHex(pos);
- for (; *pos != '/' && *pos > '\n'; pos++) {}
- file = *pos == '/';
- } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
- for (; *pos < '0' || *pos > '9'; pos++) {}
- uptr rss = ReadDecimal(pos) * 1024;
- cb(start, rss, file, stats, stats_size);
- }
- while (*pos++ != '\n') {}
- }
- UnmapOrDie(smaps, smaps_cap);
-}
-
} // namespace __sanitizer
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_LINUX
bool MemoryMappingLayout::NextSegmentLoad(
uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection) {
- if (protection)
- UNIMPLEMENTED();
const char* lc = current_load_cmd_addr_;
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
if (((const load_command *)lc)->cmd == kLCSegment) {
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
const SegmentCommand* sc = (const SegmentCommand *)lc;
if (start) *start = sc->vmaddr + dlloff;
+ if (protection) {
+ // Return the initial protection.
+ *protection = sc->initprot;
+ }
if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
if (offset) {
if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
uptr max_modules,
string_predicate_t filter) {
Reset();
- uptr cur_beg, cur_end;
+ uptr cur_beg, cur_end, prot;
InternalScopedBuffer<char> module_name(kMaxPathLength);
uptr n_modules = 0;
for (uptr i = 0; n_modules < max_modules &&
Next(&cur_beg, &cur_end, 0, module_name.data(),
- module_name.size(), 0);
+ module_name.size(), &prot);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
cur_module = new(mem) LoadedModule(cur_name, cur_beg);
n_modules++;
}
- cur_module->addAddressRange(cur_beg, cur_end);
+ cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
}
return n_modules;
}
#include "sanitizer_common.h"
namespace __sanitizer {
-class AnsiColorDecorator {
+class SanitizerCommonDecorator {
// FIXME: This is not portable. It assumes the special strings are printed to
// stdout, which is not the case on Windows (see SetConsoleTextAttribute()).
public:
- explicit AnsiColorDecorator(bool use_ansi_colors) : ansi_(use_ansi_colors) { }
+ SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Warning() { return Red(); }
+ const char *EndWarning() { return Default(); }
+ protected:
const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
- const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
private:
bool ansi_;
};
-class SanitizerCommonDecorator: protected AnsiColorDecorator {
- public:
- SanitizerCommonDecorator()
- : __sanitizer::AnsiColorDecorator(ColorizeReports()) { }
- const char *Warning() { return Red(); }
- const char *EndWarning() { return Default(); }
-};
-
} // namespace __sanitizer
#endif // SANITIZER_REPORT_DECORATOR_H
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"
+
#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
+#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
-const int kTabSize = 1024 * 1024; // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
+struct StackDepotDesc {
+ const uptr *stack;
+ uptr size;
+ u32 hash() const {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (size * sizeof(uptr));
+ for (uptr i = 0; i < size; i++) {
+ u32 k = stack[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+ }
+ bool is_valid() { return size > 0 && stack; }
+};
-struct StackDesc {
- StackDesc *link;
+struct StackDepotNode {
+ StackDepotNode *link;
u32 id;
- u32 hash;
+ atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
uptr size;
uptr stack[1]; // [size]
-};
-static struct {
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
- atomic_uintptr_t region_end;
- atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
-} depot;
+ static const u32 kTabSizeLog = 20;
+ // Lower kTabSizeLog bits are equal for all items in one bucket.
+ // We use these bits to store the per-stack use counter.
+ static const u32 kUseCountBits = kTabSizeLog;
+ static const u32 kMaxUseCount = 1 << kUseCountBits;
+ static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+ static const u32 kHashMask = ~kUseCountMask;
+
+ typedef StackDepotDesc args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ u32 hash_bits =
+ atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+ if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != args.stack[i]) return false;
+ }
+ return true;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ }
+ void store(const args_type &args, u32 hash) {
+ atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+ size = args.size;
+ internal_memcpy(stack, args.stack, size * sizeof(uptr));
+ }
+ args_type load() const {
+ args_type ret = {&stack[0], size};
+ return ret;
+ }
+ StackDepotHandle get_handle() { return StackDepotHandle(this); }
-static StackDepotStats stats;
+ typedef StackDepotHandle handle_type;
+};
-StackDepotStats *StackDepotGetStats() {
- return &stats;
-}
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
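Since hash_and_use_count packs two fields into a single u32, here is a brief sketch of the split implied by the constants above (kTabSizeLog == 20, so the low 20 bits hold the use count and the high 12 bits cache the hash; the value of 'hash' is arbitrary):

u32 hash = 0xdeadbeef;                                   // any 32-bit hash
u32 packed    = hash & StackDepotNode::kHashMask;        // store(): count = 0
u32 use_count = packed & StackDepotNode::kUseCountMask;  // low 20 bits
u32 hash_bits = packed & StackDepotNode::kHashMask;      // high 12 bits
// inc_use_count_unsafe() may simply add 1: CHECK_LT(prev + 1, kMaxUseCount)
// guarantees the counter never carries into the hash bits.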
-static u32 hash(const uptr *stack, uptr size) {
- // murmur2
- const u32 m = 0x5bd1e995;
- const u32 seed = 0x9747b28c;
- const u32 r = 24;
- u32 h = seed ^ (size * sizeof(uptr));
- for (uptr i = 0; i < size; i++) {
- u32 k = stack[i];
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
- }
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
- return h;
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+ return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
}
-
-static StackDesc *tryallocDesc(uptr memsz) {
- // Optimisic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
- uptr end = atomic_load(&depot.region_end, memory_order_acquire);
- if (cmp == 0 || cmp + memsz > end)
- return 0;
- if (atomic_compare_exchange_weak(
- &depot.region_pos, &cmp, cmp + memsz,
- memory_order_acquire))
- return (StackDesc*)cmp;
- }
+void StackDepotHandle::inc_use_count_unsafe() {
+ u32 prev =
+ atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+ CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
+uptr StackDepotHandle::size() { return node_->size; }
+uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
-static StackDesc *allocDesc(uptr size) {
- // First, try to allocate optimisitically.
- uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
- StackDesc *s = tryallocDesc(memsz);
- if (s)
- return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&depot.mtx);
- for (;;) {
- s = tryallocDesc(memsz);
- if (s)
- return s;
- atomic_store(&depot.region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < memsz)
- allocsz = memsz;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- stats.mapped += allocsz;
- atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
- atomic_store(&depot.region_pos, mem, memory_order_release);
- }
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
+ StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+ return theDepot.GetStats();
}
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
- // Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->hash == hash && s->size == size) {
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != s->stack[i])
- break;
- }
- if (i == size)
- return s->id;
- }
- }
- return 0;
+u32 StackDepotPut(const uptr *stack, uptr size) {
+ StackDepotDesc desc = {stack, size};
+ StackDepotHandle h = theDepot.Put(desc);
+ return h.valid() ? h.id() : 0;
}
-static StackDesc *lock(atomic_uintptr_t *p) {
- // Uses the pointer lsb as mutex.
- for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0
- && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
- memory_order_acquire))
- return (StackDesc*)cmp;
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- }
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
+ StackDepotDesc desc = {stack, size};
+ return theDepot.Put(desc);
}
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+const uptr *StackDepotGet(u32 id, uptr *size) {
+ StackDepotDesc desc = theDepot.Get(id);
+ *size = desc.size;
+ return desc.stack;
}
-u32 StackDepotPut(const uptr *stack, uptr size) {
- if (stack == 0 || size == 0)
- return 0;
- uptr h = hash(stack, size);
- atomic_uintptr_t *p = &depot.tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- // First, try to find the existing stack.
- u32 id = find(s, stack, size, h);
- if (id)
- return id;
- // If failed, lock, retry and insert new.
- StackDesc *s2 = lock(p);
- if (s2 != s) {
- id = find(s2, stack, size, h);
- if (id) {
- unlock(p, s2);
- return id;
- }
- }
- uptr part = (h % kTabSize) / kPartSize;
- id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (1u << 31), 0);
- s = allocDesc(size);
- s->id = id;
- s->hash = h;
- s->size = size;
- internal_memcpy(s->stack, stack, size * sizeof(uptr));
- s->link = s2;
- unlock(p, s);
- return id;
+void StackDepotLockAll() {
+ theDepot.LockAll();
}
-const uptr *StackDepotGet(u32 id, uptr *size) {
- if (id == 0)
- return 0;
- CHECK_EQ(id & (1u << 31), 0);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &depot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- *size = s->size;
- return s->stack;
- }
- }
- }
- *size = 0;
- return 0;
+void StackDepotUnlockAll() {
+ theDepot.UnlockAll();
}
bool StackDepotReverseMap::IdDescPair::IdComparator(
StackDepotReverseMap::StackDepotReverseMap()
: map_(StackDepotGetStats()->n_uniq_ids + 100) {
- for (int idx = 0; idx < kTabSize; idx++) {
- atomic_uintptr_t *p = &depot.tab[idx];
+ for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+ atomic_uintptr_t *p = &theDepot.tab[idx];
uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
+ StackDepotNode *s = (StackDepotNode*)(v & ~1);
for (; s; s = s->link) {
IdDescPair pair = {s->id, s};
map_.push_back(pair);
*size = 0;
return 0;
}
- StackDesc *desc = map_[idx].desc;
+ StackDepotNode *desc = map_[idx].desc;
*size = desc->size;
return desc->stack;
}
namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(0) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
+ uptr size();
+ uptr *stack();
+};
+
+const int kStackDepotMaxUseCount = 1U << 20;
-// Maps stack trace to an unique id.
+StackDepotStats *StackDepotGetStats();
u32 StackDepotPut(const uptr *stack, uptr size);
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size);
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr mapped;
-};
-
-StackDepotStats *StackDepotGetStats();
-
-struct StackDesc;
+void StackDepotLockAll();
+void StackDepotUnlockAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
private:
struct IdDescPair {
u32 id;
- StackDesc *desc;
+ StackDepotNode *desc;
static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
};
StackDepotReverseMap(const StackDepotReverseMap&);
void operator=(const StackDepotReverseMap&);
};
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H
--- /dev/null
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+ // Maps a stack trace to a unique id.
+ handle_type Put(args_type args, bool *inserted = 0);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
+ args_type args,
+ u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return 0;
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
+ atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
+ atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
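The lock()/unlock() pair above reuses the least-significant bit of each bucket pointer as a spin lock, which is sound because nodes are allocated with at least 2-byte alignment, so the lsb of a genuine pointer is always 0. A self-contained sketch of the same idea in portable C++ (all names hypothetical):

#include <atomic>
#include <cstdint>

struct Node { Node *link; };

static std::atomic<std::uintptr_t> bucket{0};  // list head; lsb is the lock

Node *lock_bucket() {
  for (;;) {
    std::uintptr_t cur = bucket.load(std::memory_order_relaxed);
    if ((cur & 1) == 0 &&
        bucket.compare_exchange_weak(cur, cur | 1, std::memory_order_acquire))
      return reinterpret_cast<Node *>(cur);  // caller owns the list head
    // otherwise spin (the real code yields after a few iterations)
  }
}

void unlock_bucket(Node *new_head) {
  // Publishing the (2-byte aligned) head clears the lock bit.
  bucket.store(reinterpret_cast<std::uintptr_t>(new_head),
               std::memory_order_release);
}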
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!args.is_valid()) return handle_type();
+ uptr h = args.hash();
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If failed, lock, retry and insert new.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ lock(&tab[i]);
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ uptr s = atomic_load(p, memory_order_relaxed);
+ unlock(p, (Node *)(s & ~1UL));
+ }
+}
+
+} // namespace __sanitizer
+#endif // SANITIZER_STACKDEPOTBASE_H
#if defined(__arm__)
// Cancel Thumb bit.
pc = pc & (~1);
-#elif defined(__powerpc__) || defined(__powerpc64__)
+#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
// PCs are always 4 byte aligned.
return pc - 4;
#elif defined(__sparc__)
return GET_CALLER_PC();
}
+// Check if the given pointer points into the allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+}
+
+// With GCC on ARM, bp points to the saved lr rather than the fp, so we must
+// check whether the next cell on the stack holds a saved frame pointer.
+// GetCanonicFrame returns a pointer to the saved frame pointer in either case.
+static inline uhwptr *GetCanonicFrame(uptr bp,
+ uptr stack_top,
+ uptr stack_bottom) {
+#ifdef __arm__
+ if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
+ uhwptr *bp_prev = (uhwptr *)bp;
+ if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
+ return bp_prev - 1;
+#else
+ return (uhwptr*)bp;
+#endif
+}
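A rough picture of the two frame layouts GetCanonicFrame() distinguishes, assuming the usual GCC/ARM "push {fp, lr}" prologue (this diagram is illustrative, not from the patch):

//   default:  bp     -> [saved fp][return address]
//   GCC/ARM:  bp - 1 -> [saved fp],  bp -> [saved lr]
// The probe reads bp[0]: if it already looks like a frame pointer we keep bp,
// otherwise the saved frame pointer must sit one cell below, at bp - 1.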
+
void StackTrace::FastUnwindStack(uptr pc, uptr bp,
uptr stack_top, uptr stack_bottom,
uptr max_depth) {
CHECK_GE(max_depth, 2);
trace[0] = pc;
size = 1;
- uhwptr *frame = (uhwptr *)bp;
- uhwptr *prev_frame = frame - 1;
if (stack_top < 4096) return; // Sanity check for stack top.
+ uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
+ uhwptr *prev_frame = 0;
// Avoid infinite loop when frame == frame[0] by using frame > prev_frame.
while (frame > prev_frame &&
- frame < (uhwptr *)stack_top - 2 &&
- frame > (uhwptr *)stack_bottom &&
+ IsValidFrame((uptr)frame, stack_top, stack_bottom) &&
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
uhwptr pc1 = frame[1];
trace[size++] = (uptr) pc1;
}
prev_frame = frame;
- frame = (uhwptr *)frame[0];
+ frame = GetCanonicFrame((uptr)frame[0], stack_top, stack_bottom);
}
}
static bool WillUseFastUnwind(bool request_fast_unwind) {
// Check if fast unwind is available. Fast unwind is the only option on Mac.
+ // It is also the only option on FreeBSD as the slow unwinding that
+ // leverages _Unwind_Backtrace() yields the call stack of the signal's
+ // handler and not of the code that raised the signal (as it does on Linux).
if (!SANITIZER_CAN_FAST_UNWIND)
return false;
- else if (SANITIZER_MAC)
+ else if (SANITIZER_MAC != 0 || SANITIZER_FREEBSD != 0)
return true;
return request_fast_unwind;
}
uptr local_stack; \
uptr sp = (uptr)&local_stack
+#define GET_CALLER_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC();
+
// Use this macro if you want to print stack trace with the current
// function in the top frame.
#define GET_CURRENT_PC_BP_SP \
uptr pc = GetPreviousInstructionPc(addr[i]);
uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
pc, addr_frames.data(), addr_frames.size());
+ if (addr_frames_num == 0) {
+ frame_desc.clear();
+ PrintStackFramePrefix(&frame_desc, frame_num, pc);
+ frame_desc.append(" (<unknown module>)");
+ Printf("%s\n", frame_desc.data());
+ frame_num++;
+ continue;
+ }
for (uptr j = 0; j < addr_frames_num; j++) {
AddressInfo &info = addr_frames[j];
frame_desc.clear();
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
namespace __sanitizer {
static const char *const kTypeStrings[SuppressionTypeCount] = {
- "none", "race", "mutex", "thread",
- "signal", "leak", "called_from_lib", "deadlock"};
+ "none", "race", "mutex", "thread", "signal",
+ "leak", "called_from_lib", "deadlock", "vptr_check"};
bool TemplateMatch(char *templ, const char *str) {
if (str == 0 || str[0] == 0)
return true;
}
+ALIGNED(64) static char placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = 0;
+
+SuppressionContext *SuppressionContext::Get() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+void SuppressionContext::InitIfNecessary() {
+ if (suppression_ctx)
+ return;
+ suppression_ctx = new(placeholder) SuppressionContext;
+ if (common_flags()->suppressions[0] == '\0')
+ return;
+ char *suppressions_from_file;
+ uptr buffer_size;
+ uptr contents_size =
+ ReadFileToBuffer(common_flags()->suppressions, &suppressions_from_file,
+ &buffer_size, 1 << 26 /* max_len */);
+ if (contents_size == 0) {
+ Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
+ common_flags()->suppressions);
+ Die();
+ }
+ suppression_ctx->Parse(suppressions_from_file);
+}
+
bool SuppressionContext::Match(const char *str, SuppressionType type,
Suppression **s) {
can_parse_ = false;
SuppressionLeak,
SuppressionLib,
SuppressionDeadlock,
+ SuppressionVptrCheck,
SuppressionTypeCount
};
class SuppressionContext {
public:
- SuppressionContext() : suppressions_(1), can_parse_(true) {}
void Parse(const char *str);
bool Match(const char* str, SuppressionType type, Suppression **s);
uptr SuppressionCount() const;
const Suppression *SuppressionAt(uptr i) const;
void GetMatched(InternalMmapVector<Suppression *> *matched);
+ // Create a SuppressionContext singleton if it hasn't been created earlier.
+ // Not thread safe. Must be called early during initialization (but after
+ // runtime flags are parsed).
+ static void InitIfNecessary();
+ // Returns a SuppressionContext singleton.
+ static SuppressionContext *Get();
+
private:
+ SuppressionContext() : suppressions_(1), can_parse_(true) {}
InternalMmapVector<Suppression> suppressions_;
bool can_parse_;
StaticSpinMutex Symbolizer::init_mu_;
LowLevelAllocator Symbolizer::symbolizer_allocator_;
-Symbolizer *Symbolizer::GetOrNull() {
- SpinMutexLock l(&init_mu_);
- return symbolizer_;
-}
-
-Symbolizer *Symbolizer::Get() {
- SpinMutexLock l(&init_mu_);
- RAW_CHECK_MSG(symbolizer_ != 0, "Using uninitialized symbolizer!");
- return symbolizer_;
-}
-
Symbolizer *Symbolizer::Disable() {
CHECK_EQ(0, symbolizer_);
// Initialize a dummy symbolizer.
class Symbolizer {
public:
- /// Returns platform-specific implementation of Symbolizer. The symbolizer
- /// must be initialized (with init or disable) before calling this function.
- static Symbolizer *Get();
- /// Returns platform-specific implementation of Symbolizer, or null if not
- /// initialized.
- static Symbolizer *GetOrNull();
- /// Returns platform-specific implementation of Symbolizer. Will
- /// automatically initialize symbolizer as if by calling Init(0) if needed.
+ /// Initialize and return platform-specific implementation of symbolizer
+ /// (if it wasn't already initialized).
static Symbolizer *GetOrInit();
- /// Initialize and return the symbolizer, given an optional path to an
- /// external symbolizer. The path argument is only required for legacy
- /// reasons as this function will check $PATH for an external symbolizer. Not
- /// thread safe.
- static Symbolizer *Init(const char* path_to_external = 0);
// Fills at most "max_frames" elements of "frames" with descriptions
// for a given address (in all inlined functions). Returns the number
// of descriptions actually filled.
private:
/// Platform-specific function for creating a Symbolizer object.
- static Symbolizer *PlatformInit(const char *path_to_external);
- /// Create a symbolizer and store it to symbolizer_ without checking if one
- /// already exists. Not thread safe.
- static Symbolizer *CreateAndStore(const char *path_to_external);
+ static Symbolizer *PlatformInit();
/// Initialize the symbolizer in a disabled state. Not thread safe.
static Symbolizer *Disable();
namespace __sanitizer {
-Symbolizer *Symbolizer::CreateAndStore(const char *path_to_external) {
- Symbolizer *platform_symbolizer = PlatformInit(path_to_external);
- if (!platform_symbolizer)
- return Disable();
- symbolizer_ = platform_symbolizer;
- return platform_symbolizer;
-}
-
-Symbolizer *Symbolizer::Init(const char *path_to_external) {
- CHECK_EQ(0, symbolizer_);
- return CreateAndStore(path_to_external);
-}
-
Symbolizer *Symbolizer::GetOrInit() {
SpinMutexLock l(&init_mu_);
- if (symbolizer_ == 0)
- return CreateAndStore(0);
- return symbolizer_;
+ if (symbolizer_)
+ return symbolizer_;
+ if ((symbolizer_ = PlatformInit()))
+ return symbolizer_;
+ return Disable();
}
} // namespace __sanitizer
internal_close(outfd[1]);
internal_close(infd[0]);
internal_close(infd[1]);
- for (int fd = getdtablesize(); fd > 2; fd--)
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
internal_close(fd);
ExecuteWithDefaultArgs(path_);
internal__exit(1);
LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked.
};
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
+Symbolizer *Symbolizer::PlatformInit() {
if (!common_flags()->symbolize) {
return new(symbolizer_allocator_) POSIXSymbolizer(0, 0, 0);
}
libbacktrace_symbolizer =
LibbacktraceSymbolizer::get(&symbolizer_allocator_);
if (!libbacktrace_symbolizer) {
+ const char *path_to_external = common_flags()->external_symbolizer_path;
if (path_to_external && path_to_external[0] == '\0') {
// External symbolizer is explicitly disabled. Do nothing.
} else {
BlockingMutexLock l(&dbghelp_mu_);
if (!initialized_) {
- SymSetOptions(SYMOPT_DEFERRED_LOADS |
- SYMOPT_UNDNAME |
- SYMOPT_LOAD_LINES);
- CHECK(SymInitialize(GetCurrentProcess(), 0, TRUE));
- // FIXME: We don't call SymCleanup() on exit yet - should we?
+ if (!TrySymInitialize()) {
+ // OK, maybe the client app has called SymInitialize already.
+ // That's a bit unfortunate for us as all the DbgHelp functions are
+ // single-threaded and we can't coordinate with the app.
+ // FIXME: Can we stop the other threads at this point?
+ // Anyway, we have to reconfigure things to make sure that SymInitialize
+ // has all the appropriate options set.
+ // Cross our fingers and reinitialize DbgHelp.
+ Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
+ Report("*** Most likely this means that the app is already ***\n");
+ Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
+ Report("*** Due to technical reasons, symbolization might crash ***\n");
+ Report("*** or produce wrong results. ***\n");
+ SymCleanup(GetCurrentProcess());
+ TrySymInitialize();
+ }
initialized_ = true;
}
// FIXME: Implement GetModuleNameAndOffsetForPC().
private:
+ bool TrySymInitialize() {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
+ return SymInitialize(GetCurrentProcess(), 0, TRUE);
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+ }
+
// All DbgHelp functions are single threaded, so we should use a mutex to
// serialize accesses.
BlockingMutex dbghelp_mu_;
bool initialized_;
};
-Symbolizer *Symbolizer::PlatformInit(const char *path_to_external) {
+Symbolizer *Symbolizer::PlatformInit() {
static bool called_once = false;
CHECK(!called_once && "Shouldn't create more than one symbolizer");
called_once = true;
}
}
-void ThreadRegistry::DetachThread(u32 tid) {
+void ThreadRegistry::DetachThread(u32 tid, void *arg) {
BlockingMutexLock l(&mtx_);
CHECK_LT(tid, n_contexts_);
ThreadContextBase *tctx = threads_[tid];
Report("%s: Detach of non-existent thread\n", SanitizerToolName);
return;
}
+ tctx->OnDetached(arg);
if (tctx->status == ThreadStatusFinished) {
tctx->SetDead();
QuarantinePush(tctx);
virtual void OnStarted(void *arg) {}
virtual void OnCreated(void *arg) {}
virtual void OnReset() {}
+ virtual void OnDetached(void *arg) {}
};
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
void SetThreadName(u32 tid, const char *name);
void SetThreadNameByUserId(uptr user_id, const char *name);
- void DetachThread(u32 tid);
+ void DetachThread(u32 tid, void *arg);
void JoinThread(u32 tid, void *arg);
void FinishThread(u32 tid);
void StartThread(u32 tid, uptr os_id, void *arg);
DTLS_Deallocate(dtls.dtv, s);
}
-void DTLS_on_tls_get_addr(void *arg_void, void *res) {
- if (!common_flags()->intercept_tls_get_addr) return;
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res) {
+ if (!common_flags()->intercept_tls_get_addr) return 0;
TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
uptr dso_id = arg->dso_id;
- if (dtls.dtv_size == kDestroyedThread) return;
+ if (dtls.dtv_size == kDestroyedThread) return 0;
DTLS_Resize(dso_id + 1);
- if (dtls.dtv[dso_id].beg)
- return;
+ if (dtls.dtv[dso_id].beg) return 0;
uptr tls_size = 0;
uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset;
VPrintf(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
}
dtls.dtv[dso_id].beg = tls_beg;
dtls.dtv[dso_id].size = tls_size;
+ return dtls.dtv + dso_id;
}
void DTLS_on_libc_memalign(void *ptr, uptr size) {
#else
void DTLS_on_libc_memalign(void *ptr, uptr size) {}
-void DTLS_on_tls_get_addr(void *arg, void *res) {}
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res) { return 0; }
DTLS *DTLS_Get() { return 0; }
void DTLS_Destroy() {}
#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
uptr last_memalign_ptr;
};
-void DTLS_on_tls_get_addr(void *arg, void *res);
+// Returns pointer and size of a linker-allocated TLS block.
+// Each block is returned exactly once.
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res);
void DTLS_on_libc_memalign(void *ptr, uptr size);
DTLS *DTLS_Get();
void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
--- /dev/null
+//===-- sanitizer_unwind_posix.cc ----------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the unwind.h-based (aka "slow") stack unwinding routines
+// available to the tools on Linux, Android, FreeBSD and OS X.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+#if SANITIZER_ANDROID
+#include <dlfcn.h> // for dlopen()
+#endif
+
+#if SANITIZER_FREEBSD
+#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
+#endif
+#include <unwind.h>
+
+namespace __sanitizer {
+
+//------------------------- SlowUnwindStack -----------------------------------
+
+typedef struct {
+ uptr absolute_pc;
+ uptr stack_top;
+ uptr stack_size;
+} backtrace_frame_t;
+
+extern "C" {
+typedef void *(*acquire_my_map_info_list_func)();
+typedef void (*release_my_map_info_list_func)(void *map);
+typedef sptr (*unwind_backtrace_signal_arch_func)(
+ void *siginfo, void *sigcontext, void *map_info_list,
+ backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
+acquire_my_map_info_list_func acquire_my_map_info_list;
+release_my_map_info_list_func release_my_map_info_list;
+unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
+} // extern "C"
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder() {
+ void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
+ if (!p) {
+ VReport(1,
+ "Failed to open libcorkscrew.so. You may see broken stack traces "
+ "in SEGV reports.");
+ return;
+ }
+ acquire_my_map_info_list =
+ (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
+ release_my_map_info_list =
+ (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
+ unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
+ p, "unwind_backtrace_signal_arch");
+ if (!acquire_my_map_info_list || !release_my_map_info_list ||
+ !unwind_backtrace_signal_arch) {
+ VReport(1,
+ "Failed to find one of the required symbols in libcorkscrew.so. "
+ "You may see broken stack traces in SEGV reports.");
+ acquire_my_map_info_list = 0;
+ unwind_backtrace_signal_arch = 0;
+ release_my_map_info_list = 0;
+ }
+}
+#endif
+
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+struct UnwindTraceArg {
+ StackTrace *stack;
+ uptr max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = (UnwindTraceArg*)param;
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = Unwind_GetIP(ctx);
+ arg->stack->trace[arg->stack->size++] = pc;
+ if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace[0] belongs to the current function so we always pop it.
+ if (to_pop == 0)
+ to_pop = 1;
+ PopStackFrames(to_pop);
+ trace[0] = pc;
+}
+
+void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
+ uptr max_depth) {
+ CHECK_GE(max_depth, 2);
+ if (!unwind_backtrace_signal_arch) {
+ SlowUnwindStack(pc, max_depth);
+ return;
+ }
+
+ void *map = acquire_my_map_info_list();
+ CHECK(map);
+ InternalScopedBuffer<backtrace_frame_t> frames(kStackTraceMax);
+ // siginfo argument appears to be unused.
+ sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,
+ frames.data(),
+ /* ignore_depth */ 0, max_depth);
+ release_my_map_info_list(map);
+ if (res < 0) return;
+ CHECK_LE((uptr)res, kStackTraceMax);
+
+ size = 0;
+ // The +2 compensates for the libcorkscrew unwinder returning addresses of
+ // call instructions instead of raw return addresses.
+ for (sptr i = 0; i < res; ++i)
+ trace[size++] = frames[i].absolute_pc + 2;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
#define WIN32_LEAN_AND_MEAN
#define NOGDI
-#include <stdlib.h>
-#include <io.h>
#include <windows.h>
+#include <dbghelp.h>
+#include <io.h>
+#include <stdlib.h>
#include "sanitizer_common.h"
#include "sanitizer_libc.h"
return GetTid();
}
+#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr *stack_bottom) {
CHECK(stack_top);
*stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
*stack_bottom = (uptr)mbi.AllocationBase;
}
+#endif // #if !SANITIZER_GO
void *MmapOrDie(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
UNIMPLEMENTED();
}
+void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) {
+ UNIMPLEMENTED();
+}
+
static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;
UNIMPLEMENTED();
}
-void DisableCoreDumper() {
+void DisableCoreDumperIfNecessary() {
// Do nothing.
}
UNIMPLEMENTED();
}
+bool AddressSpaceIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetAddressSpaceUnlimited() {
+ UNIMPLEMENTED();
+}
+
char *FindPathToBinary(const char *name) {
// Nothing here for now.
return 0;
ExitProcess(exitcode);
}
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ UNIMPLEMENTED();
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ UNIMPLEMENTED();
+}
+
// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;
#endif
}
+#if !SANITIZER_GO
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
CHECK_GE(max_depth, 2);
// FIXME: CaptureStackBackTrace might be too slow for us.
void StackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
uptr max_depth) {
- UNREACHABLE("no signal context on windows");
+ CONTEXT ctx = *(CONTEXT *)context;
+ STACKFRAME64 stack_frame;
+ memset(&stack_frame, 0, sizeof(stack_frame));
+ size = 0;
+#if defined(_WIN64)
+ int machine_type = IMAGE_FILE_MACHINE_AMD64;
+ stack_frame.AddrPC.Offset = ctx.Rip;
+ stack_frame.AddrFrame.Offset = ctx.Rbp;
+ stack_frame.AddrStack.Offset = ctx.Rsp;
+#else
+ int machine_type = IMAGE_FILE_MACHINE_I386;
+ stack_frame.AddrPC.Offset = ctx.Eip;
+ stack_frame.AddrFrame.Offset = ctx.Ebp;
+ stack_frame.AddrStack.Offset = ctx.Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
+ stack_frame.AddrStack.Mode = AddrModeFlat;
+ while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
+ &stack_frame, &ctx, NULL, &SymFunctionTableAccess64,
+ &SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
+ trace[size++] = (uptr)stack_frame.AddrPC.Offset;
+ }
}
+#endif // #if !SANITIZER_GO
void MaybeOpenReportFile() {
// Windows doesn't have native fork, and we don't support Cygwin or other
return false;
}
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ // FIXME: Actually implement this function.
+ return true;
+}
+
} // namespace __sanitizer
#endif // _WIN32
tsan_rtl_mutex.cc \
tsan_rtl_report.cc \
tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
tsan_stat.cc \
tsan_suppressions.cc \
tsan_symbolize.cc \
tsan_mutexset.lo tsan_platform_linux.lo tsan_platform_mac.lo \
tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \
- tsan_stat.lo tsan_suppressions.lo tsan_symbolize.lo \
- tsan_sync.lo tsan_rtl_amd64.lo
+ tsan_stack_trace.lo tsan_stat.lo tsan_suppressions.lo \
+ tsan_symbolize.lo tsan_sync.lo tsan_rtl_amd64.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
tsan_rtl_mutex.cc \
tsan_rtl_report.cc \
tsan_rtl_thread.cc \
+ tsan_stack_trace.cc \
tsan_stat.cc \
tsan_suppressions.cc \
tsan_symbolize.cc \
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_thread.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stack_trace.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stat.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_suppressions.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_symbolize.Plo@am__quote@
//===----------------------------------------------------------------------===//
#include "tsan_clock.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
// SyncClock and ThreadClock implement vector clocks for sync variables
// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
clk_[tid_].reused = reused_;
}
-void ThreadClock::acquire(const SyncClock *src) {
+void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(src->clk_.Size() <= kMaxTid);
+ DCHECK(src->size_ <= kMaxTid);
CPP_STAT_INC(StatClockAcquire);
// Check if it's empty -> no need to do anything.
- const uptr nclk = src->clk_.Size();
+ const uptr nclk = src->size_;
if (nclk == 0) {
CPP_STAT_INC(StatClockAcquireEmpty);
return;
bool acquired = false;
if (nclk > tid_) {
CPP_STAT_INC(StatClockAcquireLarge);
- if (src->clk_[tid_].reused == reused_) {
+ if (src->elem(tid_).reused == reused_) {
CPP_STAT_INC(StatClockAcquireRepeat);
for (unsigned i = 0; i < kDirtyTids; i++) {
unsigned tid = src->dirty_tids_[i];
if (tid != kInvalidTid) {
- u64 epoch = src->clk_[tid].epoch;
+ u64 epoch = src->elem(tid).epoch;
if (clk_[tid].epoch < epoch) {
clk_[tid].epoch = epoch;
acquired = true;
CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
- u64 epoch = src->clk_[i].epoch;
+ u64 epoch = src->elem(i).epoch;
if (clk_[i].epoch < epoch) {
clk_[i].epoch = epoch;
acquired = true;
// Remember that this thread has acquired this clock.
if (nclk > tid_)
- src->clk_[tid_].reused = reused_;
+ src->elem(tid_).reused = reused_;
if (acquired) {
CPP_STAT_INC(StatClockAcquiredSomething);
}
}
-void ThreadClock::release(SyncClock *dst) const {
+void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
DCHECK_LE(nclk_, kMaxTid);
- DCHECK_LE(dst->clk_.Size(), kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
- if (dst->clk_.Size() == 0) {
+ if (dst->size_ == 0) {
// ReleaseStore will correctly set release_store_tid_,
// which can be important for future operations.
- ReleaseStore(dst);
+ ReleaseStore(c, dst);
return;
}
CPP_STAT_INC(StatClockRelease);
// Check if we need to resize dst.
- if (dst->clk_.Size() < nclk_) {
- CPP_STAT_INC(StatClockReleaseResize);
- dst->clk_.Resize(nclk_);
- }
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
// Check if we had not acquired anything from other threads
// since the last release on dst. If so, we need to update
- // only dst->clk_[tid_].
- if (dst->clk_[tid_].epoch > last_acquire_) {
+ // only dst->elem(tid_).
+ if (dst->elem(tid_).epoch > last_acquire_) {
UpdateCurrentThread(dst);
if (dst->release_store_tid_ != tid_ ||
dst->release_store_reused_ != reused_)
CPP_STAT_INC(StatClockReleaseAcquired);
// Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
- dst->clk_[i].epoch = max(dst->clk_[i].epoch, clk_[i].epoch);
- dst->clk_[i].reused = 0;
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = max(ce.epoch, clk_[i].epoch);
+ ce.reused = 0;
}
// Clear 'acquired' flag in the remaining elements.
- if (nclk_ < dst->clk_.Size())
+ if (nclk_ < dst->size_)
CPP_STAT_INC(StatClockReleaseClearTail);
- for (uptr i = nclk_; i < dst->clk_.Size(); i++)
- dst->clk_[i].reused = 0;
+ for (uptr i = nclk_; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
for (unsigned i = 0; i < kDirtyTids; i++)
dst->dirty_tids_[i] = kInvalidTid;
dst->release_store_tid_ = kInvalidTid;
// If we've acquired dst, remember this fact,
// so that we don't need to acquire it on next acquire.
if (acquired)
- dst->clk_[tid_].reused = reused_;
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::ReleaseStore(SyncClock *dst) const {
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
DCHECK(nclk_ <= kMaxTid);
- DCHECK(dst->clk_.Size() <= kMaxTid);
+ DCHECK(dst->size_ <= kMaxTid);
CPP_STAT_INC(StatClockStore);
// Check if we need to resize dst.
- if (dst->clk_.Size() < nclk_) {
- CPP_STAT_INC(StatClockStoreResize);
- dst->clk_.Resize(nclk_);
- }
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
if (dst->release_store_tid_ == tid_ &&
dst->release_store_reused_ == reused_ &&
- dst->clk_[tid_].epoch > last_acquire_) {
+ dst->elem(tid_).epoch > last_acquire_) {
CPP_STAT_INC(StatClockStoreFast);
UpdateCurrentThread(dst);
return;
// O(N) release-store.
CPP_STAT_INC(StatClockStoreFull);
for (uptr i = 0; i < nclk_; i++) {
- dst->clk_[i].epoch = clk_[i].epoch;
- dst->clk_[i].reused = 0;
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = clk_[i].epoch;
+ ce.reused = 0;
}
// Clear the tail of dst->clk_.
- if (nclk_ < dst->clk_.Size()) {
- internal_memset(&dst->clk_[nclk_], 0,
- (dst->clk_.Size() - nclk_) * sizeof(dst->clk_[0]));
+ if (nclk_ < dst->size_) {
+ for (uptr i = nclk_; i < dst->size_; i++) {
+ ClockElem &ce = dst->elem(i);
+ ce.epoch = 0;
+ ce.reused = 0;
+ }
CPP_STAT_INC(StatClockStoreTail);
}
for (unsigned i = 0; i < kDirtyTids; i++)
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Remember that we don't need to acquire it in the future.
- dst->clk_[tid_].reused = reused_;
+ dst->elem(tid_).reused = reused_;
}
-void ThreadClock::acq_rel(SyncClock *dst) {
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
CPP_STAT_INC(StatClockAcquireRelease);
- acquire(dst);
- ReleaseStore(dst);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
}
// Updates only single element related to the current thread in dst->clk_.
void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
// Update the threads time, but preserve 'acquired' flag.
- dst->clk_[tid_].epoch = clk_[tid_].epoch;
+ dst->elem(tid_).epoch = clk_[tid_].epoch;
for (unsigned i = 0; i < kDirtyTids; i++) {
if (dst->dirty_tids_[i] == tid_) {
}
// Reset all 'acquired' flags, O(N).
CPP_STAT_INC(StatClockReleaseSlow);
- for (uptr i = 0; i < dst->clk_.Size(); i++) {
- dst->clk_[i].reused = 0;
- }
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
for (unsigned i = 0; i < kDirtyTids; i++)
dst->dirty_tids_[i] = kInvalidTid;
}
// Checks whether the current thread has already acquired src.
bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
- if (src->clk_[tid_].reused != reused_)
+ if (src->elem(tid_).reused != reused_)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
unsigned tid = src->dirty_tids_[i];
if (tid != kInvalidTid) {
- if (clk_[tid].epoch < src->clk_[tid].epoch)
+ if (clk_[tid].epoch < src->elem(tid).epoch)
return false;
}
}
return true;
}
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
+ RoundUpTo(size_, ClockBlock::kClockCount)) {
+ // Growing within the same block.
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (nclk <= ClockBlock::kClockCount) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ size_ = nclk;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ return;
+ }
+ // Growing two-level table.
+ if (size_ == 0) {
+ // Allocate first level table.
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // Transform one-level table to two-level table.
+ u32 old = tab_idx_;
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ tab_->table[0] = old;
+ }
+ // At this point we have first level table allocated.
+ // Add second level tables as necessary.
+ for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
+ i < nclk; i += ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
+ tab_->table[i/ClockBlock::kClockCount] = idx;
+ }
+ size_ = nclk;
+}
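+// A worked example of the growth rules above (64 ClockElem's per block):
+// growing from size_ == 10 to nclk == 40 stays within the existing block,
+// so only size_ changes; growing from 10 to 100 first wraps the old
+// one-level block as table[0] of a freshly allocated first-level table,
+// then adds one more second-level block covering indices 64..127.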
+
// Sets a single element in the vector clock.
// This function is called only from weird places like AcquireGlobal.
void ThreadClock::set(unsigned tid, u64 v) {
}
SyncClock::SyncClock()
- : clk_(MBlockClock) {
- release_store_tid_ = kInvalidTid;
- release_store_reused_ = 0;
+ : release_store_tid_(kInvalidTid)
+ , release_store_reused_()
+ , tab_()
+ , tab_idx_()
+ , size_() {
for (uptr i = 0; i < kDirtyTids; i++)
dirty_tids_[i] = kInvalidTid;
}
-void SyncClock::Reset() {
- clk_.Reset();
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_ == 0) {
+ // nothing
+ } else if (size_ <= ClockBlock::kClockCount) {
+ // One-level table.
+ ctx->clock_alloc.Free(c, tab_idx_);
+ } else {
+ // Two-level table.
+ for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
+ ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
+ ctx->clock_alloc.Free(c, tab_idx_);
+ }
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
release_store_tid_ = kInvalidTid;
release_store_reused_ = 0;
for (uptr i = 0; i < kDirtyTids; i++)
dirty_tids_[i] = kInvalidTid;
}
+ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ if (size_ <= ClockBlock::kClockCount)
+ return tab_->clock[tid];
+ u32 idx = tab_->table[tid / ClockBlock::kClockCount];
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid % ClockBlock::kClockCount];
+}
+
void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
printf("clock=[");
- for (uptr i = 0; i < clk_.Size(); i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
printf("] reused=[");
- for (uptr i = 0; i < clk_.Size(); i++)
- printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
release_store_tid_, release_store_reused_,
dirty_tids_[0], dirty_tids_[1]);
#define TSAN_CLOCK_H
#include "tsan_defs.h"
-#include "tsan_vector.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
u64 reused : 64 - kClkBits;
};
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
public:
SyncClock();
+ ~SyncClock();
uptr size() const {
- return clk_.Size();
+ return size_;
}
u64 get(unsigned tid) const {
- DCHECK_LT(tid, clk_.Size());
- return clk_[tid].epoch;
+ return elem(tid).epoch;
}
- void Reset();
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
void DebugDump(int(*printf)(const char *s, ...));
private:
+ friend struct ThreadClock;
+ static const uptr kDirtyTids = 2;
+
unsigned release_store_tid_;
unsigned release_store_reused_;
- static const uptr kDirtyTids = 2;
unsigned dirty_tids_[kDirtyTids];
- mutable Vector<ClockElem> clk_;
- friend struct ThreadClock;
+ // tab_ is an indirect pointer to a 512-byte block obtained from DenseSlabAlloc.
+ // If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
+ // Otherwise, tab_ points to an array with 128 u32 elements,
+ // each pointing to the second-level 512b block with 64 ClockElem's.
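+ // For example, with 64 ClockElem's per block: tid 200 is found via
+ // tab_->table[200 / 64] == table[3], which holds the index of the
+ // second-level block, and then block->clock[200 % 64] == clock[8].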
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u32 size_;
+
+ ClockElem &elem(unsigned tid) const;
};
// The clock that lives in threads.
struct ThreadClock {
public:
+ typedef DenseSlabAllocCache Cache;
+
explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
return nclk_;
}
- void acquire(const SyncClock *src);
- void release(SyncClock *dst) const;
- void acq_rel(SyncClock *dst);
- void ReleaseStore(SyncClock *dst) const;
+ void acquire(ClockCache *c, const SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst) const;
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst) const;
void DebugReset();
void DebugDump(int(*printf)(const char *s, ...));
# endif
#else
// Count of shadow values in a shadow cell.
+#define TSAN_SHADOW_COUNT 4
const uptr kShadowCnt = 4;
#endif
// Shadow memory is kShadowMultiplier times larger than user memory.
const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+// This many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
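+// Together these mean the meta shadow occupies kMetaShadowSize /
+// kMetaShadowCell == 1/2 of user memory: one 4-byte cell per 8 user bytes.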
+
#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY
const bool kCollectHistory = false;
#else
class ReportDesc;
class RegionAlloc;
class StackTrace;
-struct MBlock;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
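+// (The layout is 8-byte siz + 4-byte stk + 2-byte tid == 14 bytes, padded
+// to 16 by the u64 alignment requirement, which the check above pins down.)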
} // namespace __tsan
--- /dev/null
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// an index onto the real pointer. The index is a u32, that is, half the
+// size of uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ DenseSlabAlloc() {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+ Die();
+ }
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T();
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
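
A minimal usage sketch of DenseSlabAlloc (hypothetical element type SomeT and
sizes, not part of the patch): indices come from a per-thread cache and are
translated to pointers with Map().

    struct SomeT { u32 a; u32 b; };  // any type with sizeof >= sizeof(u32)
    typedef DenseSlabAlloc<SomeT, 1<<10, 1<<10> SomeAlloc;

    SomeAlloc alloc;
    SomeAlloc::Cache cache;
    alloc.InitCache(&cache);
    u32 idx = alloc.Alloc(&cache);   // grab a free index (0 is reserved as invalid)
    SomeT *p = alloc.Map(idx);       // translate index -> pointer
    p->a = 42;
    alloc.Free(&cache, idx);         // return the index to the local cache
    alloc.FlushCache(&cache);        // push cached indices back to the shared freelist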
return fd < 0 || fd >= kTableSize;
}
-static FdSync *allocsync() {
- FdSync *s = (FdSync*)internal_alloc(MBlockFD, sizeof(FdSync));
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync));
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
CHECK_NE(s, &fdctx.globsync);
CHECK_NE(s, &fdctx.filesync);
CHECK_NE(s, &fdctx.socksync);
- SyncVar *v = ctx->synctab.GetAndRemove(thr, pc, (uptr)s);
- if (v)
- DestroyAndFree(v);
- internal_free(s);
+ user_free(thr, pc, s);
}
}
}
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
- FdSync *s = allocsync();
+ FdSync *s = allocsync(thr, pc);
init(thr, pc, rfd, ref(s));
init(thr, pc, wfd, ref(s));
unref(thr, pc, s);
DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
if (bogusfd(fd))
return;
- init(thr, pc, fd, allocsync());
+ init(thr, pc, fd, allocsync(thr, pc));
}
void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
ParseFlag(env, &f->enable_annotations, "enable_annotations", "");
ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", "");
ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", "");
- ParseFlag(env, &f->suppress_java, "suppress_java", "");
ParseFlag(env, &f->report_bugs, "report_bugs", "");
ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", "");
ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", "");
ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", "");
ParseFlag(env, &f->report_atomic_races, "report_atomic_races", "");
ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", "");
- ParseFlag(env, &f->suppressions, "suppressions", "");
- ParseFlag(env, &f->print_suppressions, "print_suppressions", "");
ParseFlag(env, &f->print_benign, "print_benign", "");
ParseFlag(env, &f->exitcode, "exitcode", "");
ParseFlag(env, &f->halt_on_error, "halt_on_error", "");
f->enable_annotations = true;
f->suppress_equal_stacks = true;
f->suppress_equal_addresses = true;
- f->suppress_java = false;
f->report_bugs = true;
f->report_thread_leaks = true;
f->report_destroy_locked = true;
f->report_signal_unsafe = true;
f->report_atomic_races = true;
f->force_seq_cst_atomics = false;
- f->suppressions = "";
- f->print_suppressions = false;
f->print_benign = false;
f->exitcode = 66;
f->halt_on_error = false;
// DDFlags
f->second_deadlock_stack = false;
- SetCommonFlagsDefaults(f);
+ CommonFlags *cf = common_flags();
+ SetCommonFlagsDefaults(cf);
// Override some common flags defaults.
- f->allow_addr2line = true;
+ cf->allow_addr2line = true;
+ cf->detect_deadlocks = true;
+ cf->print_suppressions = false;
// Let a frontend override.
ParseFlags(f, __tsan_default_options());
- ParseCommonFlagsFromString(f, __tsan_default_options());
+ ParseCommonFlagsFromString(cf, __tsan_default_options());
// Override from command line.
ParseFlags(f, env);
- ParseCommonFlagsFromString(f, env);
-
- // Copy back to common flags.
- *common_flags() = *f;
+ ParseCommonFlagsFromString(cf, env);
// Sanity check.
if (!f->report_bugs) {
f->report_signal_unsafe = false;
}
- if (f->help) PrintFlagDescriptions();
+ if (cf->help) PrintFlagDescriptions();
if (f->history_size < 0 || f->history_size > 7) {
Printf("ThreadSanitizer: incorrect value for history_size"
namespace __tsan {
-struct Flags : CommonFlags, DDFlags {
+struct Flags : DDFlags {
// Enable dynamic annotations, otherwise they are no-ops.
bool enable_annotations;
// Suppress a race report if we've already output another race report
// Suppress a race report if we've already output another race report
// on the same address.
bool suppress_equal_addresses;
- // Suppress weird race reports that can be seen if JVM is embed
- // into the process.
- bool suppress_java;
// Turns off bug reporting entirely (useful for benchmarking).
bool report_bugs;
// Report thread leaks at exit?
// If set, all atomics are effectively sequentially consistent (seq_cst),
// regardless of what user actually specified.
bool force_seq_cst_atomics;
- // Suppressions filename.
- const char *suppressions;
- // Print matched suppressions at exit.
- bool print_suppressions;
// Print matched "benign" races at exit.
bool print_benign;
// Override exit status if something was reported.
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
-extern "C" int pthread_mutexattr_gettype(void *a, int *type);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
extern "C" int pthread_yield();
extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
#define errno (*__errno_location())
+// 16K loaded modules should be enough for everyone.
+static const uptr kMaxModules = 1 << 14;
+static LoadedModule *modules;
+static uptr nmodules;
+
struct sigaction_t {
union {
sighandler_t sa_handler;
};
struct SignalContext {
- int in_blocking_func;
int int_signal_send;
- int pending_signal_count;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
};
}
void InitializeLibIgnore() {
- libignore()->Init(*GetSuppressionContext());
+ libignore()->Init(*SuppressionContext::Get());
libignore()->OnLibraryLoaded(0);
}
static SignalContext *SigCtx(ThreadState *thr) {
SignalContext *ctx = (SignalContext*)thr->signal_ctx;
- if (ctx == 0 && thr->is_alive) {
+ if (ctx == 0 && !thr->is_dead) {
ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext");
MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
thr->signal_ctx = ctx;
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
+ CheckNoLocks(thr_);
}
}
if (REAL(func) == 0) { \
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
- } \
+ } \
if (thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
struct BlockingCall {
explicit BlockingCall(ThreadState *thr)
- : ctx(SigCtx(thr)) {
- ctx->in_blocking_func++;
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
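+ // Process any signals queued before this point: set the flag and, if
+ // something is already pending, drop the flag and drain the queue.
+ // A signal that arrives once in_blocking_func is set is handled
+ // synchronously by the signal handler itself, so nothing can be left
+ // pending while we sleep in the blocking call.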
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
}
~BlockingCall() {
- ctx->in_blocking_func--;
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
}
+ ThreadState *thr;
SignalContext *ctx;
-
- // When we are in a "blocking call", we process signals asynchronously
- // (right when they arrive). In this context we do not expect to be
- // executing any user/runtime code. The known interceptor sequence when
- // this is not true is: pthread_join -> munmap(stack). It's fine
- // to ignore munmap in this case -- we handle stack shadow separately.
- ScopedIgnoreInterceptors ignore_interceptors;
};
TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
return res;
}
-TSAN_INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
- SCOPED_INTERCEPTOR_RAW(dlopen, filename, flag);
- void *res = REAL(dlopen)(filename, flag);
- libignore()->OnLibraryLoaded(filename);
- return res;
-}
-
-TSAN_INTERCEPTOR(int, dlclose, void *handle) {
- SCOPED_INTERCEPTOR_RAW(dlclose, handle);
- int res = REAL(dlclose)(handle);
- libignore()->OnLibraryUnloaded();
- return res;
-}
-
class AtExitContext {
public:
AtExitContext()
: mtx_(MutexTypeAtExit, StatMtxAtExit)
- , pos_() {
+ , stack_(MBlockAtExit) {
}
- typedef void(*atexit_t)();
+ typedef void(*atexit_cb_t)();
int atexit(ThreadState *thr, uptr pc, bool is_on_exit,
- atexit_t f, void *arg) {
+ atexit_cb_t f, void *arg, void *dso) {
Lock l(&mtx_);
- if (pos_ == kMaxAtExit)
- return 1;
Release(thr, pc, (uptr)this);
- stack_[pos_] = f;
- args_[pos_] = arg;
- is_on_exits_[pos_] = is_on_exit;
- pos_++;
+ atexit_t *a = stack_.PushBack();
+ a->cb = f;
+ a->arg = arg;
+ a->dso = dso;
+ a->is_on_exit = is_on_exit;
return 0;
}
void exit(ThreadState *thr, uptr pc) {
for (;;) {
- atexit_t f = 0;
- void *arg = 0;
- bool is_on_exit = false;
+ atexit_t a = {};
{
Lock l(&mtx_);
- if (pos_) {
- pos_--;
- f = stack_[pos_];
- arg = args_[pos_];
- is_on_exit = is_on_exits_[pos_];
+ if (stack_.Size() != 0) {
+ a = stack_[stack_.Size() - 1];
+ stack_.PopBack();
Acquire(thr, pc, (uptr)this);
}
}
- if (f == 0)
+ if (a.cb == 0)
break;
- DPrintf("#%d: executing atexit func %p\n", thr->tid, f);
- if (is_on_exit)
- ((void(*)(int status, void *arg))f)(0, arg);
+ VPrintf(2, "#%d: executing atexit func %p(%p) dso=%p\n",
+ thr->tid, a.cb, a.arg, a.dso);
+ if (a.is_on_exit)
+ ((void(*)(int status, void *arg))a.cb)(0, a.arg);
else
- ((void(*)(void *arg, void *dso))f)(arg, 0);
+ ((void(*)(void *arg, void *dso))a.cb)(a.arg, a.dso);
}
}
private:
- static const int kMaxAtExit = 128;
+ struct atexit_t {
+ atexit_cb_t cb;
+ void *arg;
+ void *dso;
+ bool is_on_exit;
+ };
+
+ static const int kMaxAtExit = 1024;
Mutex mtx_;
- atexit_t stack_[kMaxAtExit];
- void *args_[kMaxAtExit];
- bool is_on_exits_[kMaxAtExit];
- int pos_;
+ Vector<atexit_t> stack_;
};
static AtExitContext *atexit_ctx;
// We want to setup the atexit callback even if we are in ignored lib
// or after fork.
SCOPED_INTERCEPTOR_RAW(atexit, f);
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0);
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, 0, 0);
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg);
+ return atexit_ctx->atexit(thr, pc, true, (void(*)())f, arg, 0);
+}
+
+bool IsStaticModule(void *dso) {
+ if (modules == 0)
+ return false;
+ for (uptr i = 0; i < nmodules; i++) {
+ if (modules[i].containsAddress((uptr)dso))
+ return true;
+ }
+ return false;
}
TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
return 0;
SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
- if (dso) {
- // Memory allocation in __cxa_atexit will race with free during exit,
- // because we do not see synchronization around atexit callback list.
- ThreadIgnoreBegin(thr, pc);
- int res = REAL(__cxa_atexit)(f, arg, dso);
- ThreadIgnoreEnd(thr, pc);
- return res;
- }
- return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg);
+ // If it's the main executable or a statically loaded library,
+ // we will call the callback.
+ if (dso == 0 || IsStaticModule(dso))
+ return atexit_ctx->atexit(thr, pc, false, (void(*)())f, arg, dso);
+
+ // Dynamically loaded module: we don't know when to call the callback for it.
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(__cxa_atexit)(f, arg, dso);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
}
// Cleanup old bufs.
buf->sp = sp;
buf->mangled_sp = mangled_sp;
buf->shadow_stack_pos = thr->shadow_stack_pos;
+ SignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
}
static void LongJmp(ThreadState *thr, uptr *env) {
// Unwind the stack.
while (thr->shadow_stack_pos > buf->shadow_stack_pos)
FuncExit(thr);
+ SignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
return;
}
TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
- return user_alloc_usable_size(thr, pc, p);
+ return user_alloc_usable_size(p);
}
#define OPERATOR_NEW_BODY(mangled_name) \
return user_alloc(thr, pc, sz, align);
}
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
+
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
return user_alloc(thr, pc, sz, GetPageSizeCached());
bool recursive = false;
if (a) {
int type = 0;
- if (pthread_mutexattr_gettype(a, &type) == 0)
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
recursive = (type == PTHREAD_MUTEX_RECURSIVE
|| type == PTHREAD_MUTEX_RECURSIVE_NP);
}
SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
int res = REAL(pthread_rwlock_tryrdlock)(m);
if (res == 0) {
- MutexLock(thr, pc, (uptr)m, /*rec=*/1, /*try_lock=*/true);
+ MutexReadLock(thr, pc, (uptr)m, /*try_lock=*/true);
}
return res;
}
namespace __tsan {
-static void CallUserSignalHandler(ThreadState *thr, bool sync, bool sigact,
- int sig, my_siginfo_t *info, void *uctx) {
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig, my_siginfo_t *info, void *uctx) {
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
errno = 99;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeErrnoInSignal);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ rep.AddStack(&stack, true);
+ OutputReport(thr, rep);
}
}
errno = saved_errno;
void ProcessPendingSignals(ThreadState *thr) {
SignalContext *sctx = SigCtx(thr);
- if (sctx == 0 || sctx->pending_signal_count == 0 || thr->in_signal_handler)
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
return;
- thr->in_signal_handler = true;
- sctx->pending_signal_count = 0;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
// These are too big for stack.
static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
REAL(sigfillset)(&emptyset);
signal->armed = false;
if (sigactions[sig].sa_handler != SIG_DFL
&& sigactions[sig].sa_handler != SIG_IGN) {
- CallUserSignalHandler(thr, false, signal->sigaction,
+ CallUserSignalHandler(thr, false, true, signal->sigaction,
sig, &signal->siginfo, &signal->ctx);
}
}
}
pthread_sigmask(SIG_SETMASK, &oldset, 0);
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}
} // namespace __tsan
// If we are in a blocking function, we can safely process it now
// (but check if we are in a recursive interceptor,
// i.e. pthread_join()->munmap()).
- (sctx && sctx->in_blocking_func == 1)) {
- CHECK_EQ(thr->in_signal_handler, false);
- thr->in_signal_handler = true;
- if (sctx && sctx->in_blocking_func == 1) {
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
// We ignore interceptors in blocking functions,
// temporarily enable them again while we are calling the user function.
int const i = thr->ignore_interceptors;
thr->ignore_interceptors = 0;
- CallUserSignalHandler(thr, sync, sigact, sig, info, ctx);
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
thr->ignore_interceptors = i;
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
} else {
- CallUserSignalHandler(thr, sync, sigact, sig, info, ctx);
+ // Be very conservative about when we do acquire in this case.
+ // It's unsafe to do acquire in async handlers, because ThreadState
+ // can be in an inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous, and its handlers
+ // can actually need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
}
- CHECK_EQ(thr->in_signal_handler, true);
- thr->in_signal_handler = false;
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
return;
}
internal_memcpy(&signal->siginfo, info, sizeof(*info));
if (ctx)
internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
- sctx->pending_signal_count++;
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
}
}
else
newact.sa_handler = rtl_sighandler;
}
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
int res = REAL(sigaction)(sig, &newact, 0);
return res;
}
return res;
}
-// Linux kernel has a bug that leads to kernel deadlock if a process
-// maps TBs of memory and then calls mlock().
-static void MlockIsUnsupported() {
- static atomic_uint8_t printed;
- if (atomic_exchange(&printed, 1, memory_order_relaxed))
- return;
- VPrintf(1, "INFO: ThreadSanitizer ignores mlock/munlock[all]\n");
-}
-
-TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, mlockall, int flags) {
- MlockIsUnsupported();
- return 0;
-}
-
-TSAN_INTERCEPTOR(int, munlockall, void) {
- MlockIsUnsupported();
- return 0;
-}
-
TSAN_INTERCEPTOR(int, fork, int fake) {
if (cur_thread()->in_symbolizer)
return REAL(fork)(fake);
#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Causes interceptor recursion (getaddrinfo() and fopen())
#undef SANITIZER_INTERCEPT_GETADDRINFO
+// These interceptors do not seem to be strictly necessary for tsan.
+// But we see cases where the interceptors consume 70% of execution time.
+// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
+// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
+// function "writes to" the buffer. Then, the same memory is "written to"
+// twice, first as buf and then as pwbufp (both of them refer to the same
+// addresses).
+#undef SANITIZER_INTERCEPT_GETPWENT
+#undef SANITIZER_INTERCEPT_GETPWENT_R
+#undef SANITIZER_INTERCEPT_FGETPWENT
+#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
ctx = (void *)&_ctx; \
(void) ctx;
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
Acquire(thr, pc, File2addr(path)); \
if (file) { \
if (fd >= 0) FdClose(thr, pc, fd); \
}
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
TSAN_INTERCEPT(gettimeofday);
TSAN_INTERCEPT(getaddrinfo);
- TSAN_INTERCEPT(mlock);
- TSAN_INTERCEPT(munlock);
- TSAN_INTERCEPT(mlockall);
- TSAN_INTERCEPT(munlockall);
-
TSAN_INTERCEPT(fork);
TSAN_INTERCEPT(vfork);
- TSAN_INTERCEPT(dlopen);
- TSAN_INTERCEPT(dlclose);
TSAN_INTERCEPT(on_exit);
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
}
FdInit();
+
+ // Remember the list of loaded libraries for the atexit interceptors.
+ modules = (LoadedModule*)MmapOrDie(sizeof(*modules)*kMaxModules,
+ "LoadedModule");
+ nmodules = GetListOfModules(modules, kMaxModules, 0);
}
void *internal_start_thread(void(*func)(void *arg), void *arg) {
~ScopedAnnotation() {
FuncExit(thr_);
+ CheckNoLocks(thr_);
}
private:
ThreadState *const thr_;
static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
ExpectRace *race = FindRace(list, addr, size);
- if (race == 0 && AlternativeAddress(addr))
- race = FindRace(list, AlternativeAddress(addr), size);
if (race == 0)
return false;
DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
using namespace __tsan; // NOLINT
-#define SCOPED_ATOMIC(func, ...) \
- const uptr callpc = (uptr)__builtin_return_address(0); \
- uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
- mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
- ThreadState *const thr = cur_thread(); \
- if (thr->ignore_interceptors) \
- return NoTsanAtomic##func(__VA_ARGS__); \
- AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
- ScopedAtomic sa(thr, callpc, a, mo, __func__); \
- return Atomic##func(thr, pc, __VA_ARGS__); \
-/**/
-
// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)
+#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302))
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif
+#ifndef TSAN_GO
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
+#endif
// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
mo_seq_cst
} morder;
-class ScopedAtomic {
- public:
- ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
- morder mo, const char *func)
- : thr_(thr) {
- FuncEntry(thr_, pc);
- DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
- }
- ~ScopedAtomic() {
- ProcessPendingSignals(thr_);
- FuncExit(thr_);
- }
- private:
- ThreadState *thr_;
-};
-
-static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
- StatInc(thr, StatAtomic);
- StatInc(thr, t);
- StatInc(thr, size == 1 ? StatAtomic1
- : size == 2 ? StatAtomic2
- : size == 4 ? StatAtomic4
- : size == 8 ? StatAtomic8
- : StatAtomic16);
- StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
- : mo == mo_consume ? StatAtomicConsume
- : mo == mo_acquire ? StatAtomicAcquire
- : mo == mo_release ? StatAtomicRelease
- : mo == mo_acq_rel ? StatAtomicAcq_Rel
- : StatAtomicSeq_Cst);
-}
-
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
|| mo == mo_acquire || mo == mo_seq_cst;
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
a128 cmp = *v;
// this leads to false negatives only in very obscure cases.
}
+#ifndef TSAN_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return (atomic_uint8_t*)a;
}
static atomic_uint16_t *to_atomic(const volatile a16 *a) {
return (atomic_uint16_t*)a;
}
+#endif
static atomic_uint32_t *to_atomic(const volatile a32 *a) {
return (atomic_uint32_t*)a;
return atomic_load(to_atomic(a), to_mo(mo));
}
+#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
}
+#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return NoTsanAtomicLoad(a, mo);
}
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
AcquireImpl(thr, pc, &s->clock);
T v = NoTsanAtomicLoad(a, mo);
s->mtx.ReadUnlock();
atomic_store(to_atomic(a), v, to_mo(mo));
}
+#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
}
+#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
return;
}
__sync_synchronize();
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = 0;
if (mo != mo_relaxed) {
- s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
+#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
morder mo, morder fmo) {
a128 old = *c;
*c = cur;
return false;
}
+#endif
template<typename T>
-static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
- return NoTsanAtomicCAS(a, &c, v, mo, fmo);
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
}
template<typename T>
SyncVar *s = 0;
bool write_lock = mo != mo_acquire && mo != mo_consume;
if (mo != mo_relaxed) {
- s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
return c;
}
+#ifndef TSAN_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
// FIXME(dvyukov): not implemented.
__sync_synchronize();
}
+#endif
+
+// Interface functions follow.
+#ifndef TSAN_GO
+
+// C/C++
+
+#define SCOPED_ATOMIC(func, ...) \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
+ mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+ ThreadState *const thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
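+
+// For illustration, a sketch (assumed shape, not part of the patch) of how
+// the C/C++ interface functions below expand through SCOPED_ATOMIC:
+//   SANITIZER_INTERFACE_ATTRIBUTE
+//   a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+//     SCOPED_ATOMIC(Load, a, mo);
+//   }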
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
+
+#else // #ifndef TSAN_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #ifndef TSAN_GO
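
For reference, the Go entry points above unpack their operands from the single
buffer a; the layout is read directly off the casts (32-bit fetch_add shown):

    // a + 0  : a32**  address of the atomic variable
    // a + 8  : a32    operand
    // a + 16 : a32    out-slot for the previous value
    // (the compare_exchange variants pass the comparand at a+8, the new value
    // at a+12 or a+16, and write a success bool after the operands)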
using namespace __tsan; // NOLINT
-namespace __tsan {
-
-const uptr kHeapShadow = 0x300000000000ull;
-const uptr kHeapAlignment = 8;
+const jptr kHeapAlignment = 8;
-struct BlockDesc {
- bool begin;
- Mutex mtx;
- SyncVar *head;
-
- BlockDesc()
- : mtx(MutexTypeJavaMBlock, StatMtxJavaMBlock)
- , head() {
- CHECK_EQ(begin, false);
- begin = true;
- }
-
- ~BlockDesc() {
- CHECK_EQ(begin, true);
- begin = false;
- ThreadState *thr = cur_thread();
- SyncVar *s = head;
- while (s) {
- SyncVar *s1 = s->next;
- StatInc(thr, StatSyncDestroyed);
- s->mtx.Lock();
- s->mtx.Unlock();
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
- s = s1;
- }
- }
-};
+namespace __tsan {
struct JavaContext {
const uptr heap_begin;
const uptr heap_size;
- BlockDesc *heap_shadow;
JavaContext(jptr heap_begin, jptr heap_size)
: heap_begin(heap_begin)
, heap_size(heap_size) {
- uptr size = heap_size / kHeapAlignment * sizeof(BlockDesc);
- heap_shadow = (BlockDesc*)MmapFixedNoReserve(kHeapShadow, size);
- if ((uptr)heap_shadow != kHeapShadow) {
- Printf("ThreadSanitizer: failed to mmap Java heap shadow\n");
- Die();
- }
}
};
static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
static JavaContext *jctx;
-static BlockDesc *getblock(uptr addr) {
- uptr i = (addr - jctx->heap_begin) / kHeapAlignment;
- return &jctx->heap_shadow[i];
-}
-
-static uptr USED getmem(BlockDesc *b) {
- uptr i = b - jctx->heap_shadow;
- uptr p = jctx->heap_begin + i * kHeapAlignment;
- CHECK_GE(p, jctx->heap_begin);
- CHECK_LT(p, jctx->heap_begin + jctx->heap_size);
- return p;
-}
-
-static BlockDesc *getblockbegin(uptr addr) {
- for (BlockDesc *b = getblock(addr);; b--) {
- CHECK_GE(b, jctx->heap_shadow);
- if (b->begin)
- return b;
- }
- return 0;
-}
-
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create) {
- if (jctx == 0 || addr < jctx->heap_begin
- || addr >= jctx->heap_begin + jctx->heap_size)
- return 0;
- BlockDesc *b = getblockbegin(addr);
- DPrintf("#%d: GetJavaSync %p->%p\n", thr->tid, addr, b);
- Lock l(&b->mtx);
- SyncVar *s = b->head;
- for (; s; s = s->next) {
- if (s->addr == addr) {
- DPrintf("#%d: found existing sync for %p\n", thr->tid, addr);
- break;
- }
- }
- if (s == 0 && create) {
- DPrintf("#%d: creating new sync for %p\n", thr->tid, addr);
- s = ctx->synctab.Create(thr, pc, addr);
- s->next = b->head;
- b->head = s;
- }
- if (s) {
- if (write_lock)
- s->mtx.Lock();
- else
- s->mtx.ReadLock();
- }
- return s;
-}
-
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr) {
- // We do not destroy Java mutexes other than in __tsan_java_free().
- return 0;
-}
-
} // namespace __tsan
#define SCOPED_JAVA_FUNC(func) \
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *b = getblock(ptr);
- new(b) BlockDesc();
+ OnUserAlloc(thr, pc, ptr, size, false);
}
void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- BlockDesc *beg = getblock(ptr);
- BlockDesc *end = getblock(ptr + size);
- for (BlockDesc *b = beg; b != end; b++) {
- if (b->begin)
- b->~BlockDesc();
- }
+ ctx->metamap.FreeRange(thr, pc, ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
CHECK_GE(dst, jctx->heap_begin);
CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
- CHECK(dst >= src + size || src >= dst + size);
+ CHECK_NE(dst, src);
+ CHECK_NE(size, 0);
// Assuming it's not running concurrently with threads that do
// memory accesses and mutex operations (stop-the-world phase).
- { // NOLINT
- BlockDesc *s = getblock(src);
- BlockDesc *d = getblock(dst);
- BlockDesc *send = getblock(src + size);
- for (; s != send; s++, d++) {
- CHECK_EQ(d->begin, false);
- if (s->begin) {
- DPrintf("#%d: moving block %p->%p\n", thr->tid, getmem(s), getmem(d));
- new(d) BlockDesc;
- d->head = s->head;
- for (SyncVar *sync = d->head; sync; sync = sync->next) {
- uptr newaddr = sync->addr - src + dst;
- DPrintf("#%d: moving sync %p->%p\n", thr->tid, sync->addr, newaddr);
- sync->addr = newaddr;
- }
- s->head = 0;
- s->~BlockDesc();
- }
- }
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
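+ // If the regions overlap with dst above src, copy from the high end down
+ // so every source word is read before the destination writes can clobber
+ // it (hypothetical example: src 0x100, dst 0x120, size 0x40).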
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
}
-
- { // NOLINT
- u64 *s = (u64*)MemToShadow(src);
- u64 *d = (u64*)MemToShadow(dst);
- u64 *send = (u64*)MemToShadow(src + size);
- for (; s != send; s++, d++) {
- *d = *s;
- *s = 0;
- }
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
}
}
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
void __tsan_java_mutex_lock(jptr addr) {
SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
// Callback for memory move by GC.
// Can be aggregated for several objects (preferably).
-// The ranges must not overlap.
+// The ranges can overlap.
void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures necessary synchronization between
+// java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
// Mutex lock.
// Addr is any unique address associated with the mutex.
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
// May be overridden by the front-end.
-extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
+extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-extern "C" void WEAK __tsan_free_hook(void *ptr) {
+extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
namespace __tsan {
-COMPILER_CHECK(sizeof(MBlock) == 16);
-
-void MBlock::Lock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- for (int iter = 0;; iter++) {
- if (v & 1) {
- if (iter < 10)
- proc_yield(20);
- else
- internal_sched_yield();
- v = atomic_load(a, memory_order_relaxed);
- continue;
- }
- if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
- break;
- }
-}
-
-void MBlock::Unlock() {
- atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
- uptr v = atomic_load(a, memory_order_relaxed);
- DCHECK(v & 1);
- atomic_store(a, v & ~1, memory_order_relaxed);
-}
-
struct MapUnmapCallback {
void OnMap(uptr p, uptr size) const { }
void OnUnmap(uptr p, uptr size) const {
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
- if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
+ if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 ||
+ !flags()->report_signal_unsafe)
return;
StackTrace stack;
stack.ObtainCurrent(thr, pc);
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeSignalUnsafe);
if (!IsFiredSuppression(ctx, rep, stack)) {
- rep.AddStack(&stack);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ rep.AddStack(&stack, true);
+ OutputReport(thr, rep);
}
}
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
- MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
- b->Init(sz, thr->tid, CurrentStackId(thr, pc));
- if (ctx && ctx->initialized) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
- else
- MemoryResetRange(thr, pc, (uptr)p, sz);
- }
- DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
SignalUnsafeCall(thr, pc);
return p;
}
void user_free(ThreadState *thr, uptr pc, void *p) {
- CHECK_NE(p, (void*)0);
- DPrintf("#%d: free(%p)\n", thr->tid, p);
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- if (b->ListHead()) {
- MBlock::ScopedLock l(b);
- for (SyncVar *s = b->ListHead(); s;) {
- SyncVar *res = s;
- s = s->next;
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- DestroyAndFree(res);
- }
- b->ListReset();
- }
- if (ctx && ctx->initialized) {
- if (thr->ignore_reads_and_writes == 0)
- MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
- }
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
allocator()->Deallocate(&thr->alloc_cache, p);
SignalUnsafeCall(thr, pc);
}
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
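The split into OnUserAlloc/OnUserFree lets other allocation paths (e.g. the Java heap hooks) share the bookkeeping: the block is registered or unregistered in the metamap, while the write flag controls whether the runtime also imitates a write over the range (so races with the allocation itself are caught) or merely resets/marks the shadow. A condensed sketch of the contract, with illustrative values (thr/pc are the current ThreadState and caller PC):

  uptr p = /* block obtained from some allocator */ 0x7d0000010000ULL;
  OnUserAlloc(thr, pc, p, /*sz=*/64, /*write=*/true);   // metamap + imitate write
  // ... the block is used ...
  OnUserFree(thr, pc, p, /*write=*/true);               // metamap + mark freed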
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
if (p2 == 0)
return 0;
if (p) {
- MBlock *b = user_mblock(thr, p);
- CHECK_NE(b, 0);
- internal_memcpy(p2, p, min(b->Size(), sz));
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
}
}
if (p)
return p2;
}
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p) {
+uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b ? b->Size() : 0;
-}
-
-MBlock *user_mblock(ThreadState *thr, void *p) {
- CHECK_NE(p, 0);
- Allocator *a = allocator();
- void *b = a->GetBlockBegin(p);
- if (b == 0)
- return 0;
- return (MBlock*)a->GetMetaData(b);
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ return b ? b->siz : 0;
}
void invoke_malloc_hook(void *ptr, uptr size) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_malloc_hook(ptr, size);
+ __sanitizer_malloc_hook(ptr, size);
}
void invoke_free_hook(void *ptr) {
ThreadState *thr = cur_thread();
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
- __tsan_free_hook(ptr);
+ __sanitizer_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
ThreadState *thr = cur_thread();
- CHECK_LE(sz, InternalSizeClassMap::kMaxSize);
if (thr->nomalloc) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
using namespace __tsan;
extern "C" {
-uptr __tsan_get_current_allocated_bytes() {
+uptr __sanitizer_get_current_allocated_bytes() {
uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
return stats[AllocatorStatAllocated];
}
-uptr __tsan_get_heap_size() {
+uptr __sanitizer_get_heap_size() {
uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
return stats[AllocatorStatMapped];
}
-uptr __tsan_get_free_bytes() {
+uptr __sanitizer_get_free_bytes() {
return 1;
}
-uptr __tsan_get_unmapped_bytes() {
+uptr __sanitizer_get_unmapped_bytes() {
return 1;
}
-uptr __tsan_get_estimated_allocated_size(uptr size) {
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
return size;
}
-bool __tsan_get_ownership(void *p) {
+int __sanitizer_get_ownership(const void *p) {
return allocator()->GetBlockBegin(p) != 0;
}
-uptr __tsan_get_allocated_size(void *p) {
- if (p == 0)
- return 0;
- p = allocator()->GetBlockBegin(p);
- if (p == 0)
- return 0;
- MBlock *b = (MBlock*)allocator()->GetMetaData(p);
- return b->Size();
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
}
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
allocator()->SwallowCache(&thr->alloc_cache);
internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
+ ctx->metamap.OnThreadIdle(thr);
}
} // extern "C"
void user_free(ThreadState *thr, uptr pc, void *p);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
-uptr user_alloc_usable_size(ThreadState *thr, uptr pc, void *p);
-// Given the pointer p into a valid allocated block,
-// returns the descriptor of the block.
-MBlock *user_mblock(ThreadState *thr, void *p);
+uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.
void invoke_malloc_hook(void *ptr, uptr size);
MBlockSuppression,
MBlockExpectRace,
MBlockSignal,
- MBlockFD,
MBlockJmpBuf,
// This must be the last.
/*0 MutexTypeInvalid*/ {},
/*1 MutexTypeTrace*/ {MutexTypeLeaf},
/*2 MutexTypeThreads*/ {MutexTypeReport},
- /*3 MutexTypeReport*/ {MutexTypeSyncTab, MutexTypeSyncVar,
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
MutexTypeMBlock, MutexTypeJavaMBlock},
/*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
- /*5 MutexTypeSyncTab*/ {MutexTypeSyncVar},
+ /*5 MutexTypeSyncTab*/ {}, // unused
/*6 MutexTypeSlab*/ {MutexTypeLeaf},
/*7 MutexTypeAnnotations*/ {},
- /*8 MutexTypeAtExit*/ {MutexTypeSyncTab},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
/*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
/*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
/*11 MutexTypeDDetector*/ {},
CHECK(locked_[t]);
locked_[t] = 0;
}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
+#endif
+
+void CheckNoLocks(ThreadState *thr) {
+#if TSAN_DEBUG && !TSAN_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
#endif
+}
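CheckNoLocks is intended as a cheap invariant check at points where the runtime must have released all internal mutexes, e.g. when returning to user code; a sketch of a call site (placement is hypothetical):

  // Hypothetical epilogue of an interceptor: in TSAN_DEBUG builds this
  // verifies every internal Lock() was paired with an Unlock().
  void InterceptorExit(ThreadState *thr) {
    CheckNoLocks(thr);  // compiles to nothing unless TSAN_DEBUG && !TSAN_GO
  }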
const uptr kUnlocked = 0;
const uptr kWriteLock = 1;
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !TSAN_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS
+#if TSAN_COLLECT_STATS && !TSAN_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
InternalDeadlockDetector();
void Lock(MutexType t);
void Unlock(MutexType t);
+ void CheckNoLocks();
private:
u64 seq_;
u64 locked_[MutexTypeCount];
void InitializeMutex();
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
} // namespace __tsan
#endif // TSAN_MUTEX_H
C++ linux memory layout:
0000 0000 0000 - 03c0 0000 0000: protected
03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 6000 0000 0000: protected
+1000 0000 0000 - 3000 0000 0000: protected
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: protected
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
-C++ COMPAT linux memory layout:
-0000 0000 0000 - 0400 0000 0000: protected
-0400 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 2900 0000 0000: protected
-2900 0000 0000 - 2c00 0000 0000: modules
-2c00 0000 0000 - 6000 0000 0000: -
-6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7f00 0000 0000: -
-7f00 0000 0000 - 7fff ffff ffff: main thread stack
-
Go linux and darwin memory layout:
0000 0000 0000 - 0000 1000 0000: executable
0000 1000 0000 - 00f8 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 1000 0000 0000: -
1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 6000 0000 0000: -
+1460 0000 0000 - 2000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7fff ffff ffff: -
00e0 0000 0000 - 0100 0000 0000: -
0100 0000 0000 - 0560 0000 0000: shadow
0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07ff ffff ffff: -
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 07ff ffff ffff: -
*/
#ifndef TSAN_PLATFORM_H
static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
# if SANITIZER_WINDOWS
static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-# else
+static const uptr kMetaShadow = 0x076000000000ULL;
+static const uptr kMetaSize = 0x007000000000ULL;
+# else // if SANITIZER_WINDOWS
static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-# endif
-// TSAN_COMPAT_SHADOW is intended for COMPAT virtual memory layout,
-// when memory addresses are of the 0x2axxxxxxxxxx form.
-// The option is enabled with 'setarch x86_64 -L'.
-#elif defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-static const uptr kLinuxAppMemBeg = 0x290000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-static const uptr kAppMemGapBeg = 0x2c0000000000ULL;
-static const uptr kAppMemGapEnd = 0x7d0000000000ULL;
-#else
+static const uptr kMetaShadow = 0x300000000000ULL;
+static const uptr kMetaSize = 0x100000000000ULL;
+# endif // if SANITIZER_WINDOWS
+#else // defined(TSAN_GO)
+static const uptr kMetaShadow = 0x300000000000ULL;
+static const uptr kMetaSize = 0x100000000000ULL;
static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
#endif
// This has to be a macro to allow constant initialization of constants below.
#ifndef TSAN_GO
#define MemToShadow(addr) \
- (((addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+ ((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
+#define MemToMeta(addr) \
+ (u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \
+ / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
#else
#define MemToShadow(addr) \
- ((((addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+ (((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
+#define MemToMeta(addr) \
+ (u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \
+ / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
#endif
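
A worked example of the two mappings for the non-Go case. The constants are this era's defaults and should be treated as assumptions: kShadowCell = 8 (app bytes per shadow cell), kShadowCnt = 4 (shadow words per cell), kMetaShadowCell = 8 and kMetaShadowSize = 4 (one u32 of metainfo per 8 app bytes, the 2:1 ratio mentioned later in the patch):

  // Round the address down to its 8-byte cell, strip the app-memory mask,
  // then scale: x4 for shadow, /8*4 (i.e. /2) plus kMetaShadow for meta.
  uptr app  = 0x7d0000001238ULL;                      // heap address
  uptr cell = app & ~(kLinuxAppMemMsk | 7ULL);        // 8-byte cell base
  uptr shad = cell * 4;                               // MemToShadow(app)
  u32 *meta = (u32*)((cell / 8 * 4) | kMetaShadow);   // MemToMeta(app)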
static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
MemToShadow(kLinuxAppMemEnd) | 0xff;
static inline bool IsAppMem(uptr mem) {
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
- return (mem >= kLinuxAppMemBeg && mem < kAppMemGapBeg) ||
- (mem >= kAppMemGapEnd && mem <= kLinuxAppMemEnd);
+#if defined(TSAN_GO)
+ return mem <= kLinuxAppMemEnd;
#else
return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
#endif
#endif
}
-// For COMPAT mapping returns an alternative address
-// that mapped to the same shadow address.
-// COMPAT mapping is not quite one-to-one.
-static inline uptr AlternativeAddress(uptr addr) {
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
- return (addr & ~kLinuxAppMemMsk) | 0x280000000000ULL;
-#else
- return 0;
-#endif
-}
-
void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size);
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
uptr GetRSS();
-const char *InitializePlatform();
+void InitializePlatform();
void FinalizePlatform();
// The additional page is to catch shadow stack overflow as paging fault.
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
-#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
+#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
-#include <malloc.h>
+#endif
#ifdef sa_handler
# undef sa_handler
# undef sa_sigaction
#endif
-extern "C" struct mallinfo __libc_mallinfo();
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
namespace __tsan {
const uptr kPageSize = 4096;
+enum {
+ MemTotal = 0,
+ MemShadow = 1,
+ MemMeta = 2,
+ MemFile = 3,
+ MemMmap = 4,
+ MemTrace = 5,
+ MemHeap = 6,
+ MemOther = 7,
+ MemCount = 8,
+};
+
void FillProfileCallback(uptr start, uptr rss, bool file,
uptr *mem, uptr stats_size) {
- CHECK_EQ(7, stats_size);
- mem[6] += rss; // total
+ mem[MemTotal] += rss;
start >>= 40;
- if (start < 0x10) // shadow
- mem[0] += rss;
- else if (start >= 0x20 && start < 0x30) // compat modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x7e) // modules
- mem[file ? 1 : 2] += rss;
- else if (start >= 0x60 && start < 0x62) // traces
- mem[3] += rss;
- else if (start >= 0x7d && start < 0x7e) // heap
- mem[4] += rss;
- else // other
- mem[5] += rss;
+ if (start < 0x10)
+ mem[MemShadow] += rss;
+ else if (start >= 0x20 && start < 0x30)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (start >= 0x30 && start < 0x40)
+ mem[MemMeta] += rss;
+ else if (start >= 0x7e)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (start >= 0x60 && start < 0x62)
+ mem[MemTrace] += rss;
+ else if (start >= 0x7d && start < 0x7e)
+ mem[MemHeap] += rss;
+ else
+ mem[MemOther] += rss;
}
-void WriteMemoryProfile(char *buf, uptr buf_size) {
- uptr mem[7] = {};
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr mem[MemCount] = {};
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- char *buf_pos = buf;
- char *buf_end = buf + buf_size;
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "RSS %zd MB: shadow:%zd file:%zd mmap:%zd trace:%zd heap:%zd other:%zd\n",
- mem[6] >> 20, mem[0] >> 20, mem[1] >> 20, mem[2] >> 20,
- mem[3] >> 20, mem[4] >> 20, mem[5] >> 20);
- struct mallinfo mi = __libc_mallinfo();
- buf_pos += internal_snprintf(buf_pos, buf_end - buf_pos,
- "mallinfo: arena=%d mmap=%d fordblks=%d keepcost=%d\n",
- mi.arena >> 20, mi.hblkhd >> 20, mi.fordblks >> 20, mi.keepcost >> 20);
+ internal_snprintf(buf, buf_size,
+ "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd nthr=%zd/%zd\n",
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+ mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ nlive, nthread);
}
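An illustrative (made-up) output line under the new format, where nthr reports live/total threads:

  RSS 1024 MB: shadow:512 meta:96 file:64 mmap:128 trace:32 heap:160 other:32 nthr=5/16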
uptr GetRSS() {
- uptr mem[7] = {};
- __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
- return mem[6];
+ uptr fd = OpenFile("/proc/self/statm", false);
+ if ((sptr)fd < 0)
+ return 0;
+ char buf[64];
+ uptr len = internal_read(fd, buf, sizeof(buf) - 1);
+ internal_close(fd);
+ if ((sptr)len <= 0)
+ return 0;
+ buf[len] = 0;
+ // The format of the file is:
+ // 1084 89 69 11 0 79 0
+ // We need the second number which is RSS in 4K units.
+ char *pos = buf;
+ // Skip the first number.
+ while (*pos >= '0' && *pos <= '9')
+ pos++;
+ // Skip whitespace.
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
+ pos++;
+ // Read the number.
+ uptr rss = 0;
+ while (*pos >= '0' && *pos <= '9')
+ rss = rss * 10 + *pos++ - '0';
+ return rss * 4096;
}
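For the sample line in the comment above, the scanner skips "1084", reads 89, and returns 89 * 4096 = 364544 bytes of RSS. Note the page size is assumed to be 4K rather than queried, matching the kPageSize constant above.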
-
+#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
}
+#endif
void FlushShadowMemory() {
+#if SANITIZER_LINUX
StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
}
#ifndef TSAN_GO
Die();
}
}
-#endif
-#ifndef TSAN_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
}
void InitializeShadowMemory() {
+ // Map memory shadow.
uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
kLinuxShadowEnd - kLinuxShadowBeg);
if (shadow != kLinuxShadowBeg) {
"to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
Die();
}
+ // This memory range is used for thread stacks and large user mmaps.
+ // Frequently a thread uses only a small part of its stack, and similarly
+ // a program uses only a small part of a large mmap. On some programs
+ // we see a 20% memory usage reduction when huge pages are disabled for this range.
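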
+#ifdef MADV_NOHUGEPAGE
+ madvise((void*)MemToShadow(0x7f0000000000ULL),
+ 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
+#endif
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ kLinuxShadowBeg, kLinuxShadowEnd,
+ (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+
+ // Map meta shadow.
+ if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
+ Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
+ kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
+ Die();
+ }
+ if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
+ Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
+ kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
+ Die();
+ }
+ uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
+ if (meta != kMetaShadow) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and "
+ "to link with -pie (%p, %p).\n", meta, kMetaShadow);
+ Die();
+ }
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);
+
+ // Protect gaps.
const uptr kClosedLowBeg = 0x200000;
const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
+ const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
+ kMetaShadow);
+
ProtectRange(kClosedLowBeg, kClosedLowEnd);
ProtectRange(kClosedMidBeg, kClosedMidEnd);
- DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
+ VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kClosedMid %zx-%zx (%zuGB)\n",
+ VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
+ VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
kLinuxAppMemBeg, kLinuxAppMemEnd,
(kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- DPrintf("stack %zx\n", (uptr)&shadow);
+ VPrintf(2, "stack: %zx\n", (uptr)&shadow);
MapRodata();
}
#endif // #ifndef TSAN_GO
-static rlim_t getlim(int res) {
- rlimit rlim;
- CHECK_EQ(0, getrlimit(res, &rlim));
- return rlim.rlim_cur;
-}
-
-static void setlim(int res, rlim_t lim) {
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit rlim;
- rlim.rlim_cur = lim;
- rlim.rlim_max = lim;
- setrlimit(res, (rlimit*)&rlim);
-}
-
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- setlim(RLIMIT_CORE, 0);
- }
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ if (StackSizeIsUnlimited()) {
const uptr kMaxStackSize = 32 * 1024 * 1024;
VReport(1, "Program is run with unlimited stack size, which wouldn't "
"work with ThreadSanitizer.\n"
reexec = true;
}
- if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ if (!AddressSpaceIsUnlimited()) {
Report("WARNING: Program is run with limited virtual address space,"
" which wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with unlimited virtual address space.\n");
- setlim(RLIMIT_AS, -1);
+ SetAddressSpaceUnlimited();
reexec = true;
}
if (reexec)
InitTlsSize();
InitDataSeg();
#endif
- return GetEnv(kTsanOptionsEnv);
}
bool IsGlobalVar(uptr addr) {
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX
int cnt = 0;
__res_state *statp = (__res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
fds[cnt++] = statp->_u._ext.nssocks[i];
}
return cnt;
+#else
+ return 0;
+#endif
}
// Extract file descriptors passed via UNIX domain sockets.
void FlushShadowMemory() {
}
-void WriteMemoryProfile(char *buf, uptr buf_size) {
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
uptr GetRSS() {
}
#endif
-const char *InitializePlatform() {
- void *p = 0;
- if (sizeof(p) == 8) {
- // Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
- }
-
- return GetEnv(kTsanOptionsEnv);
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
}
void FinalizePlatform() {
void FlushShadowMemory() {
}
-void WriteMemoryProfile(char *buf, uptr buf_size) {
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
uptr GetRSS() {
return 0;
}
-const char *InitializePlatform() {
- return GetEnv(kTsanOptionsEnv);
+void InitializePlatform() {
}
void FinalizePlatform() {
namespace __tsan {
-class Decorator: private __sanitizer::AnsiColorDecorator {
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
public:
- Decorator() : __sanitizer::AnsiColorDecorator(PrintsToTtyCached()) { }
+ Decorator() : SanitizerCommonDecorator() { }
const char *Warning() { return Red(); }
const char *EndWarning() { return Default(); }
const char *Access() { return Blue(); }
char *file;
int line;
int col;
+ bool suppressable;
};
struct ReportMopMutex {
char *name;
char *file;
int line;
+ bool suppressable;
ReportStack *stack;
};
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into the tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
+
volatile int __tsan_resumed = 0;
extern "C" void __tsan_resume() {
uptr n_running_threads;
ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
InternalScopedBuffer<char> buf(4096);
- internal_snprintf(buf.data(), buf.size(), "%d: nthr=%d nlive=%d\n",
- i, n_threads, n_running_threads);
- internal_write(fd, buf.data(), internal_strlen(buf.data()));
- WriteMemoryProfile(buf.data(), buf.size());
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
internal_write(fd, buf.data(), internal_strlen(buf.data()));
}
fd_t mprof_fd = kInvalidFd;
if (flags()->profile_memory && flags()->profile_memory[0]) {
- InternalScopedBuffer<char> filename(4096);
- internal_snprintf(filename.data(), filename.size(), "%s.%d",
- flags()->profile_memory, (int)internal_getpid());
- uptr openrv = OpenFile(filename.data(), true);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
- &filename[0]);
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
} else {
- mprof_fd = openrv;
+ InternalScopedBuffer<char> filename(4096);
+ internal_snprintf(filename.data(), filename.size(), "%s.%d",
+ flags()->profile_memory, (int)internal_getpid());
+ uptr openrv = OpenFile(filename.data(), true);
+ if (internal_iserror(openrv)) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = openrv;
+ }
}
}
// Flush memory if requested.
if (flags()->flush_memory_ms > 0) {
if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: periodic memory flush\n");
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
FlushShadowMemory();
last_flush = NanoTime();
}
}
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
if (flags()->memory_limit_mb > 0) {
uptr rss = GetRSS();
uptr limit = uptr(flags()->memory_limit_mb) << 20;
- if (flags()->verbosity > 0) {
- Printf("ThreadSanitizer: memory flush check"
- " RSS=%llu LAST=%llu LIMIT=%llu\n",
- (u64)rss>>20, (u64)last_rss>>20, (u64)limit>>20);
- }
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
if (2 * rss > limit + last_rss) {
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: flushing memory due to RSS\n");
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
FlushShadowMemory();
rss = GetRSS();
- if (flags()->verbosity > 0)
- Printf("ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
}
last_rss = rss;
}
ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}
+#ifndef TSAN_GO
static void StopBackgroundThread() {
atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
internal_join_thread(ctx->background_thread);
ctx->background_thread = 0;
}
+#endif
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier);
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ } else {
+ // Mapping continuous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ MmapFixedNoReserve(meta_begin, meta_end - meta_begin);
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
}
void MapThreadTrace(uptr addr, uptr size) {
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
+ ctx = new(ctx_placeholder) Context;
+ const char *options = GetEnv(kTsanOptionsEnv);
+ InitializeFlags(&ctx->flags, options);
#ifndef TSAN_GO
InitializeAllocator();
#endif
InitializeInterceptors();
- const char *env = InitializePlatform();
+ InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
- ctx = new(ctx_placeholder) Context;
#ifndef TSAN_GO
InitializeShadowMemory();
#endif
- InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
- __sanitizer_set_report_path(flags()->log_path);
+ __sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
#ifndef TSAN_GO
InitializeLibIgnore();
- Symbolizer::Init(common_flags()->external_symbolizer_path);
- Symbolizer::Get()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
StartBackgroundThread();
+#ifndef TSAN_GO
SetSandboxingCallback(StopBackgroundThread);
- if (flags()->detect_deadlocks)
+#endif
+ if (common_flags()->detect_deadlocks)
ctx->dd = DDetector::Create(flags());
- if (ctx->flags.verbosity)
- Printf("***** Running under ThreadSanitizer v2 (pid %d) *****\n",
- (int)internal_getpid());
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
// Initialize thread 0.
int tid = ThreadCreate(thr, 0, 0, true);
}
int Finalize(ThreadState *thr) {
- Context *ctx = __tsan::ctx;
bool failed = false;
if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
ctx->report_mtx.Unlock();
#ifndef TSAN_GO
- if (ctx->flags.verbosity)
+ if (common_flags()->verbosity)
AllocatorPrintStats();
#endif
ctx->nmissed_expected);
}
- if (flags()->print_suppressions)
+ if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
#ifndef TSAN_GO
if (flags()->print_benign)
}
#endif
+#ifdef TSAN_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
- if (pc) {
+ if (pc != 0) {
+#ifndef TSAN_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
}
u32 id = StackDepotPut(thr->shadow_stack,
thr->shadow_stack_pos - thr->shadow_stack);
- if (pc)
+ if (pc != 0)
thr->shadow_stack_pos--;
return id;
}
*s = 0;
}
-static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
Shadow cur, Shadow old) {
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
#endif
}
-static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
- return old.epoch() >= thr->fast_synch_epoch;
-}
-
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
-ALWAYS_INLINE USED
-void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
while (size) {
int size1 = 1;
int kAccessSizeLog = kSizeLog1;
- if (size >= 8 && (addr & ~7) == ((addr + 8) & ~7)) {
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
size1 = 8;
kAccessSizeLog = kSizeLog8;
- } else if (size >= 4 && (addr & ~7) == ((addr + 4) & ~7)) {
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
size1 = 4;
kAccessSizeLog = kSizeLog4;
- } else if (size >= 2 && (addr & ~7) == ((addr + 2) & ~7)) {
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
size1 = 2;
kAccessSizeLog = kSizeLog2;
}
}
}
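
The +7/+3/+1 fix above makes the cell test cover the last byte of the candidate access rather than one past it. For example, with addr = 0x1000 and size >= 8, the old test compared 0x1000 & ~7 against (0x1000 + 8) & ~7 = 0x1008 and failed, splitting a perfectly aligned 8-byte access into smaller ones; the new test compares against (0x1000 + 7) & ~7 = 0x1000 and accepts it.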
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
}
#endif
- if (*shadow_mem == kShadowRodata) {
+ if (kCppMode && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
}
FastState fast_state = thr->fast_state;
- if (fast_state.GetIgnoreBit())
+ if (fast_state.GetIgnoreBit()) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
return;
- if (kCollectHistory) {
- fast_state.IncrementEpoch();
- thr->fast_state = fast_state;
- // We must not store to the trace if we do not store to the shadow.
- // That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state, EventTypeMop, pc);
}
Shadow cur(fast_state);
cur.SetWrite(kAccessIsWrite);
cur.SetAtomic(kIsAtomic);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cc
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
#ifndef TSAN_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
- if (thr->shadow_stack_pos == thr->shadow_stack_end) {
- const int sz = thr->shadow_stack_end - thr->shadow_stack;
- const int newsz = 2 * sz;
- uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
- newsz * sizeof(uptr));
- internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
- internal_free(thr->shadow_stack);
- thr->shadow_stack = newstack;
- thr->shadow_stack_pos = newstack + sz;
- thr->shadow_stack_end = newstack + newsz;
- }
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
#endif
thr->shadow_stack_pos[0] = pc;
thr->shadow_stack_pos++;
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
namespace __tsan {
-// Descriptor of user's memory block.
-struct MBlock {
- /*
- u64 mtx : 1; // must be first
- u64 lst : 44;
- u64 stk : 31; // on word boundary
- u64 tid : kTidBits;
- u64 siz : 128 - 1 - 31 - 44 - kTidBits; // 39
- */
- u64 raw[2];
-
- void Init(uptr siz, u32 tid, u32 stk) {
- raw[0] = raw[1] = 0;
- raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
- raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
- raw[0] |= (u64)stk << (1 + 44);
- raw[1] |= (u64)stk >> (64 - 44 - 1);
- DCHECK_EQ(Size(), siz);
- DCHECK_EQ(Tid(), tid);
- DCHECK_EQ(StackId(), stk);
- }
-
- u32 Tid() const {
- return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
- }
-
- uptr Size() const {
- return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
- }
-
- u32 StackId() const {
- return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
- }
-
- SyncVar *ListHead() const {
- return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
- }
-
- void ListPush(SyncVar *v) {
- SyncVar *lst = ListHead();
- v->next = lst;
- u64 x = (u64)v ^ (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), v);
- }
-
- SyncVar *ListPop() {
- SyncVar *lst = ListHead();
- SyncVar *nxt = lst->next;
- lst->next = 0;
- u64 x = (u64)lst ^ (u64)nxt;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), nxt);
- return lst;
- }
-
- void ListReset() {
- SyncVar *lst = ListHead();
- u64 x = (u64)lst;
- x = (x >> 3) << 1;
- raw[0] ^= x;
- DCHECK_EQ(ListHead(), 0);
- }
-
- void Lock();
- void Unlock();
- typedef GenericScopedLock<MBlock> ScopedLock;
-};
-
#ifndef TSAN_GO
-#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
-#endif
const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
// FastState (from most significant bit):
// ignore : 1
// tid : kTidBits
-// epoch : kClkBits
// unused : -
// history_size : 3
+// epoch : kClkBits
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << kTidShift;
- x_ |= epoch << kClkShift;
+ x_ |= epoch;
DCHECK_EQ(tid, this->tid());
DCHECK_EQ(epoch, this->epoch());
DCHECK_EQ(GetIgnoreBit(), false);
}
u64 epoch() const {
- u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
+ u64 res = x_ & ((1ull << kClkBits) - 1);
return res;
}
void IncrementEpoch() {
u64 old_epoch = epoch();
- x_ += 1 << kClkShift;
+ x_ += 1;
DCHECK_EQ(old_epoch + 1, epoch());
(void)old_epoch;
}
void SetHistorySize(int hs) {
CHECK_GE(hs, 0);
CHECK_LE(hs, 7);
- x_ = (x_ & ~7) | hs;
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
}
+ ALWAYS_INLINE
int GetHistorySize() const {
- return (int)(x_ & 7);
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
}
void ClearHistorySize() {
- x_ &= ~7;
+ SetHistorySize(0);
}
+ ALWAYS_INLINE
u64 GetTracePos() const {
const int hs = GetHistorySize();
// When hs == 0, the trace consists of 2 parts.
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
- static const int kClkShift = kTidShift - kClkBits;
static const u64 kIgnoreBit = 1ull << 63;
static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
u64 x_;
};
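
Moving the epoch into the low bits (with history_size right above it) makes IncrementEpoch a plain x_ += 1 and lets epoch() be a simple mask. A sketch of the resulting layout, assuming this era's kTidBits = 13 and kClkBits = 42 (treat the exact widths as assumptions):

  // bit 63      ignore
  // bits 50..62 tid            (kTidShift = 64 - kTidBits - 1 = 50)
  // bits 45..49 unused
  // bits 42..44 history_size   (kHistoryShift = kClkBits = 42)
  // bits  0..41 epoch
  u64 x = (u64(5) << 50) | (u64(2) << 42) | 7;  // tid=5, history=2, epoch=7
  x += 1;                                        // IncrementEpoch
  u64 epoch = x & ((1ull << 42) - 1);            // now 8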
// Shadow (from most significant bit):
// freed : 1
// tid : kTidBits
-// epoch : kClkBits
// is_atomic : 1
// is_read : 1
// size_log : 2
// addr0 : 3
+// epoch : kClkBits
class Shadow : public FastState {
public:
explicit Shadow(u64 x)
}
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
- DCHECK_EQ(x_ & 31, 0);
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
DCHECK_LE(addr0, 7);
DCHECK_LE(kAccessSizeLog, 3);
- x_ |= (kAccessSizeLog << 3) | addr0;
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
DCHECK_EQ(kAccessSizeLog, size_log());
DCHECK_EQ(addr0, this->addr0());
}
return shifted_xor == 0;
}
- static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
- u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
return masked_xor == 0;
}
- static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
unsigned kS2AccessSize) {
bool res = false;
u64 diff = s1.addr0() - s2.addr0();
if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
// if (s1.addr0() + size1) > s2.addr0()) return true;
- if (s1.size() > -diff) res = true;
+ if (s1.size() > -diff)
+ res = true;
} else {
// if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
- if (kS2AccessSize > diff) res = true;
+ if (kS2AccessSize > diff)
+ res = true;
}
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
- DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
return res;
}
- // The idea behind the offset is as follows.
- // Consider that we have 8 bool's contained within a single 8-byte block
- // (mapped to a single shadow "cell"). Now consider that we write to the bools
- // from a single thread (which we consider the common case).
- // W/o offsetting each access will have to scan 4 shadow values at average
- // to find the corresponding shadow value for the bool.
- // With offsetting we start scanning shadow with the offset so that
- // each access hits necessary shadow straight off (at least in an expected
- // optimistic case).
- // This logic works seamlessly for any layout of user data. For example,
- // if user data is {int, short, char, char}, then accesses to the int are
- // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
- // from a single thread won't need to scan all 8 shadow values.
- unsigned ComputeSearchOffset() {
- return x_ & 7;
- }
- u64 addr0() const { return x_ & 7; }
- u64 size() const { return 1ull << size_log(); }
- bool IsWrite() const { return !IsRead(); }
- bool IsRead() const { return x_ & kReadBit; }
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
// The idea behind the freed bit is as follows.
// When the memory is freed (or otherwise inaccessible) we write to the shadow
return res;
}
- bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
- // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
- bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
- | (kIsAtomic << kAtomicShift));
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
return v;
}
- bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
<= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
return v;
}
- bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
bool v = ((x_ >> kReadShift) & 3)
>= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
}
private:
- static const u64 kReadShift = 5;
+ static const u64 kReadShift = 5 + kClkBits;
static const u64 kReadBit = 1ull << kReadShift;
- static const u64 kAtomicShift = 6;
+ static const u64 kAtomicShift = 6 + kClkBits;
static const u64 kAtomicBit = 1ull << kAtomicShift;
- u64 size_log() const { return (x_ >> 3) & 3; }
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
- static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
if (s1.addr0() == s2.addr0()) return true;
if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
return true;
struct JmpBuf {
uptr sp;
uptr mangled_sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
uptr *shadow_stack_pos;
};
const int unique_id;
bool in_symbolizer;
bool in_ignored_lib;
- bool is_alive;
+ bool is_dead;
bool is_freeing;
bool is_vptr_access;
const uptr stk_addr;
DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
- bool in_signal_handler;
+ atomic_uintptr_t in_signal_handler;
SignalContext *signal_ctx;
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+
#ifndef TSAN_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
void OnStarted(void *arg);
void OnCreated(void *arg);
void OnReset();
+ void OnDetached(void *arg);
};
struct RacyStacks {
bool initialized;
bool after_multithreaded_fork;
- SyncTab synctab;
+ MetaMap metamap;
Mutex report_mtx;
int nreported;
InternalMmapVector<FiredSuppression> fired_suppressions;
DDetector *dd;
+ ClockAlloc clock_alloc;
+
Flags flags;
u64 stat[StatCnt];
explicit ScopedReport(ReportType typ);
~ScopedReport();
- void AddStack(const StackTrace *stack);
void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
const MutexSet *mset);
- void AddThread(const ThreadContext *tctx);
- void AddThread(int unique_tid);
+ void AddStack(const StackTrace *stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
void AddUniqueTid(int unique_tid);
void AddMutex(const SyncVar *s);
u64 AddMutex(u64 id);
void ForkChildAfter(ThreadState *thr, uptr pc);
void ReportRace(ThreadState *thr);
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1,
- const ReportStack *suppress_stack2 = 0,
- const ReportLocation *suppress_loc = 0);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx,
const ScopedReport &srep,
const StackTrace &trace);
void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
-SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
- bool write_lock, bool create);
-SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (kGoMode)
+ return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
rep.AddMutex(mid);
StackTrace trace;
trace.ObtainCurrent(thr, pc);
- rep.AddStack(&trace);
+ rep.AddStack(&trace, true);
rep.AddLocation(addr, 1);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ OutputReport(thr, rep);
}
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
+ if (kCppMode && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
if (IsGlobalVar(addr))
return;
#endif
- SyncVar *s = ctx->synctab.GetAndRemove(thr, pc, addr);
- if (s == 0)
- return;
- if (flags()->detect_deadlocks) {
- Callback cb(thr, pc);
- ctx->dd->MutexDestroy(&cb, &s->dd);
- }
if (IsAppMem(addr)) {
CHECK(!thr->is_freeing);
thr->is_freeing = true;
MemoryWrite(thr, pc, addr, kSizeLog1);
thr->is_freeing = false;
}
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s == 0)
+ return;
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
s->is_broken = true;
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u32 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(s);
+ rep.AddMutex(mid);
StackTrace trace;
trace.ObtainCurrent(thr, pc);
rep.AddStack(&trace);
- FastState last(s->last_lock);
+ FastState last(last_lock);
RestoreStack(last.tid(), last.epoch(), &trace, 0);
- rep.AddStack(&trace);
- rep.AddLocation(s->addr, 1);
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ rep.AddStack(&trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+ }
+ if (unlock_locked) {
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ if (s != 0) {
+ s->Reset(thr);
+ s->mtx.Unlock();
+ }
}
- thr->mset.Remove(s->GetId());
- DestroyAndFree(s);
+ thr->mset.Remove(mid);
+ // s will be destroyed and freed in MetaMap::FreeBlock.
}
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec, bool try_lock) {
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
bool report_double_lock = false;
}
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
- if (flags()->detect_deadlocks && s->recursion == 1) {
+ if (common_flags()->detect_deadlocks && (s->recursion - rec) == 0) {
Callback cb(thr, pc);
if (!try_lock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
// Can't touch s after this point.
if (report_double_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
bool report_bad_unlock = false;
- if (s->recursion == 0 || s->owner_tid != thr->tid) {
+ if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
report_bad_unlock = true;
}
}
thr->mset.Del(s->GetId(), true);
- if (flags()->detect_deadlocks && s->recursion == 0) {
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
Callback cb(thr, pc);
ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
}
// Can't touch s after this point.
if (report_bad_unlock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
bool report_bad_lock = false;
AcquireImpl(thr, pc, &s->clock);
s->last_lock = thr->fast_state.raw();
thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
- if (flags()->detect_deadlocks && s->recursion == 0) {
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
Callback cb(thr, pc);
if (!trylock)
ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
// Can't touch s after this point.
if (report_bad_lock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
bool report_bad_unlock = false;
}
}
ReleaseImpl(thr, pc, &s->read_clock);
- if (flags()->detect_deadlocks && s->recursion == 0) {
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
Callback cb(thr, pc);
ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
}
thr->mset.Del(mid, false);
if (report_bad_unlock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
bool report_bad_unlock = false;
if (s->owner_tid == SyncVar::kInvalidTid) {
report_bad_unlock = true;
}
thr->mset.Del(s->GetId(), write);
- if (flags()->detect_deadlocks && s->recursion == 0) {
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
Callback cb(thr, pc);
ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
}
// Can't touch s after this point.
if (report_bad_unlock)
ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
}
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
s->owner_tid = SyncVar::kInvalidTid;
s->recursion = 0;
s->mtx.Unlock();
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
DPrintf("#%d: Release %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
if (thr->ignore_sync)
return;
thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(c);
+ thr->clock.acquire(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(c);
+ thr->clock.release(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(c);
+ thr->clock.ReleaseStore(&thr->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(c);
+ thr->clock.acq_rel(&thr->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
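
Illustration (not part of the patch): the acquire/release calls above are
vector-clock joins; the extra &thr->clock_cache argument only threads a
per-thread allocation cache through. A standalone sketch of the underlying
operations, with invented Clock/Acquire/Release names rather than TSan's
SyncClock API:

#include <algorithm>
#include <cstddef>
#include <vector>

typedef std::vector<unsigned long> Clock;  // one epoch slot per thread id

// acquire: the thread's clock absorbs everything the sync object has seen.
void Acquire(Clock *thr, const Clock &sync) {
  if (thr->size() < sync.size())
    thr->resize(sync.size());
  for (size_t i = 0; i < sync.size(); i++)
    (*thr)[i] = std::max((*thr)[i], sync[i]);
}

// release: the sync object's clock absorbs everything the thread has seen.
void Release(const Clock &thr, Clock *sync) {
  if (sync->size() < thr.size())
    sync->resize(thr.size());
  for (size_t i = 0; i < thr.size(); i++)
    (*sync)[i] = std::max((*sync)[i], thr[i]);
}

// acq_rel is both directions, as in the hunk above.
void AcquireRelease(Clock *thr, Clock *sync) {
  Acquire(thr, *sync);
  Release(*thr, sync);
}
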
rep.AddUniqueTid((int)r->loop[i].thr_ctx);
rep.AddThread((int)r->loop[i].thr_ctx);
}
- StackTrace stacks[2 * DDReport::kMaxLoopSize];
+ InternalScopedBuffer<StackTrace> stacks(2 * DDReport::kMaxLoopSize);
uptr dummy_pc = 0x42;
for (int i = 0; i < r->n; i++) {
uptr size;
// but we should still produce some stack trace in the report.
stacks[i].Init(&dummy_pc, 1);
}
- rep.AddStack(&stacks[i]);
+ rep.AddStack(&stacks[i], true);
}
}
- // FIXME: use all stacks for suppressions, not just the second stack of the
- // first edge.
- OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
+ OutputReport(thr, rep);
}
} // namespace __tsan
ReportStack *last_frame2 = 0;
const char *prefix = "__interceptor_";
uptr prefix_len = internal_strlen(prefix);
- const char *path_prefix = flags()->strip_path_prefix;
+ const char *path_prefix = common_flags()->strip_path_prefix;
uptr path_prefix_len = internal_strlen(path_prefix);
char *pos;
for (ReportStack *ent = stack; ent; ent = ent->next) {
DestroyAndFree(rep_);
}
-void ScopedReport::AddStack(const StackTrace *stack) {
+void ScopedReport::AddStack(const StackTrace *stack, bool suppressable) {
ReportStack **rs = rep_->stacks.PushBack();
*rs = SymbolizeStack(*stack);
+ (*rs)->suppressable = suppressable;
}
void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->write = s.IsWrite();
mop->atomic = s.IsAtomic();
mop->stack = SymbolizeStack(*stack);
+ if (mop->stack)
+ mop->stack->suppressable = true;
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
u64 mid = this->AddMutex(d.id);
rep_->unique_tids.PushBack(unique_tid);
}
-void ScopedReport::AddThread(const ThreadContext *tctx) {
+void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
for (uptr i = 0; i < rep_->threads.Size(); i++) {
if ((u32)rep_->threads[i]->id == tctx->tid)
return;
rt->parent_tid = tctx->parent_tid;
rt->stack = 0;
rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
}
#ifndef TSAN_GO
}
#endif
-void ScopedReport::AddThread(int unique_tid) {
+void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#ifndef TSAN_GO
- AddThread(FindThreadByUidLocked(unique_tid));
+ AddThread(FindThreadByUidLocked(unique_tid), suppressable);
#endif
}
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->synctab.GetIfExistsAndLock(addr, false);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
// Check that the mutex is still alive.
// Another mutex can be created at the same address,
// so check uid as well.
AddDeadMutex(id);
}
if (s)
- s->mtx.ReadUnlock();
+ s->mtx.Unlock();
return mid;
}
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
- if (FdLocation(addr, &fd, &creat_tid, &creat_stack)
- || FdLocation(AlternativeAddress(addr), &fd, &creat_tid, &creat_stack)) {
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
return;
}
MBlock *b = 0;
- if (allocator()->PointerIsMine((void*)addr)
- && (b = user_mblock(0, (void*)addr))) {
- ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
ReportLocation *loc = new(mem) ReportLocation();
rep_->locs.PushBack(loc);
loc->type = ReportLocationHeap;
loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
- loc->size = b->Size();
- loc->tid = tctx ? tctx->tid : b->Tid();
+ loc->size = b->siz;
+ loc->tid = tctx ? tctx->tid : b->tid;
loc->name = 0;
loc->file = 0;
loc->line = 0;
loc->stack = 0;
- loc->stack = SymbolizeStackId(b->StackId());
+ loc->stack = SymbolizeStackId(b->stk);
if (tctx)
AddThread(tctx);
return;
}
ReportLocation *loc = SymbolizeData(addr);
if (loc) {
+ loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
}
}
-bool OutputReport(Context *ctx,
- const ScopedReport &srep,
- const ReportStack *suppress_stack1,
- const ReportStack *suppress_stack2,
- const ReportLocation *suppress_loc) {
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed);
const ReportDesc *rep = srep.GetReport();
Suppression *supp = 0;
- uptr suppress_pc = IsSuppressed(rep->typ, suppress_stack1, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_stack2, &supp);
- if (suppress_pc == 0)
- suppress_pc = IsSuppressed(rep->typ, suppress_loc, &supp);
+ uptr suppress_pc = 0;
+ for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++)
+ suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp);
if (suppress_pc != 0) {
FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp};
ctx->fired_suppressions.push_back(s);
}
- if (OnReport(rep, suppress_pc != 0))
- return false;
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, suppress_pc != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed)
+ return false;
+ }
PrintReport(rep);
ctx->nreported++;
if (flags()->halt_on_error)
internal_strstr(frame->file, "tsan_interface_"));
}
-// On programs that use Java we see weird reports like:
-// WARNING: ThreadSanitizer: data race (pid=22512)
-// Read of size 8 at 0x7d2b00084318 by thread 100:
-// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
-// #1 <null> <null>:0 (0x7f7ad9b40193)
-// Previous write of size 8 at 0x7d2b00084318 by thread 105:
-// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
-// #1 <null> <null>:0 (0x7f7ad9b42707)
-static bool IsJavaNonsense(const ReportDesc *rep) {
-#ifndef TSAN_GO
- for (uptr i = 0; i < rep->mops.Size(); i++) {
- ReportMop *mop = rep->mops[i];
- ReportStack *frame = mop->stack;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- return true;
- }
- if (FrameIsInternal(frame)) {
- frame = frame->next;
- if (frame == 0
- || (frame->func == 0 && frame->file == 0 && frame->line == 0
- && frame->module == 0)) {
- if (frame) {
- FiredSuppression supp = {rep->typ, frame->pc, 0};
- ctx->fired_suppressions.push_back(supp);
- }
- return true;
- }
- }
- }
-#endif
- return false;
-}
-
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
Shadow s0(thr->racy_state[0]);
Shadow s1(thr->racy_state[1]);
}
void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
// Symbolizer makes lots of intercepted calls. If we try to process them,
// at best it will cause deadlocks on internal mutexes.
ScopedIgnoreInterceptors ignore;
i == 0 ? &thr->mset : mset2.data());
}
- if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
- return;
-
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = static_cast<ThreadContext*>(
}
#endif
- ReportLocation *suppress_loc = rep.GetReport()->locs.Size() ?
- rep.GetReport()->locs[0] : 0;
- if (!OutputReport(ctx, rep, rep.GetReport()->mops[0]->stack,
- rep.GetReport()->mops[1]->stack,
- suppress_loc))
+ if (!OutputReport(thr, rep))
return;
AddRacyStacks(thr, traces, addr_min, addr_max);
#endif
void ThreadContext::OnDead() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
}
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset();
+ sync.Reset(&caller_thr->clock_cache);
}
struct OnCreatedArgs {
}
void ThreadContext::OnReset() {
- sync.Reset();
+ CHECK_EQ(sync.size(), 0);
FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
//!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->clock_cache);
+}
+
struct OnStartedArgs {
ThreadState *thr;
uptr stk_addr;
#ifndef TSAN_GO
AllocatorThreadStart(thr);
#endif
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
thr->dd_pt = ctx->dd->CreatePhysicalThread();
thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
}
Trace *thr_trace = ThreadTrace(thr->tid);
thr_trace->headers[trace].epoch0 = epoch0;
StatInc(thr, StatSyncAcquire);
- sync.Reset();
+ sync.Reset(&thr->clock_cache);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
tid, (uptr)epoch0, args->stk_addr, args->stk_size,
args->tls_addr, args->tls_size);
- thr->is_alive = true;
}
void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
- if (flags()->detect_deadlocks) {
+ if (common_flags()->detect_deadlocks) {
ctx->dd->DestroyPhysicalThread(thr->dd_pt);
ctx->dd->DestroyLogicalThread(thr->dd_lt);
}
+ ctx->clock_alloc.FlushCache(&thr->clock_cache);
+ ctx->metamap.OnThreadIdle(thr);
#ifndef TSAN_GO
AllocatorThreadFinish(thr);
#endif
MaybeReportThreadLeak, &leaks);
for (uptr i = 0; i < leaks.Size(); i++) {
ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx);
+ rep.AddThread(leaks[i].tctx, true);
rep.SetCount(leaks[i].count);
- OutputReport(ctx, rep, rep.GetReport()->threads[0]->stack);
+ OutputReport(thr, rep);
}
#endif
}
DontNeedShadowFor(thr->stk_addr, thr->stk_size);
if (thr->tls_addr && thr->tls_size)
DontNeedShadowFor(thr->tls_addr, thr->tls_size);
- thr->is_alive = false;
+ thr->is_dead = true;
ctx->thread_registry->FinishThread(thr->tid);
}
void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
CHECK_GT(tid, 0);
CHECK_LT(tid, kMaxTid);
- ctx->thread_registry->DetachThread(tid);
+ ctx->thread_registry->DetachThread(tid, thr);
}
void ThreadSetName(ThreadState *thr, const char *name) {
--- /dev/null
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+//#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+StackTrace::StackTrace()
+ : n_()
+ , s_()
+ , c_() {
+}
+
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+ : n_()
+ , s_(buf)
+ , c_(cnt) {
+ CHECK_NE(buf, 0);
+ CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+ Reset();
+}
+
+void StackTrace::Reset() {
+ if (s_ && !c_) {
+ CHECK_NE(n_, 0);
+ internal_free(s_);
+ s_ = 0;
+ }
+ n_ = 0;
+}
+
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+ Reset();
+ if (cnt == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(cnt, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+ }
+ n_ = cnt;
+ internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+ Reset();
+ n_ = thr->shadow_stack_pos - thr->shadow_stack;
+ if (n_ + !!toppc == 0)
+ return;
+ uptr start = 0;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ if (n_ + !!toppc > c_) {
+ start = n_ - c_ + !!toppc;
+ n_ = c_ - !!toppc;
+ }
+ } else {
+ // Cap potentially huge stacks.
+ if (n_ + !!toppc > kTraceStackSize) {
+ start = n_ - kTraceStackSize + !!toppc;
+ n_ = kTraceStackSize - !!toppc;
+ }
+ s_ = (uptr*)internal_alloc(MBlockStackTrace,
+ (n_ + !!toppc) * sizeof(s_[0]));
+ }
+ for (uptr i = 0; i < n_; i++)
+ s_[i] = thr->shadow_stack[start + i];
+ if (toppc) {
+ s_[n_] = toppc;
+ n_++;
+ }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+ Reset();
+ Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+ return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+ return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+ CHECK_LT(i, n_);
+ return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+ return s_;
+}
+
+} // namespace __tsan
--- /dev/null
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+//#include "sanitizer_common/sanitizer_atomic.h"
+//#include "sanitizer_common/sanitizer_common.h"
+//#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "tsan_defs.h"
+//#include "tsan_clock.h"
+//#include "tsan_mutex.h"
+//#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+class StackTrace {
+ public:
+ StackTrace();
+ // Initializes the object in "static mode":
+ // in this mode it never calls malloc/free but uses the provided buffer.
+ StackTrace(uptr *buf, uptr cnt);
+ ~StackTrace();
+ void Reset();
+
+ void Init(const uptr *pcs, uptr cnt);
+ void ObtainCurrent(ThreadState *thr, uptr toppc);
+ bool IsEmpty() const;
+ uptr Size() const;
+ uptr Get(uptr i) const;
+ const uptr *Begin() const;
+ void CopyFrom(const StackTrace& other);
+
+ private:
+ uptr n_;
+ uptr *s_;
+ const uptr c_;
+
+ StackTrace(const StackTrace&);
+ void operator = (const StackTrace&);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
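
Illustration (invented MiniStackTrace, not the class above): the two-argument
constructor's "static mode" contract means Init() must never allocate when a
caller-provided buffer is present, only cap-check and copy:

#include <cassert>
#include <cstdlib>
#include <cstring>

struct MiniStackTrace {
  unsigned long *s_;  // frame storage
  unsigned long c_;   // capacity; nonzero selects "static mode"
  unsigned long n_;   // current number of frames
  MiniStackTrace() : s_(0), c_(0), n_(0) {}
  MiniStackTrace(unsigned long *buf, unsigned long cnt)
      : s_(buf), c_(cnt), n_(0) {}
  ~MiniStackTrace() {
    if (s_ && !c_) free(s_);  // only the dynamic mode owns its buffer
  }
  void Init(const unsigned long *pcs, unsigned long cnt) {
    if (c_) {
      assert(cnt <= c_);  // static mode: no malloc/free, ever
    } else {
      free(s_);
      s_ = (unsigned long *)malloc(cnt * sizeof(*s_));
    }
    n_ = cnt;
    if (cnt)
      memcpy(s_, pcs, cnt * sizeof(*s_));
  }
};
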
name[StatMop4] = " size 4 ";
name[StatMop8] = " size 8 ";
name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
name[StatMopRange] = " Including range ";
name[StatMopRodata] = " Including .rodata ";
name[StatMopRangeRodata] = " Including .rodata range ";
StatMop4,
StatMop8,
StatMopSame,
+ StatMopIgnored,
StatMopRange,
StatMopRodata,
StatMopRangeRodata,
namespace __tsan {
-static SuppressionContext* g_ctx;
-
-static char *ReadFile(const char *filename) {
- if (filename == 0 || filename[0] == 0)
- return 0;
- InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/' || GetPwd() == 0)
- internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
- else
- internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
- uptr openrv = OpenFile(tmp.data(), false);
- if (internal_iserror(openrv)) {
- Printf("ThreadSanitizer: failed to open suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- fd_t fd = openrv;
- const uptr fsize = internal_filesize(fd);
- if (fsize == (uptr)-1) {
- Printf("ThreadSanitizer: failed to stat suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- char *buf = (char*)internal_alloc(MBlockSuppression, fsize + 1);
- if (fsize != internal_read(fd, buf, fsize)) {
- Printf("ThreadSanitizer: failed to read suppressions file '%s'\n",
- tmp.data());
- Die();
- }
- internal_close(fd);
- buf[fsize] = 0;
- return buf;
-}
+static bool suppressions_inited = false;
void InitializeSuppressions() {
- ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
- g_ctx = new(placeholder_) SuppressionContext;
- const char *supp = ReadFile(flags()->suppressions);
- g_ctx->Parse(supp);
+ CHECK(!suppressions_inited);
+ SuppressionContext::InitIfNecessary();
#ifndef TSAN_GO
- supp = __tsan_default_suppressions();
- g_ctx->Parse(supp);
- g_ctx->Parse(std_suppressions);
+ SuppressionContext::Get()->Parse(__tsan_default_suppressions());
+ SuppressionContext::Get()->Parse(std_suppressions);
#endif
-}
-
-SuppressionContext *GetSuppressionContext() {
- CHECK_NE(g_ctx, 0);
- return g_ctx;
+ suppressions_inited = true;
}
SuppressionType conv(ReportType typ) {
}
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || stack == 0) return 0;
+ if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
for (const ReportStack *frame = stack; frame; frame = frame->next) {
- if (g_ctx->Match(frame->func, stype, &s) ||
- g_ctx->Match(frame->file, stype, &s) ||
- g_ctx->Match(frame->module, stype, &s)) {
+ if (SuppressionContext::Get()->Match(frame->func, stype, &s) ||
+ SuppressionContext::Get()->Match(frame->file, stype, &s) ||
+ SuppressionContext::Get()->Match(frame->module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
}
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
- CHECK(g_ctx);
- if (!g_ctx->SuppressionCount() || loc == 0 ||
- loc->type != ReportLocationGlobal)
+ if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
return 0;
SuppressionType stype = conv(typ);
if (stype == SuppressionNone)
return 0;
Suppression *s;
- if (g_ctx->Match(loc->name, stype, &s) ||
- g_ctx->Match(loc->file, stype, &s) ||
- g_ctx->Match(loc->module, stype, &s)) {
+ if (SuppressionContext::Get()->Match(loc->name, stype, &s) ||
+ SuppressionContext::Get()->Match(loc->file, stype, &s) ||
+ SuppressionContext::Get()->Match(loc->module, stype, &s)) {
DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ);
s->hit_count++;
*sp = s;
}
void PrintMatchedSuppressions() {
- CHECK(g_ctx);
InternalMmapVector<Suppression *> matched(1);
- g_ctx->GetMatched(&matched);
+ SuppressionContext::Get()->GetMatched(&matched);
if (!matched.size())
return;
int hit_count = 0;
void PrintMatchedSuppressions();
uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
-SuppressionContext *GetSuppressionContext();
} // namespace __tsan
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++)
new(&addr_frames[i]) AddressInfo();
- uptr addr_frames_num = Symbolizer::Get()->SymbolizePC(
+ uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC(
addr, addr_frames.data(), kMaxAddrFrames);
if (addr_frames_num == 0)
return NewReportStackEntry(addr);
ReportLocation *SymbolizeData(uptr addr) {
DataInfo info;
- if (!Symbolizer::Get()->SymbolizeData(addr, &info))
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
ReportLocation *ent = (ReportLocation*)internal_alloc(MBlockReportStack,
sizeof(ReportLocation));
}
void SymbolizeFlush() {
- Symbolizer::Get()->Flush();
+ Symbolizer::GetOrInit()->Flush();
}
} // namespace __tsan
void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
-SyncVar::SyncVar(uptr addr, u64 uid)
- : mtx(MutexTypeSyncVar, StatMtxSyncVar)
- , addr(addr)
- , uid(uid)
- , creation_stack_id()
- , owner_tid(kInvalidTid)
- , last_lock()
- , recursion()
- , is_rw()
- , is_recursive()
- , is_broken()
- , is_linker_init() {
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
+}
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (kCppMode) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(ThreadState *thr) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ is_rw = 0;
+ is_recursive = 0;
+ is_broken = 0;
+ is_linker_init = 0;
+
+ if (thr == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&thr->clock_cache);
+ read_clock.Reset(&thr->clock_cache);
+ }
}
-SyncTab::Part::Part()
- : mtx(MutexTypeSyncTab, StatMtxSyncTab)
- , val() {
+MetaMap::MetaMap() {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
}
-SyncTab::SyncTab() {
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
}
-SyncTab::~SyncTab() {
- for (int i = 0; i < kPartCount; i++) {
- while (tab_[i].val) {
- SyncVar *tmp = tab_[i].val;
- tab_[i].val = tmp->next;
- DestroyAndFree(tmp);
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(thr, pc, p, sz);
+ return sz;
+}
+
+void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ *meta = 0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
}
}
}
-SyncVar* SyncTab::GetOrCreateAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock) {
- return GetAndLock(thr, pc, addr, write_lock, true);
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
}
-SyncVar* SyncTab::GetIfExistsAndLock(uptr addr, bool write_lock) {
- return GetAndLock(0, 0, addr, write_lock, false);
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
}
-SyncVar* SyncTab::Create(ThreadState *thr, uptr pc, uptr addr) {
- StatInc(thr, StatSyncCreated);
- void *mem = internal_alloc(MBlockSync, sizeof(SyncVar));
- const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- SyncVar *res = new(mem) SyncVar(addr, uid);
- res->creation_stack_id = 0;
- if (!kGoMode) // Go does not use them
- res->creation_stack_id = CurrentStackId(thr, pc);
- if (flags()->detect_deadlocks)
- DDMutexInit(thr, pc, res);
- return res;
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
+ return GetAndLock(0, 0, addr, true, false);
}
-SyncVar* SyncTab::GetAndLock(ThreadState *thr, uptr pc,
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock, bool create) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetJavaSync(thr, pc, addr, write_lock, create);
- if (res)
- return res;
- }
-
- // Here we ask only PrimaryAllocator, because
- // SecondaryAllocator::PointerIsMine() is slow and we have fallback on
- // the hashmap anyway.
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- MBlock::ScopedLock l(b);
- SyncVar *res = 0;
- for (res = b->ListHead(); res; res = res->next) {
- if (res->addr == addr)
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
break;
- }
- if (res == 0) {
- if (!create)
- return 0;
- res = Create(thr, pc, addr);
- b->ListPush(res);
- }
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- {
- ReadLock l(&p->mtx);
- for (SyncVar *res = p->val; res; res = res->next) {
- if (res->addr == addr) {
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr);
+ sync_alloc_.Free(&thr->sync_cache, myidx);
+ }
if (write_lock)
- res->mtx.Lock();
+ s->mtx.Lock();
else
- res->mtx.ReadLock();
- return res;
+ s->mtx.ReadLock();
+ return s;
}
+ idx = s->next;
}
- }
- if (!create)
- return 0;
- {
- Lock l(&p->mtx);
- SyncVar *res = p->val;
- for (; res; res = res->next) {
- if (res->addr == addr)
- break;
- }
- if (res == 0) {
- res = Create(thr, pc, addr);
- res->next = p->val;
- p->val = res;
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
}
- if (write_lock)
- res->mtx.Lock();
- else
- res->mtx.ReadLock();
- return res;
- }
-}
-SyncVar* SyncTab::GetAndRemove(ThreadState *thr, uptr pc, uptr addr) {
-#ifndef TSAN_GO
- { // NOLINT
- SyncVar *res = GetAndRemoveJavaSync(thr, pc, addr);
- if (res)
- return res;
- }
- if (PrimaryAllocator::PointerIsMine((void*)addr)) {
- MBlock *b = user_mblock(thr, (void*)addr);
- CHECK_NE(b, 0);
- SyncVar *res = 0;
- {
- MBlock::ScopedLock l(b);
- res = b->ListHead();
- if (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- b->ListPop();
- } else {
- SyncVar **prev = &res->next;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
- }
- }
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- }
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
}
- return res;
- }
-#endif
-
- Part *p = &tab_[PartIdx(addr)];
- SyncVar *res = 0;
- {
- Lock l(&p->mtx);
- SyncVar **prev = &p->val;
- res = *prev;
- while (res) {
- if (res->addr == addr) {
- if (res->is_linker_init)
- return 0;
- *prev = res->next;
- break;
- }
- prev = &res->next;
- res = *prev;
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
}
}
- if (res) {
- StatInc(thr, StatSyncDestroyed);
- res->mtx.Lock();
- res->mtx.Unlock();
- }
- return res;
-}
-
-int SyncTab::PartIdx(uptr addr) {
- return (addr >> 3) % kPartCount;
-}
-
-StackTrace::StackTrace()
- : n_()
- , s_()
- , c_() {
}
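
Illustration (not part of the patch): the publish step at the end of
GetAndLock above is a lock-free list push: link the candidate to the observed
head, then compare-and-swap the meta cell. On CAS failure GetAndLock re-reads
the cell and rescans the chain, since another thread may have installed a
SyncVar for the same address. A standalone sketch of the push pattern, using
C++11 atomics and pointers where the runtime uses sanitizer atomics and
32-bit indices:

#include <atomic>

struct Node {
  int val;
  Node *next;
};

// Push n onto the intrusive list headed by *head without taking a lock.
void Push(std::atomic<Node *> *head, Node *n) {
  Node *observed = head->load(std::memory_order_relaxed);
  do {
    n->next = observed;  // link first, publish second
  } while (!head->compare_exchange_weak(observed, n,
                                        std::memory_order_release,
                                        std::memory_order_relaxed));
}
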
-StackTrace::StackTrace(uptr *buf, uptr cnt)
- : n_()
- , s_(buf)
- , c_(cnt) {
- CHECK_NE(buf, 0);
- CHECK_NE(cnt, 0);
-}
-
-StackTrace::~StackTrace() {
- Reset();
-}
-
-void StackTrace::Reset() {
- if (s_ && !c_) {
- CHECK_NE(n_, 0);
- internal_free(s_);
- s_ = 0;
- }
- n_ = 0;
-}
-
-void StackTrace::Init(const uptr *pcs, uptr cnt) {
- Reset();
- if (cnt == 0)
- return;
- if (c_) {
- CHECK_NE(s_, 0);
- CHECK_LE(cnt, c_);
- } else {
- s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap; the caller must guarantee that there are no
+ // concurrent accesses to the regions (e.g. the world is stopped).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
}
- n_ = cnt;
- internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
-}
-
-void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
- Reset();
- n_ = thr->shadow_stack_pos - thr->shadow_stack;
- if (n_ + !!toppc == 0)
- return;
- uptr start = 0;
- if (c_) {
- CHECK_NE(s_, 0);
- if (n_ + !!toppc > c_) {
- start = n_ - c_ + !!toppc;
- n_ = c_ - !!toppc;
- }
- } else {
- // Cap potentially huge stacks.
- if (n_ + !!toppc > kTraceStackSize) {
- start = n_ - kTraceStackSize + !!toppc;
- n_ = kTraceStackSize - !!toppc;
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
}
- s_ = (uptr*)internal_alloc(MBlockStackTrace,
- (n_ + !!toppc) * sizeof(s_[0]));
- }
- for (uptr i = 0; i < n_; i++)
- s_[i] = thr->shadow_stack[start + i];
- if (toppc) {
- s_[n_] = toppc;
- n_++;
}
}
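
Illustration (invented names): the direction selection in MoveMemory above is
the classic memmove technique; when dst > src the ranges can overlap at the
tail, so the copy runs backwards. A standalone sketch over plain array cells:

#include <cstddef>

// Move n cells from index src to index dst within the same array, zeroing
// the source cells, correct even when the two ranges overlap.
void MoveCells(unsigned *cells, size_t src, size_t dst, size_t n) {
  if (dst > src) {
    for (size_t i = n; i-- > 0;) {  // overlap at the tail: copy backwards
      unsigned idx = cells[src + i];
      cells[src + i] = 0;
      cells[dst + i] = idx;
    }
  } else {
    for (size_t i = 0; i < n; i++) {  // overlap at the head: copy forwards
      unsigned idx = cells[src + i];
      cells[src + i] = 0;
      cells[dst + i] = idx;
    }
  }
}
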
-void StackTrace::CopyFrom(const StackTrace& other) {
- Reset();
- Init(other.Begin(), other.Size());
-}
-
-bool StackTrace::IsEmpty() const {
- return n_ == 0;
-}
-
-uptr StackTrace::Size() const {
- return n_;
-}
-
-uptr StackTrace::Get(uptr i) const {
- CHECK_LT(i, n_);
- return s_[i];
-}
-
-const uptr *StackTrace::Begin() const {
- return s_;
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+ block_alloc_.FlushCache(&thr->block_cache);
+ sync_alloc_.FlushCache(&thr->sync_cache);
}
} // namespace __tsan
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
-#include "tsan_clock.h"
#include "tsan_defs.h"
+#include "tsan_clock.h"
#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
namespace __tsan {
-class StackTrace {
- public:
- StackTrace();
- // Initialized the object in "static mode",
- // in this mode it never calls malloc/free but uses the provided buffer.
- StackTrace(uptr *buf, uptr cnt);
- ~StackTrace();
- void Reset();
-
- void Init(const uptr *pcs, uptr cnt);
- void ObtainCurrent(ThreadState *thr, uptr toppc);
- bool IsEmpty() const;
- uptr Size() const;
- uptr Get(uptr i) const;
- const uptr *Begin() const;
- void CopyFrom(const StackTrace& other);
-
- private:
- uptr n_;
- uptr *s_;
- const uptr c_;
-
- StackTrace(const StackTrace&);
- void operator = (const StackTrace&);
-};
-
struct SyncVar {
- explicit SyncVar(uptr addr, u64 uid);
+ SyncVar();
static const int kInvalidTid = -1;
+ uptr addr; // overwritten by DenseSlabAlloc freelist
Mutex mtx;
- uptr addr;
- const u64 uid; // Globally unique id.
+ u64 uid; // Globally unique id.
u32 creation_stack_id;
int owner_tid; // Set only by exclusive owners.
u64 last_lock;
bool is_recursive;
bool is_broken;
bool is_linker_init;
- SyncVar *next; // In SyncTab hashtable.
+ u32 next; // in MetaMap
DDMutex dd;
SyncClock read_clock; // Used for rw mutexes only.
// The clock is placed last, so that it is situated on a different cache line
// with the mtx. This reduces contention for hot sync objects.
SyncClock clock;
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(ThreadState *thr);
+
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
return GetLsb((u64)addr | (uid << 47), 61);
}
};
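
Illustration (not part of the patch): GetId() packs a 61-bit id exactly as
its comment says: 47 address bits, then the low 14 bits of the uid. A
standalone model of the encoding; GetLsb is reimplemented here, TSan's helper
lives in tsan_defs.h:

#include <cassert>
#include <cstdint>

static uint64_t GetLsb(uint64_t v, int bits) {
  return v & (((uint64_t)1 << bits) - 1);
}

int main() {
  uint64_t addr = 0x7f00deadbee8ull;  // fits in 47 bits
  uint64_t uid = 12345;               // only its low 14 bits survive
  uint64_t id = GetLsb(addr | (uid << 47), 61);
  assert(GetLsb(id, 47) == addr);                  // address recovered
  assert((id >> 47) == (uid & ((1u << 14) - 1)));  // low uid bits recovered
  return 0;
}
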
-class SyncTab {
+/* MetaMap allows mapping arbitrary user pointers onto various descriptors.
+ Currently it maps pointers to heap block descriptors and sync var
+ descriptors. It uses 1/2 direct shadow; see tsan_platform.h.
+*/
+class MetaMap {
public:
- SyncTab();
- ~SyncTab();
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
+ void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+ SyncVar* GetIfExistsAndLock(uptr addr);
- // If the SyncVar does not exist, returns 0.
- SyncVar* GetAndRemove(ThreadState *thr, uptr pc, uptr addr);
+ void MoveMemory(uptr src, uptr dst, uptr sz);
- SyncVar* Create(ThreadState *thr, uptr pc, uptr addr);
+ void OnThreadIdle(ThreadState *thr);
private:
- struct Part {
- Mutex mtx;
- SyncVar *val;
- char pad[kCacheLineSize - sizeof(Mutex) - sizeof(SyncVar*)]; // NOLINT
- Part();
- };
-
- // FIXME: Implement something more sane.
- static const int kPartCount = 1009;
- Part tab_[kPartCount];
+ static const u32 kFlagMask = 3 << 30;
+ static const u32 kFlagBlock = 1 << 30;
+ static const u32 kFlagSync = 2 << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
atomic_uint64_t uid_gen_;
- int PartIdx(uptr addr);
-
- SyncVar* GetAndLock(ThreadState *thr, uptr pc,
- uptr addr, bool write_lock, bool create);
-
- SyncTab(const SyncTab&); // Not implemented.
- void operator = (const SyncTab&); // Not implemented.
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
};
} // namespace __tsan
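
Illustration (the three constants are copied from the class above; the rest
is invented): each 32-bit meta-shadow cell stores a DenseSlabAlloc index in
its low 30 bits plus a two-bit tag that says whether the index names a heap
block or the head of a sync-var chain:

#include <cassert>
#include <cstdint>

static const uint32_t kFlagMask = 3u << 30;
static const uint32_t kFlagBlock = 1u << 30;
static const uint32_t kFlagSync = 2u << 30;

int main() {
  uint32_t idx = 42;                  // an index into a slab allocator
  uint32_t meta = idx | kFlagSync;    // store: tag the cell as a sync chain
  assert(meta & kFlagSync);           // load: dispatch on the tag bits
  assert(!(meta & kFlagBlock));
  assert((meta & ~kFlagMask) == 42);  // strip the tag to recover the index
  return 0;
}
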
#include "tsan_defs.h"
#include "tsan_mutex.h"
-#include "tsan_sync.h"
+#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"
namespace __tsan {
do {
StatInc(thr, StatShadowProcessed);
const unsigned kAccessSize = 1 << kAccessSizeLog;
- unsigned off = cur.ComputeSearchOffset();
- u64 *sp = &shadow_mem[(idx + off) % kShadowCnt];
+ u64 *sp = &shadow_mem[idx];
old = LoadShadow(sp);
if (old.IsZero()) {
StatInc(thr, StatShadowZero);
// same thread?
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
- if (OldIsInSameSynchEpoch(old, thr)) {
- if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
- // found a slot that holds effectively the same info
- // (that is, same tid, same sync epoch and same size)
- StatInc(thr, StatMopSame);
- return;
- }
- StoreIfNotYetStored(sp, &store_word);
- break;
- }
if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
}
void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
uptr old_size = Size();
EnsureSize(size);
if (old_size < size) {
return;
}
uptr cap0 = last_ - begin_;
- uptr cap = 2 * cap0;
+ uptr cap = cap0 * 5 / 4; // 25% growth
if (cap == 0)
cap = 16;
if (cap < size)
ubsan_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
ubsan_value.cc
libubsan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1)
-am__objects_1 = ubsan_diag.lo ubsan_handlers.lo ubsan_handlers_cxx.lo \
- ubsan_type_hash.lo ubsan_value.lo
+am__objects_1 = ubsan_diag.lo ubsan_flags.lo ubsan_handlers.lo \
+ ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \
+ ubsan_value.lo
am_libubsan_la_OBJECTS = $(am__objects_1)
libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS)
libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
toolexeclib_LTLIBRARIES = libubsan.la
ubsan_files = \
ubsan_diag.cc \
+ ubsan_flags.cc \
ubsan_handlers.cc \
ubsan_handlers_cxx.cc \
+ ubsan_init.cc \
ubsan_type_hash.cc \
ubsan_value.cc
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_diag.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_value.Plo@am__quote@
//===----------------------------------------------------------------------===//
#include "ubsan_diag.h"
-#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_libc.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
using namespace __ubsan;
-static void InitializeSanitizerCommon() {
- static StaticSpinMutex init_mu;
- SpinMutexLock l(&init_mu);
- static bool initialized;
- if (initialized)
- return;
- if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) {
- // UBSan is run in a standalone mode. Initialize it now.
- SanitizerToolName = "UndefinedBehaviorSanitizer";
- CommonFlags *cf = common_flags();
- SetCommonFlagsDefaults(cf);
- cf->print_summary = false;
+static void MaybePrintStackTrace(uptr pc, uptr bp) {
+ // We assume that flags are already parsed: InitIfNecessary
+ // will definitely be called when we print the first diagnostic message.
+ if (!flags()->print_stacktrace)
+ return;
+ // We can only use slow unwind, as we don't have any information about stack
+ // top/bottom.
+ // FIXME: It's better to respect "fast_unwind_on_fatal" runtime flag and
+ // fetch stack top/bottom information if we have it (e.g. if we're running
+ // under ASan).
+ if (StackTrace::WillUseFastUnwind(false))
+ return;
+ StackTrace stack;
+ stack.Unwind(kStackTraceMax, pc, bp, 0, 0, 0, false);
+ stack.Print();
+}
+
+static void MaybeReportErrorSummary(Location Loc) {
+ if (!common_flags()->print_summary)
+ return;
+ // Don't try to unwind the stack trace in UBSan summaries: just use the
+ // provided location.
+ if (Loc.isSourceLocation()) {
+ SourceLocation SLoc = Loc.getSourceLocation();
+ if (!SLoc.isInvalid()) {
+ ReportErrorSummary("runtime-error", SLoc.getFilename(), SLoc.getLine(),
+ "");
+ return;
+ }
}
- initialized = true;
+ ReportErrorSummary("runtime-error");
+}
+
+namespace {
+class Decorator : public SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Highlight() const { return Green(); }
+ const char *EndHighlight() const { return Default(); }
+ const char *Note() const { return Black(); }
+ const char *EndNote() const { return Default(); }
+};
}
Location __ubsan::getCallerLocation(uptr CallerLoc) {
Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) {
if (!Loc)
return Location();
- // FIXME: We may need to run initialization earlier.
- InitializeSanitizerCommon();
+ InitIfNecessary();
AddressInfo Info;
- if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) ||
- !Info.module || !*Info.module)
+ if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) || !Info.module ||
+ !*Info.module)
return Location(Loc);
if (FName && Info.function)
return Best;
}
+static inline uptr subtractNoOverflow(uptr LHS, uptr RHS) {
+ return (LHS < RHS) ? 0 : LHS - RHS;
+}
+
+static inline uptr addNoOverflow(uptr LHS, uptr RHS) {
+ const uptr Limit = (uptr)-1;
+ return (LHS > Limit - RHS) ? Limit : LHS + RHS;
+}
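+// For example, subtractNoOverflow(2, 4) clamps to 0 where the old
+// "Loc - MinBytesNearLoc" would wrap around the address space, and
+// addNoOverflow((uptr)-3, 4) saturates to (uptr)-1 instead of wrapping to 0.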
+
/// Render a snippet of the address space near a location.
-static void renderMemorySnippet(const __sanitizer::AnsiColorDecorator &Decor,
- MemoryLocation Loc,
+static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
Range *Ranges, unsigned NumRanges,
const Diag::Arg *Args) {
- const unsigned BytesToShow = 32;
- const unsigned MinBytesNearLoc = 4;
-
// Show at least the 8 bytes surrounding Loc.
- MemoryLocation Min = Loc - MinBytesNearLoc, Max = Loc + MinBytesNearLoc;
+ const unsigned MinBytesNearLoc = 4;
+ MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation Max = addNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation OrigMin = Min;
for (unsigned I = 0; I < NumRanges; ++I) {
Min = __sanitizer::Min(Ranges[I].getStart().getMemoryLocation(), Min);
Max = __sanitizer::Max(Ranges[I].getEnd().getMemoryLocation(), Max);
}
// If we have too many interesting bytes, prefer to show bytes after Loc.
+ const unsigned BytesToShow = 32;
if (Max - Min > BytesToShow)
- Min = __sanitizer::Min(Max - BytesToShow, Loc - MinBytesNearLoc);
- Max = Min + BytesToShow;
+ Min = __sanitizer::Min(Max - BytesToShow, OrigMin);
+ Max = addNoOverflow(Min, BytesToShow);
+
+ if (!IsAccessibleMemoryRange(Min, Max - Min)) {
+ Printf("<memory cannot be printed>\n");
+ return;
+ }
// Emit data.
for (uptr P = Min; P != Max; ++P) {
- // FIXME: Check that the address is readable before printing it.
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
Printf("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
Printf("\n");
// Emit highlights.
- Printf(Decor.Green());
+ Printf(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
char Buffer[] = { Pad, Pad, P == Loc ? '^' : Byte, Byte, 0 };
Printf((P % 8 == 0) ? Buffer : &Buffer[1]);
}
- Printf("%s\n", Decor.Default());
+ Printf("%s\n", Decor.EndHighlight());
// Go over the line again, and print names for the ranges.
InRange = 0;
}
Diag::~Diag() {
- __sanitizer::AnsiColorDecorator Decor(PrintsToTty());
- SpinMutexLock l(&CommonSanitizerReportMutex);
+ // All diagnostics should be printed under report mutex.
+ CommonSanitizerReportMutex.CheckLocked();
+ Decorator Decor;
Printf(Decor.Bold());
renderLocation(Loc);
switch (Level) {
case DL_Error:
Printf("%s runtime error: %s%s",
- Decor.Red(), Decor.Default(), Decor.Bold());
+ Decor.Warning(), Decor.EndWarning(), Decor.Bold());
break;
case DL_Note:
- Printf("%s note: %s", Decor.Black(), Decor.Default());
+ Printf("%s note: %s", Decor.Note(), Decor.EndNote());
break;
}
renderMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges,
NumRanges, Args);
}
+
+ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc)
+ : Opts(Opts), SummaryLoc(SummaryLoc) {
+ InitIfNecessary();
+ CommonSanitizerReportMutex.Lock();
+}
+
+ScopedReport::~ScopedReport() {
+ MaybePrintStackTrace(Opts.pc, Opts.bp);
+ MaybeReportErrorSummary(SummaryLoc);
+ CommonSanitizerReportMutex.Unlock();
+ if (Opts.DieAfterReport || flags()->halt_on_error)
+ Die();
+}
+
+bool __ubsan::MatchSuppression(const char *Str, SuppressionType Type) {
+ Suppression *s;
+ // If .preinit_array is not used, it is possible that the UBSan runtime is not
+ // initialized.
+ if (!SANITIZER_CAN_USE_PREINIT_ARRAY)
+ InitIfNecessary();
+ return SuppressionContext::Get()->Match(Str, Type, &s);
+}
#define UBSAN_DIAG_H
#include "ubsan_value.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
namespace __ubsan {
Diag &operator<<(const Range &R) { return AddRange(R); }
};
+struct ReportOptions {
+ /// If DieAfterReport is specified, UBSan will terminate the program after the
+ /// report is printed.
+ bool DieAfterReport;
+ /// pc/bp are used to unwind the stack trace.
+ uptr pc;
+ uptr bp;
+};
+
+#define GET_REPORT_OPTIONS(die_after_report) \
+ GET_CALLER_PC_BP; \
+ ReportOptions Opts = {die_after_report, pc, bp}
+
+/// \brief Instantiate this class before printing diagnostics in the error
+/// report. This class ensures that reports from different threads and from
+/// different sanitizers won't be mixed.
+class ScopedReport {
+ ReportOptions Opts;
+ Location SummaryLoc;
+
+public:
+ ScopedReport(ReportOptions Opts, Location SummaryLoc);
+ ~ScopedReport();
+};
+
+bool MatchSuppression(const char *Str, SuppressionType Type);
+
} // namespace __ubsan
#endif // UBSAN_DIAG_H
--- /dev/null
+//===-- ubsan_flags.cc ----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __ubsan {
+
+static const char *MaybeCallUbsanDefaultOptions() {
+ return (&__ubsan_default_options) ? __ubsan_default_options() : "";
+}
+
+void InitializeCommonFlags() {
+ CommonFlags *cf = common_flags();
+ SetCommonFlagsDefaults(cf);
+ cf->print_summary = false;
+ // Override from user-specified string.
+ ParseCommonFlagsFromString(cf, MaybeCallUbsanDefaultOptions());
+ // Override from environment variable.
+ ParseCommonFlagsFromString(cf, GetEnv("UBSAN_OPTIONS"));
+}
+
+Flags ubsan_flags;
+
+static void ParseFlagsFromString(Flags *f, const char *str) {
+ if (!str)
+ return;
+ ParseFlag(str, &f->halt_on_error, "halt_on_error",
+ "Crash the program after printing the first error report");
+ ParseFlag(str, &f->print_stacktrace, "print_stacktrace",
+ "Include full stacktrace into an error report");
+}
+
+void InitializeFlags() {
+ Flags *f = flags();
+ // Default values.
+ f->halt_on_error = false;
+ f->print_stacktrace = false;
+ // Override from user-specified string.
+ ParseFlagsFromString(f, MaybeCallUbsanDefaultOptions());
+ // Override from environment variable.
+ ParseFlagsFromString(f, GetEnv("UBSAN_OPTIONS"));
+}
+
+} // namespace __ubsan
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options() { return ""; }
+} // extern "C"
+#endif
--- /dev/null
+//===-- ubsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAGS_H
+#define UBSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __ubsan {
+
+struct Flags {
+ bool halt_on_error;
+ bool print_stacktrace;
+};
+
+extern Flags ubsan_flags;
+inline Flags *flags() { return &ubsan_flags; }
+
+void InitializeCommonFlags();
+void InitializeFlags();
+
+} // namespace __ubsan
+
+extern "C" {
+// Users may provide their own implementation of __ubsan_default_options to
+// override the default flag values.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options();
+} // extern "C"
+
+#endif // UBSAN_FLAGS_H
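
Illustration (application code, not part of the runtime): the weak
__ubsan_default_options hook above lets a program bake in its own defaults;
values parsed later from UBSAN_OPTIONS still override them (see
InitializeFlags in ubsan_flags.cc). A minimal override using the two flags
this patch defines, in the usual colon-separated *SAN_OPTIONS syntax:

extern "C" const char *__ubsan_default_options() {
  return "print_stacktrace=1:halt_on_error=1";
}
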
using namespace __sanitizer;
using namespace __ubsan;
+static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) {
+ // If the source location is already acquired, we don't need to print the
+ // error report a second time. However, if we're in an unrecoverable
+ // handler, it's possible that the location was acquired by a concurrently
+ // running thread. In that case we must continue execution so that one of
+ // the threads grabs the report mutex and prints the report before the
+ // program crashes.
+ return SLoc.isDisabled() && !Opts.DieAfterReport;
+}
+
namespace __ubsan {
const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
}
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
- Location FallbackLoc) {
+ Location FallbackLoc, ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
-
// Use the SourceLocation from Data to track deduplication, even if 'invalid'
- if (Loc.getSourceLocation().isDisabled())
+ if (ignoreReport(Loc.getSourceLocation(), Opts))
return;
+
if (Data->Loc.isInvalid())
Loc = FallbackLoc;
+ ScopedReport R(Opts, Loc);
+
if (!Pointer)
Diag(Loc, DL_Error, "%0 null pointer of type %1")
<< TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
if (Pointer)
Diag(Pointer, DL_Note, "pointer points here");
}
+
void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(false);
+ handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
}
void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data,
ValueHandle Pointer) {
- handleTypeMismatchImpl(Data, Pointer, getCallerLocation());
+ GET_REPORT_OPTIONS(true);
+ handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts);
Die();
}
/// \brief Common diagnostic emission for various forms of integer overflow.
-template<typename T> static void HandleIntegerOverflow(OverflowData *Data,
- ValueHandle LHS,
- const char *Operator,
- T RHS) {
+template <typename T>
+static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ const char *Operator, T RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "%0 integer overflow: "
"%1 %2 %3 cannot be represented in type %4")
<< (Data->Type.isSignedIntegerTy() ? "signed" : "unsigned")
<< Value(Data->Type, LHS) << Operator << RHS << Data->Type;
}
-void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "+", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_add_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_add_overflow(Data, LHS, RHS);
- Die();
-}
+#define UBSAN_OVERFLOW_HANDLER(handler_name, op, abort) \
+ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \
+ ValueHandle RHS) { \
+ GET_REPORT_OPTIONS(abort); \
+ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \
+ if (abort) Die(); \
+ }
-void __ubsan::__ubsan_handle_sub_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "-", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_sub_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_sub_overflow(Data, LHS, RHS);
- Die();
-}
-
-void __ubsan::__ubsan_handle_mul_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
- HandleIntegerOverflow(Data, LHS, "*", Value(Data->Type, RHS));
-}
-void __ubsan::__ubsan_handle_mul_overflow_abort(OverflowData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
- __ubsan_handle_mul_overflow(Data, LHS, RHS);
- Die();
-}
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true)
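
For reference, the first instantiation above expands to (approximately) the
following; the remaining hand-written handlers below have the same shape:

// void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data,
//                                           ValueHandle LHS,
//                                           ValueHandle RHS) {
//   GET_REPORT_OPTIONS(false);
//   handleIntegerOverflowImpl(Data, LHS, "+", Value(Data->Type, RHS), Opts);
//   if (false) Die();
// }
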
-void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
- ValueHandle OldVal) {
+static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
if (Data->Type.isSignedIntegerTy())
Diag(Loc, DL_Error,
"negation of %0 cannot be represented in type %1; "
"negation of %0 cannot be represented in type %1")
<< Value(Data->Type, OldVal) << Data->Type;
}
+
+void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
+ ValueHandle OldVal) {
+ GET_REPORT_OPTIONS(false);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
+}
void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data,
ValueHandle OldVal) {
- __ubsan_handle_negate_overflow(Data, OldVal);
+ GET_REPORT_OPTIONS(true);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
Die();
}
-void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
- ValueHandle LHS, ValueHandle RHS) {
+static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ ValueHandle RHS, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->Type, LHS);
Value RHSVal(Data->Type, RHS);
if (RHSVal.isMinusOne())
else
Diag(Loc, DL_Error, "division by zero");
}
+
+void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
+ ValueHandle LHS, ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
+}
void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_divrem_overflow(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
- ValueHandle LHS,
- ValueHandle RHS) {
+static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS, ValueHandle RHS,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value LHSVal(Data->LHSType, LHS);
Value RHSVal(Data->RHSType, RHS);
if (RHSVal.isNegative())
"left shift of %0 by %1 places cannot be represented in type %2")
<< LHSVal << RHSVal << Data->LHSType;
}
+
+void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
+}
void __ubsan::__ubsan_handle_shift_out_of_bounds_abort(
ShiftOutOfBoundsData *Data,
ValueHandle LHS,
ValueHandle RHS) {
- __ubsan_handle_shift_out_of_bounds(Data, LHS, RHS);
+ GET_REPORT_OPTIONS(true);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
Die();
}
-void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
- ValueHandle Index) {
+static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Value IndexVal(Data->IndexType, Index);
Diag(Loc, DL_Error, "index %0 out of bounds for type %1")
<< IndexVal << Data->ArrayType;
}
+
+void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
+ ValueHandle Index) {
+ GET_REPORT_OPTIONS(false);
+ handleOutOfBoundsImpl(Data, Index, Opts);
+}
void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data,
ValueHandle Index) {
- __ubsan_handle_out_of_bounds(Data, Index);
+ GET_REPORT_OPTIONS(true);
+ handleOutOfBoundsImpl(Data, Index, Opts);
Die();
}
-void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+static void handleBuiltinUnreachableImpl(UnreachableData *Data,
+ ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc);
Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call");
+}
+
+void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleBuiltinUnreachableImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) {
+ ScopedReport R(Opts, Data->Loc);
Diag(Data->Loc, DL_Error,
"execution reached the end of a value-returning function "
"without returning a value");
+}
+
+void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleMissingReturnImpl(Data, Opts);
Die();
}
-void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
- ValueHandle Bound) {
+static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "variable length array bound evaluates to "
"non-positive value %0")
<< Value(Data->Type, Bound);
}
+
+void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(false);
+ handleVLABoundNotPositive(Data, Bound, Opts);
+}
void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data,
- ValueHandle Bound) {
- __ubsan_handle_vla_bound_not_positive(Data, Bound);
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(true);
+ handleVLABoundNotPositive(Data, Bound, Opts);
Die();
}
-
-void __ubsan::__ubsan_handle_float_cast_overflow(FloatCastOverflowData *Data,
- ValueHandle From) {
+static void handleFloatCastOverflow(FloatCastOverflowData *Data,
+ ValueHandle From, ReportOptions Opts) {
// TODO: Add deduplication once a SourceLocation is generated for this check.
- Diag(getCallerLocation(), DL_Error,
+ Location Loc = getCallerLocation();
+ ScopedReport R(Opts, Loc);
+
+ Diag(Loc, DL_Error,
"value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+ << Value(Data->FromType, From) << Data->FromType << Data->ToType;
}
-void __ubsan::__ubsan_handle_float_cast_overflow_abort(
- FloatCastOverflowData *Data,
- ValueHandle From) {
- Diag(getCallerLocation(), DL_Error,
- "value %0 is outside the range of representable values of type %2")
- << Value(Data->FromType, From) << Data->FromType << Data->ToType;
+
+void __ubsan::__ubsan_handle_float_cast_overflow(FloatCastOverflowData *Data,
+ ValueHandle From) {
+ GET_REPORT_OPTIONS(false);
+ handleFloatCastOverflow(Data, From, Opts);
+}
+void
+__ubsan::__ubsan_handle_float_cast_overflow_abort(FloatCastOverflowData *Data,
+ ValueHandle From) {
+ GET_REPORT_OPTIONS(true);
+ handleFloatCastOverflow(Data, From, Opts);
Die();
}
-void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
- ValueHandle Val) {
+static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
+ ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error,
"load of value %0, which is not a valid value for type %1")
<< Value(Data->Type, Val) << Data->Type;
}
+
+void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
+ ValueHandle Val) {
+ GET_REPORT_OPTIONS(false);
+ handleLoadInvalidValue(Data, Val, Opts);
+}
void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data,
ValueHandle Val) {
- __ubsan_handle_load_invalid_value(Data, Val);
+ GET_REPORT_OPTIONS(true);
+ handleLoadInvalidValue(Data, Val, Opts);
Die();
}
-void __ubsan::__ubsan_handle_function_type_mismatch(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
+static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ReportOptions Opts) {
const char *FName = "(unknown)";
Location Loc = getFunctionLocation(Function, &FName);
+ ScopedReport R(Opts, Loc);
+
Diag(Data->Loc, DL_Error,
"call to function %0 through pointer to incorrect function type %1")
<< FName << Data->Type;
Diag(Loc, DL_Note, "%0 defined here") << FName;
}
+void
+__ubsan::__ubsan_handle_function_type_mismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, Opts);
+}
+
void __ubsan::__ubsan_handle_function_type_mismatch_abort(
- FunctionTypeMismatchData *Data,
- ValueHandle Function) {
- __ubsan_handle_function_type_mismatch(Data, Function);
+ FunctionTypeMismatchData *Data, ValueHandle Function) {
+ GET_REPORT_OPTIONS(true);
+ handleFunctionTypeMismatch(Data, Function, Opts);
Die();
}
-static void handleNonnullReturn(NonNullReturnData *Data) {
+static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "null pointer returned from function declared to never "
"return null");
if (!Data->AttrLoc.isInvalid())
Diag(Data->AttrLoc, DL_Note, "returns_nonnull attribute specified here");
}
void __ubsan::__ubsan_handle_nonnull_return(NonNullReturnData *Data) {
- handleNonnullReturn(Data);
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, Opts);
}
void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
- handleNonnullReturn(Data);
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, Opts);
Die();
}
-static void handleNonNullArg(NonNullArgData *Data) {
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
+ if (ignoreReport(Loc, Opts))
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
"never be null") << Data->ArgIndex;
if (!Data->AttrLoc.isInvalid())
Diag(Data->AttrLoc, DL_Note, "nonnull attribute specified here");
}
void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
- handleNonNullArg(Data);
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts);
}
void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
- handleNonNullArg(Data);
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts);
Die();
}
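
Every check in ubsan_handlers.cc now follows the same shape: the exported entry points only gather a ReportOptions value (via GET_REPORT_OPTIONS, presumably expanding to the caller's PC/BP plus the unrecoverable flag) and delegate to one shared static Impl, with Die() hoisted into the _abort twin so the shared code never terminates the process itself. A minimal standalone sketch of that layout follows; every Demo/demo_ name is a hypothetical stand-in for the runtime's real diagnostics types, not the upstream source:

  // demo_handlers.cc - illustrative sketch only; all Demo/demo_ names are
  // hypothetical stand-ins for the runtime's types in ubsan_diag.h.
  #include <cstdio>
  #include <cstdlib>

  struct DemoReportOptions {
    bool FromUnrecoverableHandler; // set by the _abort entry points
  };

  // Stand-in for ignoreReport(): the real one also consults the source
  // location's "already reported" bit and the runtime flags.
  static bool demo_ignoreReport(const DemoReportOptions &) { return false; }

  // Stand-in for ScopedReport: the real one serializes report output and can
  // halt the process on destruction depending on flags.
  struct DemoScopedReport {
    explicit DemoScopedReport(const DemoReportOptions &) {}
  };

  // One shared implementation per check, parameterized by the report options.
  static void demoHandleDivremOverflowImpl(long LHS, long RHS,
                                           DemoReportOptions Opts) {
    if (demo_ignoreReport(Opts))
      return;
    DemoScopedReport R(Opts);
    if (RHS == -1)
      std::printf("division of %ld by -1 cannot be represented\n", LHS);
    else
      std::printf("division by zero\n");
  }

  // Recoverable entry: report, then return to the instrumented code.
  extern "C" void demo_handle_divrem_overflow(long LHS, long RHS) {
    DemoReportOptions Opts = {false}; // ~ GET_REPORT_OPTIONS(false)
    demoHandleDivremOverflowImpl(LHS, RHS, Opts);
  }

  // Unrecoverable entry: report, then terminate. Die() never returns, which
  // is why the caller, not the shared Impl, makes that call.
  extern "C" void demo_handle_divrem_overflow_abort(long LHS, long RHS) {
    DemoReportOptions Opts = {true}; // ~ GET_REPORT_OPTIONS(true)
    demoHandleDivremOverflowImpl(LHS, RHS, Opts);
    std::abort(); // stands in for Die()
  }

  int main() {
    demo_handle_divrem_overflow(5, -1); // prints, then execution continues
    demo_handle_divrem_overflow(7, 0);
    return 0;
  }

The payoff is that the recoverable and aborting paths differ only in the options value and the final Die(), so the report-formatting logic exists exactly once per check.
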
unsigned char TypeCheckKind;
};
+#define UNRECOVERABLE(checkname, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
+ void __ubsan_handle_ ## checkname( __VA_ARGS__ );
+
#define RECOVERABLE(checkname, ...) \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \
- extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ );
/// \brief Handle a runtime type check failure, caused by either a misaligned
/// pointer, a null pointer, or a pointer to invalid storage.
};
/// \brief Handle a __builtin_unreachable which is reached.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_builtin_unreachable(UnreachableData *Data);
+UNRECOVERABLE(builtin_unreachable, UnreachableData *Data)
/// \brief Handle reaching the end of a value-returning function.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-void __ubsan_handle_missing_return(UnreachableData *Data);
+UNRECOVERABLE(missing_return, UnreachableData *Data)
struct VLABoundData {
SourceLocation Loc;
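
For reference, a hedged sketch of what one use of each declaration macro expands to. The attribute spellings below are assumptions about how SANITIZER_INTERFACE_ATTRIBUTE and NORETURN are defined for ELF targets; only the shape matters: UNRECOVERABLE declares a single noreturn entry, while RECOVERABLE declares a plain entry plus a noreturn _abort twin (the NORETURN on the twin being what this patch adds):

  // Hypothetical expansion on an ELF target, assuming that
  // SANITIZER_INTERFACE_ATTRIBUTE is __attribute__((visibility("default")))
  // and NORETURN is __attribute__((noreturn)).
  struct UnreachableData;
  struct OverflowData;
  typedef unsigned long ValueHandle;

  // UNRECOVERABLE(builtin_unreachable, UnreachableData *Data) declares one
  // entry point that never returns:
  extern "C" __attribute__((visibility("default"))) __attribute__((noreturn))
  void __ubsan_handle_builtin_unreachable(UnreachableData *Data);

  // RECOVERABLE(divrem_overflow, OverflowData *Data, ValueHandle LHS,
  //             ValueHandle RHS) declares the plain entry plus its twin:
  extern "C" __attribute__((visibility("default")))
  void __ubsan_handle_divrem_overflow(OverflowData *Data, ValueHandle LHS,
                                      ValueHandle RHS);
  extern "C" __attribute__((visibility("default"))) __attribute__((noreturn))
  void __ubsan_handle_divrem_overflow_abort(OverflowData *Data,
                                            ValueHandle LHS, ValueHandle RHS);
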
#include "ubsan_type_hash.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
using namespace __sanitizer;
using namespace __ubsan;
static void HandleDynamicTypeCacheMiss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash,
- bool Abort) {
+ ReportOptions Opts) {
if (checkDynamicType((void*)Pointer, Data->TypeInfo, Hash))
// Just a cache miss. The type matches after all.
return;
+ // Check if the error report should be suppressed.
+ DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
+ if (DTI.isValid() &&
+ MatchSuppression(DTI.getMostDerivedTypeName(), SuppressionVptrCheck))
+ return;
+
SourceLocation Loc = Data->Loc.acquire();
if (Loc.isDisabled())
return;
+ ScopedReport R(Opts, Loc);
+
Diag(Loc, DL_Error,
"%0 address %1 which does not point to an object of type %2")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
// If possible, say what type it actually points to.
- DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer);
if (!DTI.isValid())
Diag(Pointer, DL_Note, "object has invalid vptr")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
+ << MangledName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
else if (!DTI.getOffset())
Diag(Pointer, DL_Note, "object is of type %0")
- << MangledName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
+ << MangledName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
else
// FIXME: Find the type at the specified offset, and include that
// in the note.
Diag(Pointer - DTI.getOffset(), DL_Note,
"object is base class subobject at offset %0 within object of type %1")
- << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName())
- << MangledName(DTI.getSubobjectTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "vptr for %2 base class of %1");
-
- if (Abort)
- Die();
+ << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName())
+ << MangledName(DTI.getSubobjectTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr),
+ "vptr for %2 base class of %1");
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, false);
+ GET_REPORT_OPTIONS(false);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, true);
+ GET_REPORT_OPTIONS(true);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
}
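
With the MatchSuppression call above, a vptr report can now be silenced per dynamic type through the common suppressions mechanism. A hedged user-side example; the vptr_check category name comes from sanitizer_common's suppression kinds, and the pattern is matched against the actual (most derived) dynamic type of the object, so treat the exact invocation as an assumption:

  // repro.cc -- build: clang++ -fsanitize=vptr repro.cc
  // Suppress:  echo "vptr_check:Base" > supp.txt
  //            UBSAN_OPTIONS=suppressions=supp.txt ./a.out
  struct Base { virtual ~Base() {} };
  struct Derived : Base { int extra; };

  int main() {
    Base B;
    // Member access through the wrong dynamic type: without the suppression
    // the vptr check reports that &B "does not point to an object of type
    // 'Derived'". The suppression pattern names the object's real dynamic
    // type (Base), since that is what getMostDerivedTypeName() yields.
    Derived *D = static_cast<Derived *>(&B);
    return D->extra; // deliberately undefined: reads past the Base object
  }
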
--- /dev/null
+//===-- ubsan_init.cc -----------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __ubsan;
+
+static bool ubsan_inited;
+
+void __ubsan::InitIfNecessary() {
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+ // No need to lock the mutex if we're initializing from the preinit array.
+ static StaticSpinMutex init_mu;
+ SpinMutexLock l(&init_mu);
+#endif
+ if (LIKELY(ubsan_inited))
+ return;
+ if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) {
+ // WARNING: If this condition holds, then either UBSan is running in
+ // standalone mode, or the initializer for another sanitizer hasn't run
+ // yet. In the latter case, that sanitizer will later overwrite
+ // "SanitizerToolName" and reparse the common flags, which means we must
+ // not *use* common flags in this function.
+ SanitizerToolName = "UndefinedBehaviorSanitizer";
+ InitializeCommonFlags();
+ }
+ // Initialize UBSan-specific flags.
+ InitializeFlags();
+ SuppressionContext::InitIfNecessary();
+ ubsan_inited = true;
+}
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+__attribute__((section(".preinit_array"), used))
+void (*__local_ubsan_preinit)(void) = __ubsan::InitIfNecessary;
+#else
+// Use a dynamic initializer.
+class UbsanInitializer {
+ public:
+ UbsanInitializer() {
+ InitIfNecessary();
+ }
+};
+static UbsanInitializer ubsan_initializer;
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
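
The first branch above leans on a general ELF/glibc mechanism: function pointers placed in .preinit_array run before any .init_array constructors or C++ dynamic initializers, and before user threads can exist, which is why that path needs no lock; the section is only honored in the main executable, hence the SANITIZER_CAN_USE_PREINIT_ARRAY guard and the dynamic-initializer fallback. A minimal standalone illustration, assuming Linux/glibc:

  // preinit_demo.cc -- Linux/glibc only. Expected output order:
  //   preinit, then dynamic initializer, then main.
  #include <cstdio>

  static void early_init() {
    // Runs before main() and before every C++ dynamic initializer.
    std::printf("preinit\n");
  }

  // Drop a function pointer into .preinit_array; "used" keeps the toolchain
  // from discarding it even though nothing references it by name.
  __attribute__((section(".preinit_array"), used))
  static void (*preinit_hook)(void) = early_init;

  static struct Later {
    Later() { std::printf("dynamic initializer\n"); }
  } later;

  int main() {
    std::printf("main\n");
    return 0;
  }
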
--- /dev/null
+//===-- ubsan_init.h --------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization function for UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_INIT_H
+#define UBSAN_INIT_H
+
+namespace __ubsan {
+
+// NOTE: This function might take a lock (if .preinit_array initialization is
+// not used). It's generally a bad idea to call it on a fast path.
+void InitIfNecessary();
+
+} // namespace __ubsan
+
+#endif // UBSAN_INIT_H
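
In standalone UBSan builds nothing else sets the runtime up before the first check fires, so public entry points are expected to reach InitIfNecessary() lazily (in this merge, plausibly from the report machinery rather than from each handler body; treat that placement as an assumption). A sketch of the intended call pattern, not buildable outside the runtime tree:

  #include "ubsan_init.h"

  // Hypothetical entry point: make sure flags, suppressions and the
  // symbolizer are ready before the first report is emitted. After the first
  // call this is a single flag test (plus a spin lock when .preinit_array is
  // unavailable).
  extern "C" void __demo_ubsan_entry() {
    __ubsan::InitIfNecessary();
    // ... acquire the source location, build and print the diagnostic ...
  }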