From: Max Ostapenko
Date: Wed, 21 Oct 2015 07:32:45 +0000 (+0300)
Subject: libsanitizer merge from upstream r250806.
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=696d846a56cc12549f080c6c87e6a0272bdb29f1;p=gcc.git

libsanitizer merge from upstream r250806.

libsanitizer/

2015-10-20  Maxim Ostapenko

	* All source files: Merge from upstream r250806.
	* configure.ac (link_sanitizer_common): Add -lrt flag.
	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
	Set CXX_ABI_NEEDED=true for darwin.
	* asan/Makefile.am (asan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
	DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.am (ubsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=1.
	(libubsan_la_LIBADD): Add -lc++abi if CXX_ABI_NEEDED is true.
	* ubsan/Makefile.in: Regenerate.
	* tsan/Makefile.am (tsan_files): Add new files.
	(DEFS): Add DCAN_SANITIZE_UB=0.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	* sanitizer_common/Makefile.in: Regenerate.
	* asan/libtool-version: Bump the libasan SONAME.

From-SVN: r229111
---

diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index ba4c4239509..3316629c809 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,24 @@
+2015-10-21  Maxim Ostapenko
+
+	* All source files: Merge from upstream r250806.
+	* configure.ac (link_sanitizer_common): Add -lrt flag.
+	* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
+	Set USE_CXX_ABI_FLAG=true for darwin.
+	* asan/Makefile.am (asan_files): Add new files.
+	(DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy
+	DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
+	* asan/Makefile.in: Regenerate.
+	* ubsan/Makefile.am (ubsan_files): Add new files.
+	(DEFS): Add DCAN_SANITIZE_UB=1.
+	(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
+	* ubsan/Makefile.in: Regenerate.
+	* tsan/Makefile.am (tsan_files): Add new files.
+	(DEFS): Add DCAN_SANITIZE_UB=0.
+	* tsan/Makefile.in: Regenerate.
+	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
+	* sanitizer_common/Makefile.in: Regenerate.
+	* asan/libtool-version: Bump the libasan SONAME.
+
 2015-09-09  Markus Trippelsdorf
 
 	PR sanitizer/67258
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index ef893cb306e..6ff9a9acc6b 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-221802
+250806
 
 The first line of this file holds the svn revision number of the
 last merge done from the master library sources.
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index 54c74ce3e42..bd3cd735a98 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir)
 # May be used by toolexeclibdir.
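[Editorial aside] The CAN_SANITIZE_UB define mentioned in the ChangeLog above (and added to DEFS in the Makefile hunks below) is a plain preprocessor gate that lets each runtime library compile UBSan-dependent paths in or out (0 for libasan/libtsan, 1 for libubsan). A minimal sketch of that kind of gate; the ReportIssue function here is made up for illustration, not an actual sanitizer entry point:

// Minimal sketch of a compile-time gate in the style of CAN_SANITIZE_UB.
#include <cstdio>

#ifndef CAN_SANITIZE_UB
#define CAN_SANITIZE_UB 0   // e.g. -DCAN_SANITIZE_UB=0 as in asan/Makefile.am
#endif

static void ReportIssue(const char *what) {
#if CAN_SANITIZE_UB
  std::printf("would route %s through the UBSan runtime\n", what);
#else
  std::printf("UBSan support compiled out; plain report for %s\n", what);
#endif
}

int main() { ReportIssue("signed-integer-overflow"); }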
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 -DASAN_NEEDS_SEGV=1 +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 -DCAN_SANITIZE_UB=0 if USING_MAC_INTERPOSE DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT endif @@ -17,9 +17,10 @@ nodist_toolexeclib_HEADERS = libasan_preinit.o asan_files = \ asan_activation.cc \ - asan_allocator2.cc \ + asan_allocator.cc \ asan_debugging.cc \ asan_fake_stack.cc \ + asan_flags.cc \ asan_globals.cc \ asan_interceptors.cc \ asan_linux.cc \ @@ -34,6 +35,7 @@ asan_files = \ asan_rtl.cc \ asan_stack.cc \ asan_stats.cc \ + asan_suppressions.cc \ asan_thread.cc \ asan_win.cc \ asan_win_dll_thunk.cc \ diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in index edbed4d3d25..229c7b400d1 100644 --- a/libsanitizer/asan/Makefile.in +++ b/libsanitizer/asan/Makefile.in @@ -111,14 +111,14 @@ libasan_la_DEPENDENCIES = \ $(top_builddir)/sanitizer_common/libsanitizer_common.la \ $(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \ $(am__append_3) $(am__DEPENDENCIES_1) -am__objects_1 = asan_activation.lo asan_allocator2.lo \ - asan_debugging.lo asan_fake_stack.lo asan_globals.lo \ +am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \ + asan_fake_stack.lo asan_flags.lo asan_globals.lo \ asan_interceptors.lo asan_linux.lo asan_mac.lo \ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \ - asan_thread.lo asan_win.lo asan_win_dll_thunk.lo \ - asan_win_dynamic_runtime_thunk.lo + asan_suppressions.lo asan_thread.lo asan_win.lo \ + asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo am_libasan_la_OBJECTS = $(am__objects_1) libasan_la_OBJECTS = $(am_libasan_la_OBJECTS) libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ @@ -172,8 +172,8 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \ -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \ - -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 \ - -DASAN_NEEDS_SEGV=1 $(am__append_1) + -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 \ + -DCAN_SANITIZE_UB=0 $(am__append_1) DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ @@ -306,9 +306,10 @@ toolexeclib_LTLIBRARIES = libasan.la nodist_toolexeclib_HEADERS = libasan_preinit.o asan_files = \ asan_activation.cc \ - asan_allocator2.cc \ + asan_allocator.cc \ asan_debugging.cc \ asan_fake_stack.cc \ + asan_flags.cc \ asan_globals.cc \ asan_interceptors.cc \ asan_linux.cc \ @@ -323,6 +324,7 @@ asan_files = \ asan_rtl.cc \ asan_stack.cc \ asan_stats.cc \ + asan_suppressions.cc \ asan_thread.cc \ asan_win.cc \ asan_win_dll_thunk.cc \ @@ -450,9 +452,10 @@ distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@ @AMDEP_TRUE@@am__include@ 
@am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@ @@ -467,6 +470,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@ diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc index 235451c8aec..58867956086 100644 --- a/libsanitizer/asan/asan_activation.cc +++ b/libsanitizer/asan/asan_activation.cc @@ -14,32 +14,106 @@ #include "asan_allocator.h" #include "asan_flags.h" #include "asan_internal.h" +#include "asan_poisoning.h" +#include "asan_stack.h" #include "sanitizer_common/sanitizer_flags.h" namespace __asan { static struct AsanDeactivatedFlags { - int quarantine_size; - int max_redzone; + AllocatorOptions allocator_options; int malloc_context_size; bool poison_heap; + bool coverage; + const char *coverage_dir; + + void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) { +#define ASAN_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &f->Name); +#define COMMON_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &cf->Name); +#include "asan_activation_flags.inc" +#undef ASAN_ACTIVATION_FLAG +#undef COMMON_ACTIVATION_FLAG + + RegisterIncludeFlags(parser, cf); + } + + void OverrideFromActivationFlags() { + Flags f; + CommonFlags cf; + FlagParser parser; + RegisterActivationFlags(&parser, &f, &cf); + + // Copy the current activation flags. + allocator_options.CopyTo(&f, &cf); + cf.malloc_context_size = malloc_context_size; + f.poison_heap = poison_heap; + cf.coverage = coverage; + cf.coverage_dir = coverage_dir; + cf.verbosity = Verbosity(); + cf.help = false; // this is activation-specific help + + // Check if activation flags need to be overriden. + if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { + parser.ParseString(env); + } + + // Override from getprop asan.options. + char buf[100]; + GetExtraActivationFlags(buf, sizeof(buf)); + parser.ParseString(buf); + + SetVerbosity(cf.verbosity); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (cf.help) parser.PrintFlagDescriptions(); + + allocator_options.SetFrom(&f, &cf); + malloc_context_size = cf.malloc_context_size; + poison_heap = f.poison_heap; + coverage = cf.coverage; + coverage_dir = cf.coverage_dir; + } + + void Print() { + Report( + "quarantine_size_mb %d, max_redzone %d, poison_heap %d, " + "malloc_context_size %d, alloc_dealloc_mismatch %d, " + "allocator_may_return_null %d, coverage %d, coverage_dir %s\n", + allocator_options.quarantine_size_mb, allocator_options.max_redzone, + poison_heap, malloc_context_size, + allocator_options.alloc_dealloc_mismatch, + allocator_options.may_return_null, coverage, coverage_dir); + } } asan_deactivated_flags; static bool asan_is_deactivated; -void AsanStartDeactivated() { +void AsanDeactivate() { + CHECK(!asan_is_deactivated); VReport(1, "Deactivating ASan\n"); - // Save flag values. 
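[Editorial aside] The RegisterActivationFlags helper shown earlier in this hunk relies on an X-macro include: each ASAN_ACTIVATION_FLAG / COMMON_ACTIVATION_FLAG entry in asan_activation_flags.inc expands into a RegisterFlag call. The real code #defines the macro and then #includes the .inc file; the self-contained sketch below inlines a made-up flag list instead, and its Flags struct and RegisterFlag stub are stand-ins, not the sanitizer API:

// Self-contained sketch of the X-macro pattern used by asan_activation_flags.inc.
#include <cstdio>

#define MY_FLAG_LIST(X) \
  X(int, max_redzone)   \
  X(bool, poison_heap)

struct Flags {
#define DECLARE(Type, Name) Type Name;
  MY_FLAG_LIST(DECLARE)
#undef DECLARE
};

// Stand-in for RegisterFlag(parser, #Name, "", &f->Name).
static void RegisterFlag(const char *name, void *storage) {
  std::printf("registered flag '%s' at %p\n", name, storage);
}

static void RegisterAll(Flags *f) {
#define REGISTER(Type, Name) RegisterFlag(#Name, &f->Name);
  MY_FLAG_LIST(REGISTER)
#undef REGISTER
}

int main() {
  Flags f = {};
  RegisterAll(&f);
}

Expanding the same list once for storage and once for registration is why the activation-time flag subset can live in a single .inc file.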
- asan_deactivated_flags.quarantine_size = flags()->quarantine_size; - asan_deactivated_flags.max_redzone = flags()->max_redzone; - asan_deactivated_flags.poison_heap = flags()->poison_heap; - asan_deactivated_flags.malloc_context_size = - common_flags()->malloc_context_size; - - flags()->quarantine_size = 0; - flags()->max_redzone = 16; - flags()->poison_heap = false; - common_flags()->malloc_context_size = 0; + + // Stash runtime state. + GetAllocatorOptions(&asan_deactivated_flags.allocator_options); + asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); + asan_deactivated_flags.poison_heap = CanPoisonMemory(); + asan_deactivated_flags.coverage = common_flags()->coverage; + asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir; + + // Deactivate the runtime. + SetCanPoisonMemory(false); + SetMallocContextSize(1); + ReInitializeCoverage(false, nullptr); + + AllocatorOptions disabled = asan_deactivated_flags.allocator_options; + disabled.quarantine_size_mb = 0; + disabled.min_redzone = 16; // Redzone must be at least 16 bytes long. + disabled.max_redzone = 16; + disabled.alloc_dealloc_mismatch = false; + disabled.may_return_null = true; + ReInitializeAllocator(disabled); asan_is_deactivated = true; } @@ -48,25 +122,21 @@ void AsanActivate() { if (!asan_is_deactivated) return; VReport(1, "Activating ASan\n"); - // Restore flag values. - // FIXME: this is not atomic, and there may be other threads alive. - flags()->quarantine_size = asan_deactivated_flags.quarantine_size; - flags()->max_redzone = asan_deactivated_flags.max_redzone; - flags()->poison_heap = asan_deactivated_flags.poison_heap; - common_flags()->malloc_context_size = - asan_deactivated_flags.malloc_context_size; + UpdateProcessName(); - ParseExtraActivationFlags(); + asan_deactivated_flags.OverrideFromActivationFlags(); - ReInitializeAllocator(); + SetCanPoisonMemory(asan_deactivated_flags.poison_heap); + SetMallocContextSize(asan_deactivated_flags.malloc_context_size); + ReInitializeCoverage(asan_deactivated_flags.coverage, + asan_deactivated_flags.coverage_dir); + ReInitializeAllocator(asan_deactivated_flags.allocator_options); asan_is_deactivated = false; - VReport( - 1, - "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size " - "%d\n", - flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap, - common_flags()->malloc_context_size); + if (Verbosity()) { + Report("Activated with flags:\n"); + asan_deactivated_flags.Print(); + } } } // namespace __asan diff --git a/libsanitizer/asan/asan_activation.h b/libsanitizer/asan/asan_activation.h index 01f2d46d222..162a5ebcea9 100644 --- a/libsanitizer/asan/asan_activation.h +++ b/libsanitizer/asan/asan_activation.h @@ -14,7 +14,7 @@ #define ASAN_ACTIVATION_H namespace __asan { -void AsanStartDeactivated(); +void AsanDeactivate(); void AsanActivate(); } // namespace __asan diff --git a/libsanitizer/asan/asan_activation_flags.inc b/libsanitizer/asan/asan_activation_flags.inc new file mode 100644 index 00000000000..4bab38213c1 --- /dev/null +++ b/libsanitizer/asan/asan_activation_flags.inc @@ -0,0 +1,33 @@ +//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A subset of ASan (and common) runtime flags supported at activation time. 
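[Editorial aside] Before the flag list itself, a rough picture of the deactivate/activate cycle these flags feed into: stash the real options, run with cheap conservative settings (no quarantine, minimal redzones, no heap poisoning), then restore the stash, optionally overridden from ASAN_ACTIVATION_OPTIONS, when activation happens. The sketch below only illustrates the stash-and-restore pattern; Options, ApplyOptions and the two functions are stand-ins, not the ASan interfaces:

// Illustrative stash-and-restore sketch of the deactivate/activate cycle.
#include <cassert>

struct Options {
  int quarantine_size_mb;
  int max_redzone;
  bool poison_heap;
};

static Options g_current = {256, 2048, true};
static Options g_stashed;
static bool g_deactivated = false;

static void ApplyOptions(const Options &o) { g_current = o; }

static void Deactivate() {
  assert(!g_deactivated);
  g_stashed = g_current;                 // stash runtime state
  ApplyOptions(Options{0, 16, false});   // cheap, conservative settings
  g_deactivated = true;
}

static void Activate() {
  if (!g_deactivated) return;
  // The real code would parse ASAN_ACTIVATION_OPTIONS here and override the stash.
  ApplyOptions(g_stashed);               // restore stashed state
  g_deactivated = false;
}

int main() {
  Deactivate();
  assert(g_current.max_redzone == 16);
  Activate();
  assert(g_current.max_redzone == 2048);
}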
+// +//===----------------------------------------------------------------------===// +#ifndef ASAN_ACTIVATION_FLAG +# error "Define ASAN_ACTIVATION_FLAG prior to including this file!" +#endif + +#ifndef COMMON_ACTIVATION_FLAG +# error "Define COMMON_ACTIVATION_FLAG prior to including this file!" +#endif + +// ASAN_ACTIVATION_FLAG(Type, Name) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_ACTIVATION_FLAG(int, redzone) +ASAN_ACTIVATION_FLAG(int, max_redzone) +ASAN_ACTIVATION_FLAG(int, quarantine_size_mb) +ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch) +ASAN_ACTIVATION_FLAG(bool, poison_heap) + +COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null) +COMMON_ACTIVATION_FLAG(int, malloc_context_size) +COMMON_ACTIVATION_FLAG(bool, coverage) +COMMON_ACTIVATION_FLAG(const char *, coverage_dir) +COMMON_ACTIVATION_FLAG(int, verbosity) +COMMON_ACTIVATION_FLAG(bool, help) diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc new file mode 100644 index 00000000000..187c405e118 --- /dev/null +++ b/libsanitizer/asan/asan_allocator.cc @@ -0,0 +1,906 @@ +//===-- asan_allocator.cc -------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Implementation of ASan's memory allocator, 2-nd version. +// This variant uses the allocator from sanitizer_common, i.e. the one shared +// with ThreadSanitizer and MemorySanitizer. +// +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_list.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. +// We use adaptive redzones: for larger allocation larger redzones are used. +static u32 RZLog2Size(u32 rz_log) { + CHECK_LT(rz_log, 8); + return 16 << rz_log; +} + +static u32 RZSize2Log(u32 rz_size) { + CHECK_GE(rz_size, 16); + CHECK_LE(rz_size, 2048); + CHECK(IsPowerOfTwo(rz_size)); + u32 res = Log2(rz_size) - 4; + CHECK_EQ(rz_size, RZLog2Size(res)); + return res; +} + +static AsanAllocator &get_allocator(); + +// The memory chunk allocated from the underlying allocator looks like this: +// L L L L L L H H U U U U U U R R +// L -- left redzone words (0 or more bytes) +// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. +// U -- user memory. +// R -- right redzone (0 or more bytes) +// ChunkBase consists of ChunkHeader and other bytes that overlap with user +// memory. + +// If the left redzone is greater than the ChunkHeader size we store a magic +// value in the first uptr word of the memory block and store the address of +// ChunkBase in the next uptr. 
+// M B L L L L L L L L L H H U U U U U U +// | ^ +// ---------------------| +// M -- magic value kAllocBegMagic +// B -- address of ChunkHeader pointing to the first 'H' +static const uptr kAllocBegMagic = 0xCC6E96B9; + +struct ChunkHeader { + // 1-st 8 bytes. + u32 chunk_state : 8; // Must be first. + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes + // This field is used for small sizes. For large sizes it is equal to + // SizeClassMap::kMaxSize and the actual size is stored in the + // SecondaryAllocator's metadata. + u32 user_requested_size; + u32 alloc_context_id; +}; + +struct ChunkBase : ChunkHeader { + // Header2, intersects with user memory. + u32 free_context_id; +}; + +static const uptr kChunkHeaderSize = sizeof(ChunkHeader); +static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; +COMPILER_CHECK(kChunkHeaderSize == 16); +COMPILER_CHECK(kChunkHeader2Size <= 16); + +// Every chunk of memory allocated by this allocator can be in one of 3 states: +// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. +// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. +// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +enum { + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. + CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3 +}; + +struct AsanChunk: ChunkBase { + uptr Beg() { return reinterpret_cast(this) + kChunkHeaderSize; } + uptr UsedSize(bool locked_version = false) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + return *reinterpret_cast( + get_allocator().GetMetaData(AllocBeg(locked_version))); + } + void *AllocBeg(bool locked_version = false) { + if (from_memalign) { + if (locked_version) + return get_allocator().GetBlockBeginFastLocked( + reinterpret_cast(this)); + return get_allocator().GetBlockBegin(reinterpret_cast(this)); + } + return reinterpret_cast(Beg() - RZLog2Size(rz_log)); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } +}; + +struct QuarantineCallback { + explicit QuarantineCallback(AllocatorCache *cache) + : cache_(cache) { + } + + void Recycle(AsanChunk *m) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); + CHECK_NE(m->alloc_tid, kInvalidTid); + CHECK_NE(m->free_tid, kInvalidTid); + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapLeftRedzoneMagic); + void *p = reinterpret_cast(m->AllocBeg()); + if (p != m) { + uptr *alloc_magic = reinterpret_cast(p); + CHECK_EQ(alloc_magic[0], kAllocBegMagic); + // Clear the magic value, as allocator internals may overwrite the + // contents of deallocated chunk, confusing GetAsanChunk lookup. + alloc_magic[0] = 0; + CHECK_EQ(alloc_magic[1], reinterpret_cast(m)); + } + + // Statistics. 
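[Editorial aside] Stepping back from the quarantine callback for a moment: the 3-bit redzone encoding introduced at the top of this file maps rz_log 0..7 to redzone sizes 16, 32, 64, 128, 256, 512, 1024, 2048 bytes, with RZSize2Log as the inverse. Below is a stand-alone copy of those two helpers with a round-trip check; the sanitizer CHECK/IsPowerOfTwo/Log2 helpers are replaced by assert and a GCC builtin so it compiles outside the runtime:

// Stand-alone copy of the redzone-size encoding shown above.
#include <cassert>
#include <cstdint>

static uint32_t RZLog2Size(uint32_t rz_log) {
  assert(rz_log < 8);
  return 16u << rz_log;                              // 16, 32, 64, ..., 2048
}

static uint32_t RZSize2Log(uint32_t rz_size) {
  assert(rz_size >= 16 && rz_size <= 2048);
  assert((rz_size & (rz_size - 1)) == 0);            // power of two
  uint32_t res = 31 - __builtin_clz(rz_size) - 4;    // Log2(rz_size) - 4
  assert(RZLog2Size(res) == rz_size);
  return res;
}

int main() {
  assert(RZLog2Size(3) == 128);
  assert(RZSize2Log(2048) == 7);
  for (uint32_t l = 0; l < 8; l++) assert(RZSize2Log(RZLog2Size(l)) == l);
}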
+ AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.real_frees++; + thread_stats.really_freed += m->UsedSize(); + + get_allocator().Deallocate(cache_, p); + } + + void *Allocate(uptr size) { + return get_allocator().Allocate(cache_, size, 1, false); + } + + void Deallocate(void *p) { + get_allocator().Deallocate(cache_, p); + } + + AllocatorCache *cache_; +}; + +typedef Quarantine AsanQuarantine; +typedef AsanQuarantine::Cache QuarantineCache; + +void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { + PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mmaps++; + thread_stats.mmaped += size; +} +void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { + PoisonShadow(p, size, 0); + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + FlushUnneededASanShadowMemory(p, size); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.munmaps++; + thread_stats.munmaped += size; +} + +// We can not use THREADLOCAL because it is not supported on some of the +// platforms we care about (OSX 10.6, Android). +// static THREADLOCAL AllocatorCache cache; +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + return &ms->allocator_cache; +} + +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); + return reinterpret_cast(ms->quarantine_cache); +} + +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { + quarantine_size_mb = f->quarantine_size_mb; + min_redzone = f->redzone; + max_redzone = f->max_redzone; + may_return_null = cf->allocator_may_return_null; + alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; +} + +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { + f->quarantine_size_mb = quarantine_size_mb; + f->redzone = min_redzone; + f->max_redzone = max_redzone; + cf->allocator_may_return_null = may_return_null; + f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; +} + +struct Allocator { + static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 1UL << 40); + static const uptr kMaxThreadLocalQuarantine = + FIRST_32_SECOND_64(1 << 18, 1 << 20); + + AsanAllocator allocator; + AsanQuarantine quarantine; + StaticSpinMutex fallback_mutex; + AllocatorCache fallback_allocator_cache; + QuarantineCache fallback_quarantine_cache; + + // ------------------- Options -------------------------- + atomic_uint16_t min_redzone; + atomic_uint16_t max_redzone; + atomic_uint8_t alloc_dealloc_mismatch; + + // ------------------- Initialization ------------------------ + explicit Allocator(LinkerInitialized) + : quarantine(LINKER_INITIALIZED), + fallback_quarantine_cache(LINKER_INITIALIZED) {} + + void CheckOptions(const AllocatorOptions &options) const { + CHECK_GE(options.min_redzone, 16); + CHECK_GE(options.max_redzone, options.min_redzone); + CHECK_LE(options.max_redzone, 2048); + CHECK(IsPowerOfTwo(options.min_redzone)); + CHECK(IsPowerOfTwo(options.max_redzone)); + } + + void SharedInitCode(const AllocatorOptions &options) { + CheckOptions(options); + quarantine.Init((uptr)options.quarantine_size_mb << 20, + kMaxThreadLocalQuarantine); + atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, + memory_order_release); + atomic_store(&min_redzone, options.min_redzone, memory_order_release); + atomic_store(&max_redzone, options.max_redzone, 
memory_order_release); + } + + void Initialize(const AllocatorOptions &options) { + allocator.Init(options.may_return_null); + SharedInitCode(options); + } + + void ReInitialize(const AllocatorOptions &options) { + allocator.SetMayReturnNull(options.may_return_null); + SharedInitCode(options); + } + + void GetOptions(AllocatorOptions *options) const { + options->quarantine_size_mb = quarantine.GetSize() >> 20; + options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); + options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); + options->may_return_null = allocator.MayReturnNull(); + options->alloc_dealloc_mismatch = + atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); + } + + // -------------------- Helper methods. ------------------------- + uptr ComputeRZLog(uptr user_requested_size) { + u32 rz_log = + user_requested_size <= 64 - 16 ? 0 : + user_requested_size <= 128 - 32 ? 1 : + user_requested_size <= 512 - 64 ? 2 : + user_requested_size <= 4096 - 128 ? 3 : + user_requested_size <= (1 << 14) - 256 ? 4 : + user_requested_size <= (1 << 15) - 512 ? 5 : + user_requested_size <= (1 << 16) - 1024 ? 6 : 7; + u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); + u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); + return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); + } + + // We have an address between two chunks, and we want to report just one. + AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, + AsanChunk *right_chunk) { + // Prefer an allocated chunk over freed chunk and freed chunk + // over available chunk. + if (left_chunk->chunk_state != right_chunk->chunk_state) { + if (left_chunk->chunk_state == CHUNK_ALLOCATED) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_ALLOCATED) + return right_chunk; + if (left_chunk->chunk_state == CHUNK_QUARANTINE) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_QUARANTINE) + return right_chunk; + } + // Same chunk_state: choose based on offset. + sptr l_offset = 0, r_offset = 0; + CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); + CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); + if (l_offset < r_offset) + return left_chunk; + return right_chunk; + } + + // -------------------- Allocation/Deallocation routines --------------- + void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + AllocType alloc_type, bool can_fill) { + if (UNLIKELY(!asan_inited)) + AsanInitFromRtl(); + Flags &fl = *flags(); + CHECK(stack); + const uptr min_alignment = SHADOW_GRANULARITY; + if (alignment < min_alignment) + alignment = min_alignment; + if (size == 0) { + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc + // would not return NULL even for zero-size allocations. Moreover, it + // looks like operator new should never return NULL, and results of + // consecutive "new" calls must be different even if the allocated size + // is zero. + size = 1; + } + CHECK(IsPowerOfTwo(alignment)); + uptr rz_log = ComputeRZLog(size); + uptr rz_size = RZLog2Size(rz_log); + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); + uptr needed_size = rounded_size + rz_size; + if (alignment > min_alignment) + needed_size += alignment; + bool using_primary_allocator = true; + // If we are allocating from the secondary allocator, there will be no + // automatic right redzone, so add the right redzone manually. 
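[Editorial aside] To make the surrounding size computation concrete, assume default flags (redzone=16, max_redzone=2048, so the table is not clamped) and an 8-byte SHADOW_GRANULARITY: a 100-byte malloc falls into the "<= 512 - 64" row, so rz_log = 2 and rz_size = 64; rounded_size = RoundUpTo(max(100, kChunkHeader2Size=16), 8) = 104, and needed_size = 104 + 64 = 168 bytes requested from the primary allocator. A small sketch reproducing just that arithmetic (the real ComputeRZLog additionally clamps between the redzone and max_redzone flags):

// Reproduces the redzone/size arithmetic from Allocate() above for one case.
#include <cassert>
#include <cstdint>

static uint64_t RoundUpTo(uint64_t size, uint64_t boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

static uint32_t ComputeRZLog(uint64_t s) {   // same table as in the runtime
  return s <= 64 - 16 ? 0 : s <= 128 - 32 ? 1 : s <= 512 - 64 ? 2 :
         s <= 4096 - 128 ? 3 : s <= (1 << 14) - 256 ? 4 :
         s <= (1 << 15) - 512 ? 5 : s <= (1 << 16) - 1024 ? 6 : 7;
}

int main() {
  const uint64_t kShadowGranularity = 8, kChunkHeader2Size = 16;
  uint64_t size = 100;
  uint32_t rz_log = ComputeRZLog(size);                       // 2
  uint64_t rz_size = 16u << rz_log;                           // 64-byte redzones
  uint64_t rounded = RoundUpTo(size > kChunkHeader2Size ? size : kChunkHeader2Size,
                               kShadowGranularity);           // 104
  uint64_t needed = rounded + rz_size;                        // 168 bytes
  assert(rz_log == 2 && rounded == 104 && needed == 168);
}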
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { + needed_size += rz_size; + using_primary_allocator = false; + } + CHECK(IsAligned(needed_size, min_alignment)); + if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { + Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", + (void*)size); + return allocator.ReturnNullOrDie(); + } + + AsanThread *t = GetCurrentThread(); + void *allocated; + bool check_rss_limit = true; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = + allocator.Allocate(cache, needed_size, 8, false, check_rss_limit); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = + allocator.Allocate(cache, needed_size, 8, false, check_rss_limit); + } + + if (!allocated) + return allocator.ReturnNullOrDie(); + + if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { + // Heap poisoning is enabled, but the allocator provides an unpoisoned + // chunk. This is possible if CanPoisonMemory() was false for some + // time, for example, due to flags()->start_disabled. + // Anyway, poison the block before using it for anything else. + uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); + PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); + } + + uptr alloc_beg = reinterpret_cast(allocated); + uptr alloc_end = alloc_beg + needed_size; + uptr beg_plus_redzone = alloc_beg + rz_size; + uptr user_beg = beg_plus_redzone; + if (!IsAligned(user_beg, alignment)) + user_beg = RoundUpTo(user_beg, alignment); + uptr user_end = user_beg + size; + CHECK_LE(user_end, alloc_end); + uptr chunk_beg = user_beg - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast(chunk_beg); + m->alloc_type = alloc_type; + m->rz_log = rz_log; + u32 alloc_tid = t ? t->tid() : 0; + m->alloc_tid = alloc_tid; + CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? + m->free_tid = kInvalidTid; + m->from_memalign = user_beg != beg_plus_redzone; + if (alloc_beg != chunk_beg) { + CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); + reinterpret_cast(alloc_beg)[0] = kAllocBegMagic; + reinterpret_cast(alloc_beg)[1] = chunk_beg; + } + if (using_primary_allocator) { + CHECK(size); + m->user_requested_size = size; + CHECK(allocator.FromPrimary(allocated)); + } else { + CHECK(!allocator.FromPrimary(allocated)); + m->user_requested_size = SizeClassMap::kMaxSize; + uptr *meta = reinterpret_cast(allocator.GetMetaData(allocated)); + meta[0] = size; + meta[1] = chunk_beg; + } + + m->alloc_context_id = StackDepotPut(*stack); + + uptr size_rounded_down_to_granularity = + RoundDownTo(size, SHADOW_GRANULARITY); + // Unpoison the bulk of the memory region. + if (size_rounded_down_to_granularity) + PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); + // Deal with the end of the region if size is not aligned to granularity. + if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { + u8 *shadow = + (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); + *shadow = fl.poison_partial ? 
(size & (SHADOW_GRANULARITY - 1)) : 0; + } + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mallocs++; + thread_stats.malloced += size; + thread_stats.malloced_redzones += needed_size - size; + if (needed_size > SizeClassMap::kMaxSize) + thread_stats.malloc_large++; + else + thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; + + void *res = reinterpret_cast(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } +#if CAN_SANITIZE_LEAKS + m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored + : __lsan::kDirectlyLeaked; +#endif + // Must be the last mutation of metadata in this function. + atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); + ASAN_MALLOC_HOOK(res, size); + return res; + } + + void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; + // Flip the chunk_state atomically to avoid race on double-free. + if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, + CHUNK_QUARANTINE, memory_order_acquire)) + ReportInvalidFree(ptr, old_chunk_state, stack); + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); + } + + // Expects the chunk to already be marked as quarantined by using + // AtomicallySetQuarantineFlag. + void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, + AllocType alloc_type) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + + if (m->alloc_type != alloc_type) { + if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { + ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, + (AllocType)alloc_type); + } + } + + CHECK_GE(m->alloc_tid, 0); + if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. + CHECK_EQ(m->free_tid, kInvalidTid); + AsanThread *t = GetCurrentThread(); + m->free_tid = t ? t->tid() : 0; + m->free_context_id = StackDepotPut(*stack); + // Poison the region. + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapFreeMagic); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.frees++; + thread_stats.freed += m->UsedSize(); + + // Push into quarantine. + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m, + m->UsedSize()); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *ac = &fallback_allocator_cache; + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m, + m->UsedSize()); + } + } + + void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, + AllocType alloc_type) { + uptr p = reinterpret_cast(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast(chunk_beg); + if (delete_size && flags()->new_delete_type_mismatch && + delete_size != m->UsedSize()) { + ReportNewDeleteSizeMismatch(p, delete_size, stack); + } + ASAN_FREE_HOOK(ptr); + // Must mark the chunk as quarantined before any changes to its metadata. 
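[Editorial aside] That ordering matters because quarantining is done with an atomic compare-exchange on chunk_state, which is also what makes double-free detection race-free: only one thread can win the ALLOCATED to QUARANTINE transition, and the loser reports an invalid free. A reduced sketch of that pattern, using std::atomic and printf in place of the sanitizer atomics and report functions:

// Reduced sketch of the ALLOCATED -> QUARANTINE compare-exchange that
// catches double-free.
#include <atomic>
#include <cstdio>

enum ChunkState { CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };

struct Chunk { std::atomic<int> chunk_state{CHUNK_ALLOCATED}; };

static void OnFree(Chunk *m) {
  int expected = CHUNK_ALLOCATED;
  // Exactly one caller wins the transition; a second free() of the same
  // chunk sees QUARANTINE and is reported instead of being recycled.
  if (!m->chunk_state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                              std::memory_order_acquire)) {
    std::printf("double-free detected (chunk_state = %d)\n", expected);
    return;
  }
  std::printf("chunk moved to quarantine\n");
}

int main() {
  Chunk c;
  OnFree(&c);  // first free: quarantined
  OnFree(&c);  // second free: reported as double-free
}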
+ AtomicallySetQuarantineFlag(m, ptr, stack); + QuarantineChunk(m, ptr, stack, alloc_type); + } + + void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { + CHECK(old_ptr && new_size); + uptr p = reinterpret_cast(old_ptr); + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast(chunk_beg); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.reallocs++; + thread_stats.realloced += new_size; + + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + if (new_ptr) { + u8 chunk_state = m->chunk_state; + if (chunk_state != CHUNK_ALLOCATED) + ReportInvalidFree(old_ptr, chunk_state, stack); + CHECK_NE(REAL(memcpy), nullptr); + uptr memcpy_size = Min(new_size, m->UsedSize()); + // If realloc() races with free(), we may start copying freed memory. + // However, we will report racy double-free later anyway. + REAL(memcpy)(new_ptr, old_ptr, memcpy_size); + Deallocate(old_ptr, 0, stack, FROM_MALLOC); + } + return new_ptr; + } + + void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) + return allocator.ReturnNullOrDie(); + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. + if (ptr && allocator.FromPrimary(ptr)) + REAL(memset)(ptr, 0, nmemb * size); + return ptr; + } + + void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { + if (chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); + } + + void CommitBack(AsanThreadLocalMallocStorage *ms) { + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac)); + allocator.SwallowCache(ac); + } + + // -------------------------- Chunk lookup ---------------------- + + // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). + AsanChunk *GetAsanChunk(void *alloc_beg) { + if (!alloc_beg) return nullptr; + if (!allocator.FromPrimary(alloc_beg)) { + uptr *meta = reinterpret_cast(allocator.GetMetaData(alloc_beg)); + AsanChunk *m = reinterpret_cast(meta[1]); + return m; + } + uptr *alloc_magic = reinterpret_cast(alloc_beg); + if (alloc_magic[0] == kAllocBegMagic) + return reinterpret_cast(alloc_magic[1]); + return reinterpret_cast(alloc_beg); + } + + AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); + return GetAsanChunk(alloc_beg); + } + + // Allocator must be locked when this function is called. + AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); + return GetAsanChunk(alloc_beg); + } + + uptr AllocationSize(uptr p) { + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; + if (m->chunk_state != CHUNK_ALLOCATED) return 0; + if (m->Beg() != p) return 0; + return m->UsedSize(); + } + + AsanChunkView FindHeapChunkByAddress(uptr addr) { + AsanChunk *m1 = GetAsanChunkByAddr(addr); + if (!m1) return AsanChunkView(m1); + sptr offset = 0; + if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { + // The address is in the chunk's left redzone, so maybe it is actually + // a right buffer overflow from the other chunk to the left. + // Search a bit to the left to see if there is another chunk. 
+ AsanChunk *m2 = nullptr; + for (uptr l = 1; l < GetPageSizeCached(); l++) { + m2 = GetAsanChunkByAddr(addr - l); + if (m2 == m1) continue; // Still the same chunk. + break; + } + if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) + m1 = ChooseChunk(addr, m2, m1); + } + return AsanChunkView(m1); + } + + void PrintStats() { + allocator.PrintStats(); + } + + void ForceLock() { + allocator.ForceLock(); + fallback_mutex.Lock(); + } + + void ForceUnlock() { + fallback_mutex.Unlock(); + allocator.ForceUnlock(); + } +}; + +static Allocator instance(LINKER_INITIALIZED); + +static AsanAllocator &get_allocator() { + return instance.allocator; +} + +bool AsanChunkView::IsValid() { + return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; +} +uptr AsanChunkView::Beg() { return chunk_->Beg(); } +uptr AsanChunkView::End() { return Beg() + UsedSize(); } +uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); } +uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } +uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +StackTrace AsanChunkView::GetAllocStack() { + return GetStackTraceFromId(chunk_->alloc_context_id); +} + +StackTrace AsanChunkView::GetFreeStack() { + return GetStackTraceFromId(chunk_->free_context_id); +} + +void InitializeAllocator(const AllocatorOptions &options) { + instance.Initialize(options); +} + +void ReInitializeAllocator(const AllocatorOptions &options) { + instance.ReInitialize(options); +} + +void GetAllocatorOptions(AllocatorOptions *options) { + instance.GetOptions(options); +} + +AsanChunkView FindHeapChunkByAddress(uptr addr) { + return instance.FindHeapChunkByAddress(addr); +} + +void AsanThreadLocalMallocStorage::CommitBack() { + instance.CommitBack(this); +} + +void PrintInternalAllocatorStats() { + instance.PrintStats(); +} + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + return instance.Allocate(size, alignment, stack, alloc_type, true); +} + +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, 0, stack, alloc_type); +} + +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + instance.Deallocate(ptr, size, stack, alloc_type); +} + +void *asan_malloc(uptr size, BufferedStackTrace *stack) { + return instance.Allocate(size, 8, stack, FROM_MALLOC, true); +} + +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + return instance.Calloc(nmemb, size, stack); +} + +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { + if (!p) + return instance.Allocate(size, 8, stack, FROM_MALLOC, true); + if (size == 0) { + instance.Deallocate(p, 0, stack, FROM_MALLOC); + return nullptr; + } + return instance.Reallocate(p, size, stack); +} + +void *asan_valloc(uptr size, BufferedStackTrace *stack) { + return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); +} + +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + size = RoundUpTo(size, PageSize); + if (size == 0) { + // pvalloc(0) should allocate one page. 
+ size = PageSize; + } + return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true); +} + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { + void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) { + if (!ptr) return 0; + uptr usable_size = instance.AllocationSize(reinterpret_cast(ptr)); + if (flags()->check_malloc_usable_size && (usable_size == 0)) { + GET_STACK_TRACE_FATAL(pc, bp); + ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); + } + return usable_size; +} + +uptr asan_mz_size(const void *ptr) { + return instance.AllocationSize(reinterpret_cast(ptr)); +} + +void asan_mz_force_lock() { + instance.ForceLock(); +} + +void asan_mz_force_unlock() { + instance.ForceUnlock(); +} + +void AsanSoftRssLimitExceededCallback(bool exceeded) { + instance.allocator.SetRssLimitIsExceeded(exceeded); +} + +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::get_allocator().ForceLock(); +} + +void UnlockAllocator() { + __asan::get_allocator().ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::get_allocator(); + *end = *begin + sizeof(__asan::get_allocator()); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if (m->chunk_state != __asan::CHUNK_ALLOCATED) + return 0; + if (m->AddrIsInside(addr, /*locked_version=*/true)) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), + addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + CHECK(m); + return m->Beg(); +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = reinterpret_cast(chunk - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(/*locked_version=*/true); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + __asan::get_allocator().ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + uptr addr = reinterpret_cast(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); + if (!m) return kIgnoreObjectInvalid; + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { + if (m->lsan_tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->lsan_tag = __lsan::kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // namespace __lsan + +// ---------------------- 
Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// ASan allocator doesn't reserve extra bytes, so normally we would +// just return "size". We don't want to expose our redzone sizes, etc here. +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + uptr ptr = reinterpret_cast(p); + return instance.AllocationSize(ptr) > 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + if (!p) return 0; + uptr ptr = reinterpret_cast(p); + uptr allocated_size = instance.AllocationSize(ptr); + // Die if p is not malloced or if it is already freed. + if (allocated_size == 0) { + GET_STACK_TRACE_FATAL_HERE; + ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); + } + return allocated_size; +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_malloc_hook(void *ptr, uptr size) { + (void)ptr; + (void)size; +} +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_free_hook(void *ptr) { + (void)ptr; +} +} // extern "C" +#endif diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h index d2f30af30d6..921131ab4e9 100644 --- a/libsanitizer/asan/asan_allocator.h +++ b/libsanitizer/asan/asan_allocator.h @@ -7,12 +7,13 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_allocator2.cc. +// ASan-private header for asan_allocator.cc. //===----------------------------------------------------------------------===// #ifndef ASAN_ALLOCATOR_H #define ASAN_ALLOCATOR_H +#include "asan_flags.h" #include "asan_internal.h" #include "asan_interceptors.h" #include "sanitizer_common/sanitizer_allocator.h" @@ -26,11 +27,22 @@ enum AllocType { FROM_NEW_BR = 3 // Memory block came from operator new [ ] }; -static const uptr kNumberOfSizeClasses = 255; struct AsanChunk; -void InitializeAllocator(); -void ReInitializeAllocator(); +struct AllocatorOptions { + u32 quarantine_size_mb; + u16 min_redzone; + u16 max_redzone; + u8 may_return_null; + u8 alloc_dealloc_mismatch; + + void SetFrom(const Flags *f, const CommonFlags *cf); + void CopyTo(Flags *f, CommonFlags *cf); +}; + +void InitializeAllocator(const AllocatorOptions &options); +void ReInitializeAllocator(const AllocatorOptions &options); +void GetAllocatorOptions(AllocatorOptions *options); class AsanChunkView { public: @@ -100,6 +112,11 @@ struct AsanMapUnmapCallback { # if defined(__powerpc64__) const uptr kAllocatorSpace = 0xa0000000000ULL; const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +# elif defined(__aarch64__) +// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA +// so no need to different values for different VMA. +const uptr kAllocatorSpace = 0x10000000000ULL; +const uptr kAllocatorSize = 0x10000000000ULL; // 3T. # else const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. 
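[Editorial aside] Returning to the interface functions defined at the end of asan_allocator.cc above: __sanitizer_get_ownership and __sanitizer_get_allocated_size are part of the public sanitizer allocator interface, so instrumented programs can call them directly. A usage sketch for a build with -fsanitize=address; the size_t-based prototypes below mirror what the public <sanitizer/allocator_interface.h> header declares, which is an assumption here (include that header instead if it is available):

// Usage sketch for the allocator interface functions defined above.
#include <cstdio>
#include <cstdlib>

extern "C" {
int __sanitizer_get_ownership(const volatile void *p);
size_t __sanitizer_get_allocated_size(const volatile void *p);
}

int main() {
  void *p = malloc(100);
  // ASan reports the originally requested size (100), not the larger block
  // that includes redzones; see AllocationSize()/UsedSize() above.
  std::printf("owned: %d, allocated size: %zu\n",
              __sanitizer_get_ownership(p),
              __sanitizer_get_allocated_size(p));
  free(p);
  // Calling __sanitizer_get_allocated_size(p) after free() would abort with
  // ReportSanitizerGetAllocatedSizeNotOwned, as the code above shows.
  return 0;
}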
@@ -122,15 +139,16 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16, AsanMapUnmapCallback> PrimaryAllocator; #endif // SANITIZER_CAN_USE_ALLOCATOR64 +static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses; typedef SizeClassAllocatorLocalCache AllocatorCache; typedef LargeMmapAllocator SecondaryAllocator; typedef CombinedAllocator Allocator; + SecondaryAllocator> AsanAllocator; struct AsanThreadLocalMallocStorage { uptr quarantine_cache[16]; - AllocatorCache allocator2_cache; + AllocatorCache allocator_cache; void CommitBack(); private: // These objects are allocated via mmap() and are zero-initialized. @@ -158,6 +176,7 @@ void asan_mz_force_lock(); void asan_mz_force_unlock(); void PrintInternalAllocatorStats(); +void AsanSoftRssLimitExceededCallback(bool exceeded); } // namespace __asan #endif // ASAN_ALLOCATOR_H diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc deleted file mode 100644 index 33d9fea70cb..00000000000 --- a/libsanitizer/asan/asan_allocator2.cc +++ /dev/null @@ -1,790 +0,0 @@ -//===-- asan_allocator2.cc ------------------------------------------------===// -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Implementation of ASan's memory allocator, 2-nd version. -// This variant uses the allocator from sanitizer_common, i.e. the one shared -// with ThreadSanitizer and MemorySanitizer. -// -//===----------------------------------------------------------------------===// -#include "asan_allocator.h" - -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_list.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_quarantine.h" -#include "lsan/lsan_common.h" - -namespace __asan { - -void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { - PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mmaps++; - thread_stats.mmaped += size; -} -void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { - PoisonShadow(p, size, 0); - // We are about to unmap a chunk of user memory. - // Mark the corresponding shadow memory as not needed. - FlushUnneededASanShadowMemory(p, size); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.munmaps++; - thread_stats.munmaped += size; -} - -// We can not use THREADLOCAL because it is not supported on some of the -// platforms we care about (OSX 10.6, Android). -// static THREADLOCAL AllocatorCache cache; -AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - return &ms->allocator2_cache; -} - -static Allocator allocator; - -static const uptr kMaxAllowedMallocSize = - FIRST_32_SECOND_64(3UL << 30, 64UL << 30); - -static const uptr kMaxThreadLocalQuarantine = - FIRST_32_SECOND_64(1 << 18, 1 << 20); - -// Every chunk of memory allocated by this allocator can be in one of 3 states: -// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. 
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. -// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. -enum { - CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. - CHUNK_ALLOCATED = 2, - CHUNK_QUARANTINE = 3 -}; - -// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. -// We use adaptive redzones: for larger allocation larger redzones are used. -static u32 RZLog2Size(u32 rz_log) { - CHECK_LT(rz_log, 8); - return 16 << rz_log; -} - -static u32 RZSize2Log(u32 rz_size) { - CHECK_GE(rz_size, 16); - CHECK_LE(rz_size, 2048); - CHECK(IsPowerOfTwo(rz_size)); - u32 res = Log2(rz_size) - 4; - CHECK_EQ(rz_size, RZLog2Size(res)); - return res; -} - -static uptr ComputeRZLog(uptr user_requested_size) { - u32 rz_log = - user_requested_size <= 64 - 16 ? 0 : - user_requested_size <= 128 - 32 ? 1 : - user_requested_size <= 512 - 64 ? 2 : - user_requested_size <= 4096 - 128 ? 3 : - user_requested_size <= (1 << 14) - 256 ? 4 : - user_requested_size <= (1 << 15) - 512 ? 5 : - user_requested_size <= (1 << 16) - 1024 ? 6 : 7; - return Min(Max(rz_log, RZSize2Log(flags()->redzone)), - RZSize2Log(flags()->max_redzone)); -} - -// The memory chunk allocated from the underlying allocator looks like this: -// L L L L L L H H U U U U U U R R -// L -- left redzone words (0 or more bytes) -// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. -// U -- user memory. -// R -- right redzone (0 or more bytes) -// ChunkBase consists of ChunkHeader and other bytes that overlap with user -// memory. - -// If the left redzone is greater than the ChunkHeader size we store a magic -// value in the first uptr word of the memory block and store the address of -// ChunkBase in the next uptr. -// M B L L L L L L L L L H H U U U U U U -// | ^ -// ---------------------| -// M -- magic value kAllocBegMagic -// B -- address of ChunkHeader pointing to the first 'H' -static const uptr kAllocBegMagic = 0xCC6E96B9; - -struct ChunkHeader { - // 1-st 8 bytes. - u32 chunk_state : 8; // Must be first. - u32 alloc_tid : 24; - - u32 free_tid : 24; - u32 from_memalign : 1; - u32 alloc_type : 2; - u32 rz_log : 3; - u32 lsan_tag : 2; - // 2-nd 8 bytes - // This field is used for small sizes. For large sizes it is equal to - // SizeClassMap::kMaxSize and the actual size is stored in the - // SecondaryAllocator's metadata. - u32 user_requested_size; - u32 alloc_context_id; -}; - -struct ChunkBase : ChunkHeader { - // Header2, intersects with user memory. 
- u32 free_context_id; -}; - -static const uptr kChunkHeaderSize = sizeof(ChunkHeader); -static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; -COMPILER_CHECK(kChunkHeaderSize == 16); -COMPILER_CHECK(kChunkHeader2Size <= 16); - -struct AsanChunk: ChunkBase { - uptr Beg() { return reinterpret_cast(this) + kChunkHeaderSize; } - uptr UsedSize(bool locked_version = false) { - if (user_requested_size != SizeClassMap::kMaxSize) - return user_requested_size; - return *reinterpret_cast( - allocator.GetMetaData(AllocBeg(locked_version))); - } - void *AllocBeg(bool locked_version = false) { - if (from_memalign) { - if (locked_version) - return allocator.GetBlockBeginFastLocked( - reinterpret_cast(this)); - return allocator.GetBlockBegin(reinterpret_cast(this)); - } - return reinterpret_cast(Beg() - RZLog2Size(rz_log)); - } - bool AddrIsInside(uptr addr, bool locked_version = false) { - return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); - } -}; - -bool AsanChunkView::IsValid() { - return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE; -} -uptr AsanChunkView::Beg() { return chunk_->Beg(); } -uptr AsanChunkView::End() { return Beg() + UsedSize(); } -uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); } -uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } -uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } - -static StackTrace GetStackTraceFromId(u32 id) { - CHECK(id); - StackTrace res = StackDepotGet(id); - CHECK(res.trace); - return res; -} - -StackTrace AsanChunkView::GetAllocStack() { - return GetStackTraceFromId(chunk_->alloc_context_id); -} - -StackTrace AsanChunkView::GetFreeStack() { - return GetStackTraceFromId(chunk_->free_context_id); -} - -struct QuarantineCallback; -typedef Quarantine AsanQuarantine; -typedef AsanQuarantine::Cache QuarantineCache; -static AsanQuarantine quarantine(LINKER_INITIALIZED); -static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED); -static AllocatorCache fallback_allocator_cache; -static SpinMutex fallback_mutex; - -QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); - return reinterpret_cast(ms->quarantine_cache); -} - -struct QuarantineCallback { - explicit QuarantineCallback(AllocatorCache *cache) - : cache_(cache) { - } - - void Recycle(AsanChunk *m) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); - CHECK_NE(m->alloc_tid, kInvalidTid); - CHECK_NE(m->free_tid, kInvalidTid); - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapLeftRedzoneMagic); - void *p = reinterpret_cast(m->AllocBeg()); - if (p != m) { - uptr *alloc_magic = reinterpret_cast(p); - CHECK_EQ(alloc_magic[0], kAllocBegMagic); - // Clear the magic value, as allocator internals may overwrite the - // contents of deallocated chunk, confusing GetAsanChunk lookup. - alloc_magic[0] = 0; - CHECK_EQ(alloc_magic[1], reinterpret_cast(m)); - } - - // Statistics. 
- AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.real_frees++; - thread_stats.really_freed += m->UsedSize(); - - allocator.Deallocate(cache_, p); - } - - void *Allocate(uptr size) { - return allocator.Allocate(cache_, size, 1, false); - } - - void Deallocate(void *p) { - allocator.Deallocate(cache_, p); - } - - AllocatorCache *cache_; -}; - -void InitializeAllocator() { - allocator.Init(); - quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); -} - -void ReInitializeAllocator() { - quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); -} - -static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, - AllocType alloc_type, bool can_fill) { - if (UNLIKELY(!asan_inited)) - AsanInitFromRtl(); - Flags &fl = *flags(); - CHECK(stack); - const uptr min_alignment = SHADOW_GRANULARITY; - if (alignment < min_alignment) - alignment = min_alignment; - if (size == 0) { - // We'd be happy to avoid allocating memory for zero-size requests, but - // some programs/tests depend on this behavior and assume that malloc would - // not return NULL even for zero-size allocations. Moreover, it looks like - // operator new should never return NULL, and results of consecutive "new" - // calls must be different even if the allocated size is zero. - size = 1; - } - CHECK(IsPowerOfTwo(alignment)); - uptr rz_log = ComputeRZLog(size); - uptr rz_size = RZLog2Size(rz_log); - uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); - uptr needed_size = rounded_size + rz_size; - if (alignment > min_alignment) - needed_size += alignment; - bool using_primary_allocator = true; - // If we are allocating from the secondary allocator, there will be no - // automatic right redzone, so add the right redzone manually. - if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { - needed_size += rz_size; - using_primary_allocator = false; - } - CHECK(IsAligned(needed_size, min_alignment)); - if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { - Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", - (void*)size); - return AllocatorReturnNull(); - } - - AsanThread *t = GetCurrentThread(); - void *allocated; - if (t) { - AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); - allocated = allocator.Allocate(cache, needed_size, 8, false); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; - allocated = allocator.Allocate(cache, needed_size, 8, false); - } - - if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) { - // Heap poisoning is enabled, but the allocator provides an unpoisoned - // chunk. This is possible if flags()->poison_heap was disabled for some - // time, for example, due to flags()->start_disabled. - // Anyway, poison the block before using it for anything else. - uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); - PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); - } - - uptr alloc_beg = reinterpret_cast(allocated); - uptr alloc_end = alloc_beg + needed_size; - uptr beg_plus_redzone = alloc_beg + rz_size; - uptr user_beg = beg_plus_redzone; - if (!IsAligned(user_beg, alignment)) - user_beg = RoundUpTo(user_beg, alignment); - uptr user_end = user_beg + size; - CHECK_LE(user_end, alloc_end); - uptr chunk_beg = user_beg - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - m->alloc_type = alloc_type; - m->rz_log = rz_log; - u32 alloc_tid = t ? 
t->tid() : 0; - m->alloc_tid = alloc_tid; - CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? - m->free_tid = kInvalidTid; - m->from_memalign = user_beg != beg_plus_redzone; - if (alloc_beg != chunk_beg) { - CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); - reinterpret_cast(alloc_beg)[0] = kAllocBegMagic; - reinterpret_cast(alloc_beg)[1] = chunk_beg; - } - if (using_primary_allocator) { - CHECK(size); - m->user_requested_size = size; - CHECK(allocator.FromPrimary(allocated)); - } else { - CHECK(!allocator.FromPrimary(allocated)); - m->user_requested_size = SizeClassMap::kMaxSize; - uptr *meta = reinterpret_cast(allocator.GetMetaData(allocated)); - meta[0] = size; - meta[1] = chunk_beg; - } - - m->alloc_context_id = StackDepotPut(*stack); - - uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY); - // Unpoison the bulk of the memory region. - if (size_rounded_down_to_granularity) - PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); - // Deal with the end of the region if size is not aligned to granularity. - if (size != size_rounded_down_to_granularity && fl.poison_heap) { - u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); - *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; - } - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mallocs++; - thread_stats.malloced += size; - thread_stats.malloced_redzones += needed_size - size; - uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size)); - thread_stats.malloced_by_size[class_id]++; - if (needed_size > SizeClassMap::kMaxSize) - thread_stats.malloc_large++; - - void *res = reinterpret_cast(user_beg); - if (can_fill && fl.max_malloc_fill_size) { - uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); - REAL(memset)(res, fl.malloc_fill_byte, fill_size); - } -#if CAN_SANITIZE_LEAKS - m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored - : __lsan::kDirectlyLeaked; -#endif - // Must be the last mutation of metadata in this function. - atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); - ASAN_MALLOC_HOOK(res, size); - return res; -} - -static void ReportInvalidFree(void *ptr, u8 chunk_state, - BufferedStackTrace *stack) { - if (chunk_state == CHUNK_QUARANTINE) - ReportDoubleFree((uptr)ptr, stack); - else - ReportFreeNotMalloced((uptr)ptr, stack); -} - -static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, - BufferedStackTrace *stack) { - u8 old_chunk_state = CHUNK_ALLOCATED; - // Flip the chunk_state atomically to avoid race on double-free. - if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, - CHUNK_QUARANTINE, memory_order_acquire)) - ReportInvalidFree(ptr, old_chunk_state, stack); - CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); -} - -// Expects the chunk to already be marked as quarantined by using -// AtomicallySetQuarantineFlag. -static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, - AllocType alloc_type) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - - if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) - ReportAllocTypeMismatch((uptr)ptr, stack, - (AllocType)m->alloc_type, (AllocType)alloc_type); - - CHECK_GE(m->alloc_tid, 0); - if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. - CHECK_EQ(m->free_tid, kInvalidTid); - AsanThread *t = GetCurrentThread(); - m->free_tid = t ? t->tid() : 0; - m->free_context_id = StackDepotPut(*stack); - // Poison the region. 
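Aside, for readers of this hunk (not part of the patch): the double-free protection above hinges on flipping the chunk state with a single compare-and-swap in AtomicallySetQuarantineFlag, so of two racing free() calls exactly one wins and the loser observes an already-quarantined chunk. A minimal stand-alone model of that state machine, with invented names and arbitrary state values:

// chunk_state_model.cc: simplified model of the state flip used above.
// Build: g++ -std=c++11 chunk_state_model.cc
#include <atomic>
#include <cstdio>

enum : unsigned char { kAllocated = 2, kQuarantined = 3 };

struct Chunk {
  std::atomic<unsigned char> state{kAllocated};
};

// Mirrors the idea of AtomicallySetQuarantineFlag: only one caller can move
// the chunk from "allocated" to "quarantined"; a racing second free loses the
// CAS, observes the quarantined state, and that is what gets reported as a
// double free.
bool TryQuarantine(Chunk *c, unsigned char *observed) {
  unsigned char expected = kAllocated;
  bool won = c->state.compare_exchange_strong(expected, kQuarantined,
                                              std::memory_order_acquire);
  *observed = expected;  // on failure this holds the state the other free left
  return won;
}

int main() {
  Chunk c;
  unsigned char seen = 0;
  std::printf("first free : %s\n", TryQuarantine(&c, &seen) ? "ok" : "invalid");
  std::printf("second free: %s (observed state %d)\n",
              TryQuarantine(&c, &seen) ? "ok" : "double free", seen);
}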
- PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapFreeMagic); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.frees++; - thread_stats.freed += m->UsedSize(); - - // Push into quarantine. - if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), - m, m->UsedSize()); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *ac = &fallback_allocator_cache; - quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), - m, m->UsedSize()); - } -} - -static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, - AllocType alloc_type) { - uptr p = reinterpret_cast(ptr); - if (p == 0) return; - - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - if (delete_size && flags()->new_delete_type_mismatch && - delete_size != m->UsedSize()) { - ReportNewDeleteSizeMismatch(p, delete_size, stack); - } - ASAN_FREE_HOOK(ptr); - // Must mark the chunk as quarantined before any changes to its metadata. - AtomicallySetQuarantineFlag(m, ptr, stack); - QuarantineChunk(m, ptr, stack, alloc_type); -} - -static void *Reallocate(void *old_ptr, uptr new_size, - BufferedStackTrace *stack) { - CHECK(old_ptr && new_size); - uptr p = reinterpret_cast(old_ptr); - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast(chunk_beg); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.reallocs++; - thread_stats.realloced += new_size; - - void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); - if (new_ptr) { - u8 chunk_state = m->chunk_state; - if (chunk_state != CHUNK_ALLOCATED) - ReportInvalidFree(old_ptr, chunk_state, stack); - CHECK_NE(REAL(memcpy), (void*)0); - uptr memcpy_size = Min(new_size, m->UsedSize()); - // If realloc() races with free(), we may start copying freed memory. - // However, we will report racy double-free later anyway. - REAL(memcpy)(new_ptr, old_ptr, memcpy_size); - Deallocate(old_ptr, 0, stack, FROM_MALLOC); - } - return new_ptr; -} - -// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). -static AsanChunk *GetAsanChunk(void *alloc_beg) { - if (!alloc_beg) return 0; - if (!allocator.FromPrimary(alloc_beg)) { - uptr *meta = reinterpret_cast(allocator.GetMetaData(alloc_beg)); - AsanChunk *m = reinterpret_cast(meta[1]); - return m; - } - uptr *alloc_magic = reinterpret_cast(alloc_beg); - if (alloc_magic[0] == kAllocBegMagic) - return reinterpret_cast(alloc_magic[1]); - return reinterpret_cast(alloc_beg); -} - -static AsanChunk *GetAsanChunkByAddr(uptr p) { - void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast(p)); - return GetAsanChunk(alloc_beg); -} - -// Allocator must be locked when this function is called. -static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { - void *alloc_beg = - allocator.GetBlockBeginFastLocked(reinterpret_cast(p)); - return GetAsanChunk(alloc_beg); -} - -static uptr AllocationSize(uptr p) { - AsanChunk *m = GetAsanChunkByAddr(p); - if (!m) return 0; - if (m->chunk_state != CHUNK_ALLOCATED) return 0; - if (m->Beg() != p) return 0; - return m->UsedSize(); -} - -// We have an address between two chunks, and we want to report just one. -AsanChunk *ChooseChunk(uptr addr, - AsanChunk *left_chunk, AsanChunk *right_chunk) { - // Prefer an allocated chunk over freed chunk and freed chunk - // over available chunk. 
- if (left_chunk->chunk_state != right_chunk->chunk_state) { - if (left_chunk->chunk_state == CHUNK_ALLOCATED) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_ALLOCATED) - return right_chunk; - if (left_chunk->chunk_state == CHUNK_QUARANTINE) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_QUARANTINE) - return right_chunk; - } - // Same chunk_state: choose based on offset. - sptr l_offset = 0, r_offset = 0; - CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); - CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); - if (l_offset < r_offset) - return left_chunk; - return right_chunk; -} - -AsanChunkView FindHeapChunkByAddress(uptr addr) { - AsanChunk *m1 = GetAsanChunkByAddr(addr); - if (!m1) return AsanChunkView(m1); - sptr offset = 0; - if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { - // The address is in the chunk's left redzone, so maybe it is actually - // a right buffer overflow from the other chunk to the left. - // Search a bit to the left to see if there is another chunk. - AsanChunk *m2 = 0; - for (uptr l = 1; l < GetPageSizeCached(); l++) { - m2 = GetAsanChunkByAddr(addr - l); - if (m2 == m1) continue; // Still the same chunk. - break; - } - if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) - m1 = ChooseChunk(addr, m2, m1); - } - return AsanChunkView(m1); -} - -void AsanThreadLocalMallocStorage::CommitBack() { - AllocatorCache *ac = GetAllocatorCache(this); - quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac)); - allocator.SwallowCache(GetAllocatorCache(this)); -} - -void PrintInternalAllocatorStats() { - allocator.PrintStats(); -} - -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { - return Allocate(size, alignment, stack, alloc_type, true); -} - -void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { - Deallocate(ptr, 0, stack, alloc_type); -} - -void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { - Deallocate(ptr, size, stack, alloc_type); -} - -void *asan_malloc(uptr size, BufferedStackTrace *stack) { - return Allocate(size, 8, stack, FROM_MALLOC, true); -} - -void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) - return AllocatorReturnNull(); - void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); - // If the memory comes from the secondary allocator no need to clear it - // as it comes directly from mmap. - if (ptr && allocator.FromPrimary(ptr)) - REAL(memset)(ptr, 0, nmemb * size); - return ptr; -} - -void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { - if (p == 0) - return Allocate(size, 8, stack, FROM_MALLOC, true); - if (size == 0) { - Deallocate(p, 0, stack, FROM_MALLOC); - return 0; - } - return Reallocate(p, size, stack); -} - -void *asan_valloc(uptr size, BufferedStackTrace *stack) { - return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); -} - -void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { - uptr PageSize = GetPageSizeCached(); - size = RoundUpTo(size, PageSize); - if (size == 0) { - // pvalloc(0) should allocate one page. 
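Aside (illustration only): asan_calloc above rejects requests where nmemb * size would overflow, via CallocShouldReturnNullDueToOverflow from sanitizer_common. A sketch of the usual shape of such a check; the real helper may differ in detail:

#include <cassert>
#include <cstddef>
#include <cstdint>

// True when nmemb * size cannot be represented in size_t, i.e. when a
// calloc-style API must fail instead of quietly allocating a wrapped-around,
// too-small block.
bool CallocWouldOverflow(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > SIZE_MAX / size;
}

int main() {
  assert(!CallocWouldOverflow(1024, 1024));
  assert(CallocWouldOverflow(SIZE_MAX / 2, 3));
  return 0;
}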
- size = PageSize; - } - return Allocate(size, PageSize, stack, FROM_MALLOC, true); -} - -int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - BufferedStackTrace *stack) { - void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true); - CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; - return 0; -} - -uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) { - if (ptr == 0) return 0; - uptr usable_size = AllocationSize(reinterpret_cast(ptr)); - if (flags()->check_malloc_usable_size && (usable_size == 0)) { - GET_STACK_TRACE_FATAL(pc, bp); - ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); - } - return usable_size; -} - -uptr asan_mz_size(const void *ptr) { - return AllocationSize(reinterpret_cast(ptr)); -} - -void asan_mz_force_lock() { - allocator.ForceLock(); - fallback_mutex.Lock(); -} - -void asan_mz_force_unlock() { - fallback_mutex.Unlock(); - allocator.ForceUnlock(); -} - -} // namespace __asan - -// --- Implementation of LSan-specific functions --- {{{1 -namespace __lsan { -void LockAllocator() { - __asan::allocator.ForceLock(); -} - -void UnlockAllocator() { - __asan::allocator.ForceUnlock(); -} - -void GetAllocatorGlobalRange(uptr *begin, uptr *end) { - *begin = (uptr)&__asan::allocator; - *end = *begin + sizeof(__asan::allocator); -} - -uptr PointsIntoChunk(void* p) { - uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr); - if (!m) return 0; - uptr chunk = m->Beg(); - if (m->chunk_state != __asan::CHUNK_ALLOCATED) - return 0; - if (m->AddrIsInside(addr, /*locked_version=*/true)) - return chunk; - if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), - addr)) - return chunk; - return 0; -} - -uptr GetUserBegin(uptr chunk) { - __asan::AsanChunk *m = - __asan::GetAsanChunkByAddrFastLocked(chunk); - CHECK(m); - return m->Beg(); -} - -LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = reinterpret_cast(chunk - __asan::kChunkHeaderSize); -} - -bool LsanMetadata::allocated() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->chunk_state == __asan::CHUNK_ALLOCATED; -} - -ChunkTag LsanMetadata::tag() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return static_cast(m->lsan_tag); -} - -void LsanMetadata::set_tag(ChunkTag value) { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - m->lsan_tag = value; -} - -uptr LsanMetadata::requested_size() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->UsedSize(/*locked_version=*/true); -} - -u32 LsanMetadata::stack_trace_id() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->alloc_context_id; -} - -void ForEachChunk(ForEachChunkCallback callback, void *arg) { - __asan::allocator.ForEachChunk(callback, arg); -} - -IgnoreObjectResult IgnoreObjectLocked(const void *p) { - uptr addr = reinterpret_cast(p); - __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr); - if (!m) return kIgnoreObjectInvalid; - if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { - if (m->lsan_tag == kIgnored) - return kIgnoreObjectAlreadyIgnored; - m->lsan_tag = __lsan::kIgnored; - return kIgnoreObjectSuccess; - } else { - return kIgnoreObjectInvalid; - } -} -} // namespace __lsan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -// ASan allocator doesn't reserve extra bytes, so normally we would -// just 
return "size". We don't want to expose our redzone sizes, etc here. -uptr __sanitizer_get_estimated_allocated_size(uptr size) { - return size; -} - -int __sanitizer_get_ownership(const void *p) { - uptr ptr = reinterpret_cast(p); - return (AllocationSize(ptr) > 0); -} - -uptr __sanitizer_get_allocated_size(const void *p) { - if (p == 0) return 0; - uptr ptr = reinterpret_cast(p); - uptr allocated_size = AllocationSize(ptr); - // Die if p is not malloced or if it is already freed. - if (allocated_size == 0) { - GET_STACK_TRACE_FATAL_HERE; - ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); - } - return allocated_size; -} - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_free_hook(void *ptr) { - (void)ptr; -} -} // extern "C" -#endif diff --git a/libsanitizer/asan/asan_debugging.cc b/libsanitizer/asan/asan_debugging.cc index 3efad658a4c..6e33a9d4c51 100644 --- a/libsanitizer/asan/asan_debugging.cc +++ b/libsanitizer/asan/asan_debugging.cc @@ -79,8 +79,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) { GetInfoForHeapAddress(addr, descr); } -uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, - bool alloc_stack) { +static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id, + bool alloc_stack) { AsanChunkView chunk = FindHeapChunkByAddress(addr); if (!chunk.IsValid()) return 0; @@ -106,14 +106,14 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, return 0; } -} // namespace __asan +} // namespace __asan using namespace __asan; SANITIZER_INTERFACE_ATTRIBUTE const char *__asan_locate_address(uptr addr, char *name, uptr name_size, uptr *region_address, uptr *region_size) { - AddressDescription descr = { name, name_size, 0, 0, 0 }; + AddressDescription descr = { name, name_size, 0, 0, nullptr }; AsanLocateAddress(addr, &descr); if (region_address) *region_address = descr.region_address; if (region_size) *region_size = descr.region_size; diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc index cfe96a0882f..de190d11a16 100644 --- a/libsanitizer/asan/asan_fake_stack.cc +++ b/libsanitizer/asan/asan_fake_stack.cc @@ -9,6 +9,7 @@ // // FakeStack is used to detect use-after-return bugs. //===----------------------------------------------------------------------===// + #include "asan_allocator.h" #include "asan_poisoning.h" #include "asan_thread.h" @@ -20,13 +21,19 @@ static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; +static const u64 kAllocaRedzoneSize = 32UL; +static const u64 kAllocaRedzoneMask = 31UL; + // For small size classes inline PoisonShadow for better performance. ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3. u64 *shadow = reinterpret_cast(MemToShadow(ptr)); if (class_id <= 6) { - for (uptr i = 0; i < (1U << class_id); i++) + for (uptr i = 0; i < (1U << class_id); i++) { shadow[i] = magic; + // Make sure this does not become memset. + SanitizerBreakOptimization(nullptr); + } } else { // The size class is too big, it's cheaper to poison only size bytes. 
PoisonShadow(ptr, size, static_cast(magic)); @@ -56,7 +63,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) { void FakeStack::Destroy(int tid) { PoisonAll(0); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { InternalScopedString str(kNumberOfSizeClasses * 50); for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], @@ -73,7 +80,9 @@ void FakeStack::PoisonAll(u8 magic) { magic); } +#if !defined(_MSC_VER) || defined(__clang__) ALWAYS_INLINE USED +#endif FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, uptr real_stack) { CHECK_LT(class_id, kNumberOfSizeClasses); @@ -99,7 +108,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, *SavedFlagPtr(reinterpret_cast(res), class_id) = &flags[pos]; return res; } - return 0; // We are out of fake stack. + return nullptr; // We are out of fake stack. } uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { @@ -176,7 +185,7 @@ void SetTLSFakeStack(FakeStack *fs) { } static FakeStack *GetFakeStack() { AsanThread *t = GetCurrentThread(); - if (!t) return 0; + if (!t) return nullptr; return t->fake_stack(); } @@ -184,40 +193,39 @@ static FakeStack *GetFakeStackFast() { if (FakeStack *fs = GetTLSFakeStack()) return fs; if (!__asan_option_detect_stack_use_after_return) - return 0; + return nullptr; return GetFakeStack(); } -ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) { +ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) { FakeStack *fs = GetFakeStackFast(); - if (!fs) return real_stack; + if (!fs) return 0; + uptr local_stack; + uptr real_stack = reinterpret_cast(&local_stack); FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); - if (!ff) - return real_stack; // Out of fake stack, return the real one. + if (!ff) return 0; // Out of fake stack. 
uptr ptr = reinterpret_cast(ff); SetShadow(ptr, size, class_id, 0); return ptr; } -ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) { - if (ptr == real_stack) - return; +ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { FakeStack::Deallocate(ptr, class_id); SetShadow(ptr, size, class_id, kMagic8); } -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ - __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \ - return OnMalloc(class_id, size, real_stack); \ + __asan_stack_malloc_##class_id(uptr size) { \ + return OnMalloc(class_id, size); \ } \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ - uptr ptr, uptr size, uptr real_stack) { \ - OnFree(ptr, class_id, size, real_stack); \ + uptr ptr, uptr size) { \ + OnFree(ptr, class_id, size); \ } DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) @@ -239,15 +247,35 @@ SANITIZER_INTERFACE_ATTRIBUTE void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, void **end) { FakeStack *fs = reinterpret_cast(fake_stack); - if (!fs) return 0; + if (!fs) return nullptr; uptr frame_beg, frame_end; FakeFrame *frame = reinterpret_cast(fs->AddrIsInFakeStack( reinterpret_cast(addr), &frame_beg, &frame_end)); - if (!frame) return 0; + if (!frame) return nullptr; if (frame->magic != kCurrentStackFrameMagic) - return 0; + return nullptr; if (beg) *beg = reinterpret_cast(frame_beg); if (end) *end = reinterpret_cast(frame_end); return reinterpret_cast(frame->real_stack); } -} // extern "C" + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_alloca_poison(uptr addr, uptr size) { + uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; + uptr PartialRzAddr = addr + size; + uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; + uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); + FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); + FastPoisonShadowPartialRightRedzone( + PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, + RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); + FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_allocas_unpoison(uptr top, uptr bottom) { + if ((!top) || (top > bottom)) return; + REAL(memset)(reinterpret_cast(MemToShadow(top)), 0, + (bottom - top) / SHADOW_GRANULARITY); +} +} // extern "C" diff --git a/libsanitizer/asan/asan_flags.cc b/libsanitizer/asan/asan_flags.cc new file mode 100644 index 00000000000..9a68ad4eaa8 --- /dev/null +++ b/libsanitizer/asan/asan_flags.cc @@ -0,0 +1,177 @@ +//===-- asan_flags.cc -------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan flag parsing logic. 
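Aside (worked example, not patch content): __asan_alloca_poison above places a 32-byte redzone before each instrumented alloca and another one starting at the next 32-byte boundary after it, poisoning the partially addressable tail in between per shadow cell. The address arithmetic, assuming SHADOW_GRANULARITY is 8; constant names mirror the hunk, concrete addresses are invented:

#include <cassert>
#include <cstdint>
#include <cstdio>

static const uint64_t kAllocaRedzoneSize = 32;
static const uint64_t kAllocaRedzoneMask = 31;
static const uint64_t kShadowGranularity = 8;  // assumed value

int main() {
  uint64_t addr = 0x7fff0020, size = 13;   // a 32-byte aligned alloca of 13 bytes
  uint64_t left_rz            = addr - kAllocaRedzoneSize;
  uint64_t partial_rz         = addr + size;
  uint64_t right_rz           = (partial_rz + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uint64_t partial_rz_aligned = partial_rz & ~(kShadowGranularity - 1);
  assert(left_rz == 0x7fff0000);            // 32 poisoned bytes before the object
  assert(partial_rz_aligned == 0x7fff0028); // first shadow cell touched by the tail
  assert(right_rz == 0x7fff0040);           // right redzone starts at next 32-byte boundary
  std::printf("left=%llx tail=%llx right=%llx\n",
              (unsigned long long)left_rz,
              (unsigned long long)partial_rz_aligned,
              (unsigned long long)right_rz);
  return 0;
}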
+//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "asan_stack.h" +#include "lsan/lsan_common.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "ubsan/ubsan_flags.h" +#include "ubsan/ubsan_platform.h" + +namespace __asan { + +Flags asan_flags_dont_use_directly; // use via flags(). + +static const char *MaybeCallAsanDefaultOptions() { + return (&__asan_default_options) ? __asan_default_options() : ""; +} + +static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { +#ifdef ASAN_DEFAULT_OPTIONS +// Stringize the macro value. +# define ASAN_STRINGIZE(x) #x +# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) + return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +void Flags::SetDefaults() { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +static void RegisterAsanFlags(FlagParser *parser, Flags *f) { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +void InitializeFlags() { + // Set the default values and prepare for parsing ASan and common flags. + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.detect_leaks = CAN_SANITIZE_LEAKS; + cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = kDefaultMallocContextSize; + cf.intercept_tls_get_addr = true; + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser asan_parser; + RegisterAsanFlags(&asan_parser, f); + RegisterCommonFlags(&asan_parser); + + // Set the default values and prepare for parsing LSan and UBSan flags + // (which can also overwrite common flags). +#if CAN_SANITIZE_LEAKS + __lsan::Flags *lf = __lsan::flags(); + lf->SetDefaults(); + + FlagParser lsan_parser; + __lsan::RegisterLsanFlags(&lsan_parser, lf); + RegisterCommonFlags(&lsan_parser); +#endif + +#if CAN_SANITIZE_UB + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif + + // Override from ASan compile definition. + const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition(); + asan_parser.ParseString(asan_compile_def); + + // Override from user-specified string. + const char *asan_default_options = MaybeCallAsanDefaultOptions(); + asan_parser.ParseString(asan_default_options); +#if CAN_SANITIZE_UB + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif + + // Override from command line. + asan_parser.ParseString(GetEnv("ASAN_OPTIONS")); +#if CAN_SANITIZE_LEAKS + lsan_parser.ParseString(GetEnv("LSAN_OPTIONS")); +#endif +#if CAN_SANITIZE_UB + ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); +#endif + + // Let activation flags override current settings. On Android they come + // from a system property. On other platforms this is no-op. 
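Aside (usage sketch): InitializeFlags() above applies flag sources in a fixed order, built-in defaults, then the ASAN_DEFAULT_OPTIONS compile-time definition, then the weak __asan_default_options() hook, then the *_OPTIONS environment variables, so later sources win. A program can therefore ship its own defaults while still letting the environment override them; the flag names below are taken from asan_flags.inc in this patch:

// In any translation unit of the instrumented program; the runtime declares
// __asan_default_options() as a weak interface function and, per
// InitializeFlags() above, parses its result after the built-in defaults and
// the ASAN_DEFAULT_OPTIONS compile definition but before ASAN_OPTIONS.
extern "C" const char *__asan_default_options() {
  return "detect_stack_use_after_return=1:max_malloc_fill_size=4096";
}
// Running with ASAN_OPTIONS="detect_stack_use_after_return=0" still wins,
// because the environment string is parsed last.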
+ if (!flags()->start_deactivated) { + char buf[100]; + GetExtraActivationFlags(buf, sizeof(buf)); + asan_parser.ParseString(buf); + } + + SetVerbosity(common_flags()->verbosity); + + // TODO(eugenis): dump all flags at verbosity>=2? + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) { + // TODO(samsonov): print all of the flags (ASan, LSan, common). + asan_parser.PrintFlagDescriptions(); + } + + // Flag validation: + if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + Die(); + } + // Make "strict_init_order" imply "check_initialization_order". + // TODO(samsonov): Use a single runtime flag for an init-order checker. + if (f->strict_init_order) { + f->check_initialization_order = true; + } + CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax); + CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log); + CHECK_GE(f->redzone, 16); + CHECK_GE(f->max_redzone, f->redzone); + CHECK_LE(f->max_redzone, 2048); + CHECK(IsPowerOfTwo(f->redzone)); + CHECK(IsPowerOfTwo(f->max_redzone)); + + // quarantine_size is deprecated but we still honor it. + // quarantine_size can not be used together with quarantine_size_mb. + if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) { + Report("%s: please use either 'quarantine_size' (deprecated) or " + "quarantine_size_mb, but not both\n", SanitizerToolName); + Die(); + } + if (f->quarantine_size >= 0) + f->quarantine_size_mb = f->quarantine_size >> 20; + if (f->quarantine_size_mb < 0) { + const int kDefaultQuarantineSizeMb = + (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8; + f->quarantine_size_mb = kDefaultQuarantineSizeMb; + } +} + +} // namespace __asan + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char* __asan_default_options() { return ""; } +} // extern "C" +#endif diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h index 0b6a9857be0..6b33789b84c 100644 --- a/libsanitizer/asan/asan_flags.h +++ b/libsanitizer/asan/asan_flags.h @@ -14,6 +14,7 @@ #define ASAN_FLAGS_H #include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_flag_parser.h" // ASan flag values can be defined in four ways: // 1) initialized with default values at startup. @@ -22,55 +23,24 @@ // 3) overriden from string returned by user-specified function // __asan_default_options(). // 4) overriden from env variable ASAN_OPTIONS. +// 5) overriden during ASan activation (for now used on Android only). namespace __asan { struct Flags { - // Flag descriptions are in asan_rtl.cc. 
- int quarantine_size; - int redzone; - int max_redzone; - bool debug; - int report_globals; - bool check_initialization_order; - bool replace_str; - bool replace_intrin; - bool mac_ignore_invalid_free; - bool detect_stack_use_after_return; - int min_uar_stack_size_log; - int max_uar_stack_size_log; - bool uar_noreserve; - int max_malloc_fill_size, malloc_fill_byte; - int exitcode; - bool allow_user_poisoning; - int sleep_before_dying; - bool check_malloc_usable_size; - bool unmap_shadow_on_exit; - bool abort_on_error; - bool print_stats; - bool print_legend; - bool atexit; - bool allow_reexec; - bool print_full_thread_history; - bool poison_heap; - bool poison_partial; - bool poison_array_cookie; - bool alloc_dealloc_mismatch; - bool new_delete_type_mismatch; - bool strict_memcmp; - bool strict_init_order; - bool start_deactivated; - int detect_invalid_pointer_pairs; - bool detect_container_overflow; - int detect_odr_violation; - bool dump_instruction_bytes; +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "asan_flags.inc" +#undef ASAN_FLAG + + void SetDefaults(); }; extern Flags asan_flags_dont_use_directly; inline Flags *flags() { return &asan_flags_dont_use_directly; } -void InitializeFlags(Flags *f, const char *env); + +void InitializeFlags(); } // namespace __asan diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc new file mode 100644 index 00000000000..3163c23cd09 --- /dev/null +++ b/libsanitizer/asan/asan_flags.inc @@ -0,0 +1,134 @@ +//===-- asan_flags.inc ------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// ASan runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef ASAN_FLAG +# error "Define ASAN_FLAG prior to including this file!" +#endif + +// ASAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_FLAG(int, quarantine_size, -1, + "Deprecated, please use quarantine_size_mb.") +ASAN_FLAG(int, quarantine_size_mb, -1, + "Size (in Mb) of quarantine used to detect use-after-free " + "errors. Lower value may reduce memory usage but increase the " + "chance of false negatives.") +ASAN_FLAG(int, redzone, 16, + "Minimal size (in bytes) of redzones around heap objects. " + "Requirement: redzone >= 16, is a power of two.") +ASAN_FLAG(int, max_redzone, 2048, + "Maximal size (in bytes) of redzones around heap objects.") +ASAN_FLAG( + bool, debug, false, + "If set, prints some debugging information and does additional checks.") +ASAN_FLAG( + int, report_globals, 1, + "Controls the way to handle globals (0 - don't detect buffer overflow on " + "globals, 1 - detect buffer overflow, 2 - print data about registered " + "globals).") +ASAN_FLAG(bool, check_initialization_order, false, + "If set, attempts to catch initialization order issues.") +ASAN_FLAG( + bool, replace_str, true, + "If set, uses custom wrappers and replacements for libc string functions " + "to find more errors.") +ASAN_FLAG(bool, replace_intrin, true, + "If set, uses custom wrappers for memset/memcpy/memmove intinsics.") +ASAN_FLAG(bool, mac_ignore_invalid_free, false, + "Ignore invalid free() calls to work around some bugs. 
Used on OS X " + "only.") +ASAN_FLAG(bool, detect_stack_use_after_return, false, + "Enables stack-use-after-return checking at run-time.") +ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway. + "Minimum fake stack size log.") +ASAN_FLAG(int, max_uar_stack_size_log, + 20, // 1Mb per size class, i.e. ~11Mb per thread + "Maximum fake stack size log.") +ASAN_FLAG(bool, uar_noreserve, false, + "Use mmap with 'noreserve' flag to allocate fake stack.") +ASAN_FLAG( + int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K. + "ASan allocator flag. max_malloc_fill_size is the maximal amount of " + "bytes that will be filled with malloc_fill_byte on malloc.") +ASAN_FLAG(int, malloc_fill_byte, 0xbe, + "Value used to fill the newly allocated memory.") +ASAN_FLAG(bool, allow_user_poisoning, true, + "If set, user may manually mark memory regions as poisoned or " + "unpoisoned.") +ASAN_FLAG( + int, sleep_before_dying, 0, + "Number of seconds to sleep between printing an error report and " + "terminating the program. Useful for debugging purposes (e.g. when one " + "needs to attach gdb).") +ASAN_FLAG(bool, check_malloc_usable_size, true, + "Allows the users to work around the bug in Nvidia drivers prior to " + "295.*.") +ASAN_FLAG(bool, unmap_shadow_on_exit, false, + "If set, explicitly unmaps the (huge) shadow at exit.") +ASAN_FLAG(bool, print_stats, false, + "Print various statistics after printing an error message or if " + "atexit=1.") +ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.") +ASAN_FLAG(bool, atexit, false, + "If set, prints ASan exit stats even after program terminates " + "successfully.") +ASAN_FLAG( + bool, print_full_thread_history, true, + "If set, prints thread creation stacks for the threads involved in the " + "report and their ancestors up to the main thread.") +ASAN_FLAG( + bool, poison_heap, true, + "Poison (or not) the heap memory on [de]allocation. Zero value is useful " + "for benchmarking the allocator or instrumentator.") +ASAN_FLAG(bool, poison_partial, true, + "If true, poison partially addressable 8-byte aligned words " + "(default=true). This flag affects heap and global buffers, but not " + "stack buffers.") +ASAN_FLAG(bool, poison_array_cookie, true, + "Poison (or not) the array cookie after operator new[].") + +// Turn off alloc/dealloc mismatch checker on Mac and Windows for now. +// https://code.google.com/p/address-sanitizer/issues/detail?id=131 +// https://code.google.com/p/address-sanitizer/issues/detail?id=309 +// TODO(glider,timurrrr): Fix known issues and enable this back. +ASAN_FLAG(bool, alloc_dealloc_mismatch, + (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0), + "Report errors on malloc/delete, new/free, new/delete[], etc.") + +ASAN_FLAG(bool, new_delete_type_mismatch, true, + "Report errors on mismatch betwen size of new and delete.") +ASAN_FLAG( + bool, strict_init_order, false, + "If true, assume that dynamic initializers can never access globals from " + "other modules, even if the latter are already initialized.") +ASAN_FLAG( + bool, start_deactivated, false, + "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " + "poisoning) to reduce memory consumption as much as possible, and " + "restores them to original values when the first instrumented module is " + "loaded into the process. This is mainly intended to be used on " + "Android. 
") +ASAN_FLAG( + int, detect_invalid_pointer_pairs, 0, + "If non-zero, try to detect operations like <, <=, >, >= and - on " + "invalid pointer pairs (e.g. when pointers belong to different objects). " + "The bigger the value the harder we try.") +ASAN_FLAG( + bool, detect_container_overflow, true, + "If true, honor the container overflow annotations. " + "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow") +ASAN_FLAG(int, detect_odr_violation, 2, + "If >=2, detect violation of One-Definition-Rule (ODR); " + "If ==1, detect ODR-violation only if the two variables " + "have different sizes") +ASAN_FLAG(bool, dump_instruction_bytes, false, + "If true, dump 16 bytes starting at the instruction that caused SEGV") +ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.") diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc index 4bb88cfa001..b132c8b09b9 100644 --- a/libsanitizer/asan/asan_globals.cc +++ b/libsanitizer/asan/asan_globals.cc @@ -9,6 +9,7 @@ // // Handle globals. //===----------------------------------------------------------------------===// + #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -16,6 +17,7 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" @@ -71,7 +73,7 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) { const uptr kMinimalDistanceFromAnotherGlobal = 64; -bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { +static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; if (addr >= g.beg + g.size_with_redzone) return false; return true; @@ -88,36 +90,40 @@ static void ReportGlobal(const Global &g, const char *prefix) { } } -static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print, - Global *output_global) { - if (!flags()->report_globals) return false; +static u32 FindRegistrationSite(const Global *g) { + mu_for_globals.CheckLocked(); + CHECK(global_registration_site_vector); + for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { + GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; + if (g >= grs.g_first && g <= grs.g_last) + return grs.stack_id; + } + return 0; +} + +int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites, + int max_globals) { + if (!flags()->report_globals) return 0; BlockingMutexLock lock(&mu_for_globals); - bool res = false; + int res = 0; for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { const Global &g = *l->g; - if (print) { - if (flags()->report_globals >= 2) - ReportGlobal(g, "Search"); - res |= DescribeAddressRelativeToGlobal(addr, size, g); - } else { - if (IsAddressNearGlobal(addr, g)) { - CHECK(output_global); - *output_global = g; - return true; - } + if (flags()->report_globals >= 2) + ReportGlobal(g, "Search"); + if (IsAddressNearGlobal(addr, g)) { + globals[res] = g; + if (reg_sites) + reg_sites[res] = FindRegistrationSite(&g); + res++; + if (res == max_globals) break; } } return res; } -bool DescribeAddressIfGlobal(uptr addr, uptr size) { - return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true, - /* output_global */ nullptr); -} - bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) { Global g = {}; - if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, 
&g)) { + if (GetGlobalsForAddress(addr, &g, nullptr, 1)) { internal_strncpy(descr->name, g.name, descr->name_size); descr->region_address = g.beg; descr->region_size = g.size; @@ -127,16 +133,6 @@ bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) { return false; } -u32 FindRegistrationSite(const Global *g) { - CHECK(global_registration_site_vector); - for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { - GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; - if (g >= grs.g_first && g <= grs.g_last) - return grs.stack_id; - } - return 0; -} - // Register a global variable. // This function may be called more than once for every global // so we store the globals in a map. @@ -148,9 +144,7 @@ static void RegisterGlobal(const Global *g) { CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - // This "ODR violation" detection is fundamentally incompatible with - // how GCC registers globals. Disable as useless until rewritten upstream. - if (0 && flags()->detect_odr_violation) { + if (flags()->detect_odr_violation) { // Try detecting ODR (One Definition Rule) violation, i.e. the situation // where two globals with the same name are defined in different modules. if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { @@ -158,20 +152,21 @@ static void RegisterGlobal(const Global *g) { // the entire redzone of the second global may be within the first global. for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { if (g->beg == l->g->beg && - (flags()->detect_odr_violation >= 2 || g->size != l->g->size)) + (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && + !IsODRViolationSuppressed(g->name)) ReportODRViolation(g, FindRegistrationSite(g), l->g, FindRegistrationSite(l->g)); } } } - if (flags()->poison_heap) + if (CanPoisonMemory()) PoisonRedZones(*g); ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; l->g = g; l->next = list_of_all_globals; list_of_all_globals = l; if (g->has_dynamic_init) { - if (dynamic_init_globals == 0) { + if (!dynamic_init_globals) { dynamic_init_globals = new(allocator_for_globals) VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); } @@ -182,11 +177,13 @@ static void RegisterGlobal(const Global *g) { static void UnregisterGlobal(const Global *g) { CHECK(asan_inited); + if (flags()->report_globals >= 2) + ReportGlobal(*g, "Removed"); CHECK(flags()->report_globals); CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (flags()->poison_heap) + if (CanPoisonMemory()) PoisonShadowForGlobal(g, 0); // We unpoison the shadow memory for the global but we do not remove it from // the list because that would require O(n^2) time with the current list @@ -208,7 +205,7 @@ void StopInitOrderChecking() { } } -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT @@ -216,7 +213,7 @@ using namespace __asan; // NOLINT // Register an array of globals. 
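Aside (illustration only): the asan_flags.h and asan_flags.inc hunks above replace the hand-maintained Flags field list with a single ASAN_FLAG X-macro that is expanded once into struct members and once into SetDefaults() and flag-registration code. A self-contained model of that pattern, with a list macro standing in for the separate .inc file and two sample flags borrowed from the real list:

// flags_xmacro_model.cc: stand-alone model of the ASAN_FLAG pattern; the real
// code includes asan_flags.inc twice instead of using a list macro.
#include <cstdio>

#define DEMO_FLAG_LIST(X)                                      \
  X(int, redzone, 16, "Minimal redzone size")                  \
  X(bool, poison_heap, true, "Poison heap on [de]allocation")

struct Flags {
// First expansion: one data member per flag.
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Type Name;
  DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG

  // Second expansion of the same list: one assignment per flag.
  void SetDefaults() {
#define DEMO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
    DEMO_FLAG_LIST(DEMO_FLAG)
#undef DEMO_FLAG
  }
};

int main() {
  Flags f;
  f.SetDefaults();
  std::printf("redzone=%d poison_heap=%d\n", f.redzone, (int)f.poison_heap);
}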
void __asan_register_globals(__asan_global *globals, uptr n) { if (!flags()->report_globals) return; - GET_STACK_TRACE_FATAL_HERE; + GET_STACK_TRACE_MALLOC; u32 stack_id = StackDepotPut(stack); BlockingMutexLock lock(&mu_for_globals); if (!global_registration_site_vector) @@ -249,7 +246,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) { // initializer can only touch global variables in the same TU. void __asan_before_dynamic_init(const char *module_name) { if (!flags()->check_initialization_order || - !flags()->poison_heap) + !CanPoisonMemory()) return; bool strict_init_order = flags()->strict_init_order; CHECK(dynamic_init_globals); @@ -275,7 +272,7 @@ void __asan_before_dynamic_init(const char *module_name) { // TU are poisoned. It simply unpoisons all dynamically initialized globals. void __asan_after_dynamic_init() { if (!flags()->check_initialization_order || - !flags()->poison_heap) + !CanPoisonMemory()) return; CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h index da232513a09..2cda18849dd 100644 --- a/libsanitizer/asan/asan_init_version.h +++ b/libsanitizer/asan/asan_init_version.h @@ -23,8 +23,10 @@ extern "C" { // contains the function PC as the 3-rd field (see // DescribeAddressIfStack). // v3=>v4: added '__asan_global_source_location' to __asan_global. - #define __asan_init __asan_init_v4 - #define __asan_init_name "__asan_init_v4" + // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and + // __asan_stack_free_ functions. + // v5=>v6: changed the name of the version check symbol + #define __asan_version_mismatch_check __asan_version_mismatch_check_v6 } #endif // ASAN_INIT_VERSION_H diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc index 182b7842ed9..1b0592e027f 100644 --- a/libsanitizer/asan/asan_interceptors.cc +++ b/libsanitizer/asan/asan_interceptors.cc @@ -9,8 +9,8 @@ // // Intercept various libc functions. //===----------------------------------------------------------------------===// -#include "asan_interceptors.h" +#include "asan_interceptors.h" #include "asan_allocator.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -18,8 +18,19 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "sanitizer_common/sanitizer_libc.h" +#if SANITIZER_POSIX +#include "sanitizer_common/sanitizer_posix.h" +#endif + +#if defined(__i386) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" +#elif defined(__mips__) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" +#endif + namespace __asan { // Return true if we can quickly decide that the region is unpoisoned. @@ -32,12 +43,16 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { return false; } +struct AsanInterceptorContext { + const char *interceptor_name; +}; + // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE, // and ASAN_WRITE_RANGE as macro instead of function so // that no extra frames are created, and stack trace contains // relevant information only. // We check all shadow bytes. 
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \ +#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \ uptr __offset = (uptr)(offset); \ uptr __size = (uptr)(size); \ uptr __bad = 0; \ @@ -47,13 +62,33 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { } \ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \ (__bad = __asan_region_is_poisoned(__offset, __size))) { \ - GET_CURRENT_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \ + AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \ + bool suppressed = false; \ + if (_ctx) { \ + suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \ + if (!suppressed && HaveStackTraceBasedSuppressions()) { \ + GET_STACK_TRACE_FATAL_HERE; \ + suppressed = IsStackTraceSuppressed(&stack); \ + } \ + } \ + if (!suppressed) { \ + GET_CURRENT_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \ + } \ } \ } while (0) -#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false) -#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true) +#define ASAN_READ_RANGE(ctx, offset, size) \ + ACCESS_MEMORY_RANGE(ctx, offset, size, false) +#define ASAN_WRITE_RANGE(ctx, offset, size) \ + ACCESS_MEMORY_RANGE(ctx, offset, size, true) + +#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \ + ASAN_READ_RANGE((ctx), (s), \ + common_flags()->strict_string_checks ? (len) + 1 : (n)) + +#define ASAN_READ_STRING(ctx, s, n) \ + ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) // Behavior of functions like "memcpy" or "strcpy" is undefined // if memory intervals overlap. We report error in this case. @@ -74,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1, static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { #if ASAN_INTERCEPT_STRNLEN - if (REAL(strnlen) != 0) { + if (REAL(strnlen)) { return REAL(strnlen)(s, maxlen); } #endif @@ -92,7 +127,7 @@ int OnExit() { return 0; } -} // namespace __asan +} // namespace __asan // ---------------------- Wrappers ---------------- {{{1 using namespace __asan; // NOLINT @@ -100,31 +135,28 @@ using namespace __asan; // NOLINT DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) -#if !SANITIZER_MAC -#define ASAN_INTERCEPT_FUNC(name) \ - do { \ - if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ - VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \ - } while (0) -#else -// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION. -#define ASAN_INTERCEPT_FUNC(name) -#endif // SANITIZER_MAC +#define ASAN_INTERCEPTOR_ENTER(ctx, func) \ + AsanInterceptorContext _ctx = {#func}; \ + ctx = (void *)&_ctx; \ + (void) ctx; \ #define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ - ASAN_WRITE_RANGE(ptr, size) -#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size) + ASAN_WRITE_RANGE(ctx, ptr, size) +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + ASAN_READ_RANGE(ctx, ptr, size) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ + ASAN_INTERCEPTOR_ENTER(ctx, func); \ do { \ if (asan_init_is_running) \ return REAL(func)(__VA_ARGS__); \ - ctx = 0; \ - (void) ctx; \ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ return REAL(func)(__VA_ARGS__); \ ENSURE_ASAN_INITED(); \ } while (false) +#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ + do { \ + } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ do { \ } while (false) @@ -143,14 +175,30 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) do { \ } while (false) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) +// Strict init-order checking is dlopen-hostile: +// https://code.google.com/p/address-sanitizer/issues/detail?id=178 +#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ + if (flags()->strict_init_order) { \ + StopInitOrderChecking(); \ + } #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping() -#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping() +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ + CoverageUpdateMapping() +#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping() #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) +#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ + if (AsanThread *t = GetCurrentThread()) { \ + *begin = t->tls_begin(); \ + *end = t->tls_end(); \ + } else { \ + *begin = *end = 0; \ + } #include "sanitizer_common/sanitizer_common_interceptors.inc" -#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s) -#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s) +// Syscall interceptors don't have contexts, we don't support suppressions +// for them. +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ (void)(p); \ @@ -163,56 +211,81 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" +struct ThreadStartParam { + atomic_uintptr_t t; + atomic_uintptr_t is_registered; +}; + static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { - AsanThread *t = (AsanThread*)arg; + ThreadStartParam *param = reinterpret_cast(arg); + AsanThread *t = nullptr; + while ((t = reinterpret_cast( + atomic_load(¶m->t, memory_order_acquire))) == nullptr) + internal_sched_yield(); SetCurrentThread(t); - return t->ThreadStart(GetTid()); + return t->ThreadStart(GetTid(), ¶m->is_registered); } #if ASAN_INTERCEPT_PTHREAD_CREATE INTERCEPTOR(int, pthread_create, void *thread, void *attr, void *(*start_routine)(void*), void *arg) { EnsureMainThreadIDIsCorrect(); - // Strict init-order checking in thread-hostile. + // Strict init-order checking is thread-hostile. 
if (flags()->strict_init_order) StopInitOrderChecking(); GET_STACK_TRACE_THREAD; int detached = 0; - if (attr != 0) + if (attr) REAL(pthread_attr_getdetachstate)(attr, &detached); + ThreadStartParam param; + atomic_store(¶m.t, 0, memory_order_relaxed); + atomic_store(¶m.is_registered, 0, memory_order_relaxed); + int result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m); + if (result == 0) { + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + atomic_store(¶m.t, reinterpret_cast(t), memory_order_release); + // Wait until the AsanThread object is initialized and the ThreadRegistry + // entry is in "started" state. One reason for this is that after this + // interceptor exits, the child thread's stack may be the only thing holding + // the |arg| pointer. This may cause LSan to report a leak if leak checking + // happens at a point when the interceptor has already exited, but the stack + // range for the child thread is not yet known. + while (atomic_load(¶m.is_registered, memory_order_acquire) == 0) + internal_sched_yield(); + } + return result; +} - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(start_routine, arg); - CreateThreadContextArgs args = { t, &stack }; - asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); - return REAL(pthread_create)(thread, attr, asan_thread_start, t); +INTERCEPTOR(int, pthread_join, void *t, void **arg) { + return real_pthread_join(t, arg); } + +DEFINE_REAL_PTHREAD_FUNCTIONS #endif // ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION #if SANITIZER_ANDROID INTERCEPTOR(void*, bsd_signal, int signum, void *handler) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(bsd_signal)(signum, handler); } return 0; } -#else +#endif + INTERCEPTOR(void*, signal, int signum, void *handler) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(signal)(signum, handler); } - return 0; + return nullptr; } -#endif INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(sigaction)(signum, act, oldact); } return 0; @@ -220,10 +293,10 @@ INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, namespace __sanitizer { int real_sigaction(int signum, const void *act, void *oldact) { - return REAL(sigaction)(signum, - (struct sigaction *)act, (struct sigaction *)oldact); + return REAL(sigaction)(signum, (const struct sigaction *)act, + (struct sigaction *)oldact); } -} // namespace __sanitizer +} // namespace __sanitizer #elif SANITIZER_POSIX // We need to have defined REAL(sigaction) on posix systems. 
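Aside (illustration only): the reworked pthread_create interceptor above performs a two-way handshake through a stack-allocated ThreadStartParam: the child spins until the parent publishes the AsanThread pointer, and the parent spins until the child signals that registration finished, so neither side touches the other's data too early. A stand-alone model of that handshake using std::thread and std::atomic, with invented names:

// thread_handshake_model.cc
// Build: g++ -std=c++11 -pthread thread_handshake_model.cc
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

struct StartParam {
  std::atomic<uintptr_t> t{0};              // set by the parent once the
                                            // bookkeeping object exists
  std::atomic<uintptr_t> is_registered{0};  // set by the child when done
};

struct ThreadInfo { int tid; };

static void ChildTrampoline(StartParam *param) {
  ThreadInfo *info;
  // Wait until the parent has published the bookkeeping object.
  while ((info = reinterpret_cast<ThreadInfo *>(
              param->t.load(std::memory_order_acquire))) == nullptr)
    std::this_thread::yield();
  std::printf("child sees tid %d\n", info->tid);
  // Signal the parent that setup is complete.
  param->is_registered.store(1, std::memory_order_release);
}

int main() {
  StartParam param;                 // lives on the parent's stack, as in the interceptor
  std::thread child(ChildTrampoline, &param);
  ThreadInfo info = {42};           // created only after the thread started,
                                    // mirroring AsanThread::Create above
  param.t.store(reinterpret_cast<uintptr_t>(&info), std::memory_order_release);
  // Block until the child has picked everything up, mirroring the
  // is_registered wait in the interceptor.
  while (param.is_registered.load(std::memory_order_acquire) == 0)
    std::this_thread::yield();
  child.join();
  return 0;
}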
@@ -239,7 +312,7 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) { ssize += stack - bottom; ssize = RoundUpTo(ssize, PageSize); static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb - if (ssize && ssize <= kMaxSaneContextStackSize) { + if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) { PoisonShadow(bottom, ssize, 0); } } @@ -294,113 +367,73 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { } #endif -#if SANITIZER_WINDOWS -INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { - CHECK(REAL(RaiseException)); - __asan_handle_no_return(); - REAL(RaiseException)(a, b, c, d); -} - -INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler3)); - __asan_handle_no_return(); - return REAL(_except_handler3)(a, b, c, d); -} - -#if ASAN_DYNAMIC -// This handler is named differently in -MT and -MD CRTs. -#define _except_handler4 _except_handler4_common -#endif -INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler4)); - __asan_handle_no_return(); - return REAL(_except_handler4)(a, b, c, d); -} -#endif - -static inline int CharCmp(unsigned char c1, unsigned char c2) { - return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1; -} +// memcpy is called during __asan_init() from the internals of printf(...). +// We do not treat memcpy with to==from as a bug. +// See http://llvm.org/bugs/show_bug.cgi?id=11763. +#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \ + if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \ + if (asan_init_is_running) { \ + return REAL(memcpy)(to, from, size); \ + } \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + if (to != from) { \ + CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \ + } \ + ASAN_READ_RANGE(ctx, from, size); \ + ASAN_WRITE_RANGE(ctx, to, size); \ + } \ + return REAL(memcpy)(to, from, size); \ + } while (0) -INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size); - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - if (flags()->strict_memcmp) { - // Check the entire regions even if the first bytes of the buffers are - // different. - ASAN_READ_RANGE(a1, size); - ASAN_READ_RANGE(a2, size); - // Fallthrough to REAL(memcmp) below. - } else { - unsigned char c1 = 0, c2 = 0; - const unsigned char *s1 = (const unsigned char*)a1; - const unsigned char *s2 = (const unsigned char*)a2; - uptr i; - for (i = 0; i < size; i++) { - c1 = s1[i]; - c2 = s2[i]; - if (c1 != c2) break; - } - ASAN_READ_RANGE(s1, Min(i + 1, size)); - ASAN_READ_RANGE(s2, Min(i + 1, size)); - return CharCmp(c1, c2); - } - } - return REAL(memcmp(a1, a2, size)); -} void *__asan_memcpy(void *to, const void *from, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); - // memcpy is called during __asan_init() from the internals - // of printf(...). - if (asan_init_is_running) { - return REAL(memcpy)(to, from, size); - } - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - if (to != from) { - // We do not treat memcpy with to==from as a bug. - // See http://llvm.org/bugs/show_bug.cgi?id=11763. - CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); - } - ASAN_READ_RANGE(from, size); - ASAN_WRITE_RANGE(to, size); - } - return REAL(memcpy)(to, from, size); + ASAN_MEMCPY_IMPL(nullptr, to, from, size); } +// memset is called inside Printf. 
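Aside (illustration only): ASAN_MEMCPY_IMPL above keeps the long-standing rule that memcpy with to == from is tolerated, while any other overlap trips CHECK_RANGES_OVERLAP. The overlap predicate presumably looks roughly like the sketch below; the real RangesOverlap helper is declared further up in this file but its body falls outside the hunk:

#include <cstddef>

// True when [a, a + an) and [b, b + bn) share at least one byte, e.g.
// RangesOverlap(p, 8, p + 4, 8) is true while RangesOverlap(p, 4, p + 4, 8)
// is not.
bool RangesOverlap(const char *a, size_t an, const char *b, size_t bn) {
  return !(a + an <= b || b + bn <= a);
}

int main() {
  char buf[16];
  return (RangesOverlap(buf, 8, buf + 4, 8) &&
          !RangesOverlap(buf, 4, buf + 4, 8)) ? 0 : 1;
}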
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \ + if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \ + if (asan_init_is_running) { \ + return REAL(memset)(block, c, size); \ + } \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + ASAN_WRITE_RANGE(ctx, block, size); \ + } \ + return REAL(memset)(block, c, size); \ + } while (0) + void *__asan_memset(void *block, int c, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); - // memset is called inside Printf. - if (asan_init_is_running) { - return REAL(memset)(block, c, size); - } - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - ASAN_WRITE_RANGE(block, size); - } - return REAL(memset)(block, c, size); + ASAN_MEMSET_IMPL(nullptr, block, c, size); } +#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \ + if (UNLIKELY(!asan_inited)) \ + return internal_memmove(to, from, size); \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + ASAN_READ_RANGE(ctx, from, size); \ + ASAN_WRITE_RANGE(ctx, to, size); \ + } \ + return internal_memmove(to, from, size); \ + } while (0) + void *__asan_memmove(void *to, const void *from, uptr size) { - if (UNLIKELY(!asan_inited)) - return internal_memmove(to, from, size); - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - ASAN_READ_RANGE(from, size); - ASAN_WRITE_RANGE(to, size); - } - return internal_memmove(to, from, size); + ASAN_MEMMOVE_IMPL(nullptr, to, from, size); } INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) { - return __asan_memmove(to, from, size); + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memmove); + ASAN_MEMMOVE_IMPL(ctx, to, from, size); } INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memcpy); #if !SANITIZER_MAC - return __asan_memcpy(to, from, size); + ASAN_MEMCPY_IMPL(ctx, to, from, size); #else // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced // with WRAP(memcpy). As a result, false positives are reported for memmove() @@ -408,15 +441,19 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with // internal_memcpy(), which may lead to crashes, see // http://llvm.org/bugs/show_bug.cgi?id=16362. - return __asan_memmove(to, from, size); + ASAN_MEMMOVE_IMPL(ctx, to, from, size); #endif // !SANITIZER_MAC } INTERCEPTOR(void*, memset, void *block, int c, uptr size) { - return __asan_memset(block, c, size); + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memset); + ASAN_MEMSET_IMPL(ctx, block, c, size); } INTERCEPTOR(char*, strchr, const char *str, int c) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strchr); if (UNLIKELY(!asan_inited)) return internal_strchr(str, c); // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is // used. @@ -426,8 +463,9 @@ INTERCEPTOR(char*, strchr, const char *str, int c) { ENSURE_ASAN_INITED(); char *result = REAL(strchr)(str, c); if (flags()->replace_str) { - uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1; - ASAN_READ_RANGE(str, bytes_read); + uptr len = REAL(strlen)(str); + uptr bytes_read = (result ? result - str : len) + 1; + ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read); } return result; } @@ -449,13 +487,15 @@ DEFINE_REAL(char*, index, const char *string, int c) // For both strcat() and strncat() we need to check the validity of |to| // argument irrespective of the |from| length. 
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = REAL(strlen)(from); - ASAN_READ_RANGE(from, from_length + 1); + ASAN_READ_RANGE(ctx, from, from_length + 1); uptr to_length = REAL(strlen)(to); - ASAN_READ_RANGE(to, to_length); - ASAN_WRITE_RANGE(to + to_length, from_length + 1); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); // If the copying actually happens, the |from| string should not overlap // with the resulting string starting at |to|, which has a length of // to_length + from_length + 1. @@ -468,14 +508,16 @@ INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT } INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncat); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = MaybeRealStrnlen(from, size); uptr copy_length = Min(size, from_length + 1); - ASAN_READ_RANGE(from, copy_length); + ASAN_READ_RANGE(ctx, from, copy_length); uptr to_length = REAL(strlen)(to); - ASAN_READ_RANGE(to, to_length); - ASAN_WRITE_RANGE(to + to_length, from_length + 1); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); if (from_length > 0) { CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, from, copy_length); @@ -485,6 +527,8 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { } INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT #endif @@ -497,19 +541,21 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT if (flags()->replace_str) { uptr from_size = REAL(strlen)(from) + 1; CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); - ASAN_READ_RANGE(from, from_size); - ASAN_WRITE_RANGE(to, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, from_size); } return REAL(strcpy)(to, from); // NOLINT } #if ASAN_INTERCEPT_STRDUP INTERCEPTOR(char*, strdup, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { - ASAN_READ_RANGE(s, length + 1); + ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); @@ -519,6 +565,8 @@ INTERCEPTOR(char*, strdup, const char *s) { #endif INTERCEPTOR(SIZE_T, strlen, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strlen); if (UNLIKELY(!asan_inited)) return internal_strlen(s); // strlen is called from malloc_default_purgeable_zone() // in __asan::ReplaceSystemAlloc() on Mac. 
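As a sketch of what the strcat checks above amount to for a single call (illustrative helper, not part of the patch):

#include <cstdio>
#include <cstring>

// For strcat(to, from): the ranges the interceptor validates.
void explain_strcat_checks(const char *to, const char *from) {
  size_t to_len = strlen(to);      // existing destination contents, read
  size_t from_len = strlen(from);  // source including its NUL, read
  printf("read  [from,       from + %zu)\n", from_len + 1);
  printf("read  [to,         to + %zu)\n", to_len);
  printf("write [to + %zu,   to + %zu)\n", to_len, to_len + from_len + 1);
  printf("no overlap of |from| with [to, to + %zu)\n",
         to_len + from_len + 1);
}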
@@ -528,78 +576,65 @@ INTERCEPTOR(SIZE_T, strlen, const char *s) { ENSURE_ASAN_INITED(); SIZE_T length = REAL(strlen)(s); if (flags()->replace_str) { - ASAN_READ_RANGE(s, length + 1); + ASAN_READ_RANGE(ctx, s, length + 1); } return length; } INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcslen); SIZE_T length = REAL(wcslen)(s); if (!asan_init_is_running) { ENSURE_ASAN_INITED(); - ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t)); + ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t)); } return length; } INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncpy); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); - ASAN_READ_RANGE(from, from_size); - ASAN_WRITE_RANGE(to, size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, size); } return REAL(strncpy)(to, from, size); } #if ASAN_INTERCEPT_STRNLEN INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strnlen); ENSURE_ASAN_INITED(); uptr length = REAL(strnlen)(s, maxlen); if (flags()->replace_str) { - ASAN_READ_RANGE(s, Min(length + 1, maxlen)); + ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen)); } return length; } #endif // ASAN_INTERCEPT_STRNLEN -static inline bool IsValidStrtolBase(int base) { - return (base == 0) || (2 <= base && base <= 36); -} - -static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) { - CHECK(endptr); - if (nptr == *endptr) { - // No digits were found at strtol call, we need to find out the last - // symbol accessed by strtoll on our own. - // We get this symbol by skipping leading blanks and optional +/- sign. - while (IsSpace(*nptr)) nptr++; - if (*nptr == '+' || *nptr == '-') nptr++; - *endptr = (char*)nptr; - } - CHECK(*endptr >= nptr); -} - INTERCEPTOR(long, strtol, const char *nptr, // NOLINT char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtol); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtol)(nptr, endptr, base); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT - if (endptr != 0) { - *endptr = real_endptr; - } - if (IsValidStrtolBase(base)) { - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); - } + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(int, atoi, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoi); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); #endif @@ -614,11 +649,13 @@ INTERCEPTOR(int, atoi, const char *nptr) { // different from int). So, we just imitate this behavior. 
int result = REAL(strtol)(nptr, &real_endptr, 10); FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } INTERCEPTOR(long, atol, const char *nptr) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atol); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); #endif @@ -629,33 +666,28 @@ INTERCEPTOR(long, atol, const char *nptr) { // NOLINT char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtoll)(nptr, endptr, base); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT - if (endptr != 0) { - *endptr = real_endptr; - } - // If base has unsupported value, strtoll can exit with EINVAL - // without reading any characters. So do additional checks only - // if base is valid. - if (IsValidStrtolBase(base)) { - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); - } + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoll)(nptr); @@ -663,7 +695,7 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL @@ -681,7 +713,7 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, #endif ENSURE_ASAN_INITED(); int res = REAL(__cxa_atexit)(func, arg, dso_handle); - REAL(__cxa_atexit)(AtCxaAtexit, 0, 0); + REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); return res; } #endif // ASAN_INTERCEPT___CXA_ATEXIT @@ -696,35 +728,6 @@ INTERCEPTOR(int, fork, void) { } #endif // ASAN_INTERCEPT_FORK -#if SANITIZER_WINDOWS -INTERCEPTOR_WINAPI(DWORD, CreateThread, - void* security, uptr stack_size, - DWORD (__stdcall *start_routine)(void*), void* arg, - DWORD thr_flags, void* tid) { - // Strict init-order checking in thread-hostile. - if (flags()->strict_init_order) - StopInitOrderChecking(); - GET_STACK_TRACE_THREAD; - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(start_routine, arg); - CreateThreadContextArgs args = { t, &stack }; - bool detached = false; // FIXME: how can we determine it on Windows? 
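Aside: the endptr rule applied by the shared StrtolFixAndCheck helper (the same logic the removed local FixRealStrtolEndptr had, used only for valid bases), sketched as a standalone function; strtol_end_examined is a made-up name.

#include <cctype>

// If strtol consumed no digits, libc may leave *endptr == nptr even though it
// still examined leading blanks and an optional sign, so the range reported
// to the shadow check has to extend past those characters.
const char *strtol_end_examined(const char *nptr, const char *real_endptr) {
  if (real_endptr != nptr) return real_endptr;   // digits were consumed
  const char *p = nptr;
  while (isspace((unsigned char)*p)) p++;        // skip leading blanks
  if (*p == '+' || *p == '-') p++;               // skip an optional sign
  return p;                                      // last position examined
}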
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); - return REAL(CreateThread)(security, stack_size, - asan_thread_start, t, thr_flags, tid); -} - -namespace __asan { -void InitializeWindowsInterceptors() { - ASAN_INTERCEPT_FUNC(CreateThread); - ASAN_INTERCEPT_FUNC(RaiseException); - ASAN_INTERCEPT_FUNC(_except_handler3); - ASAN_INTERCEPT_FUNC(_except_handler4); -} - -} // namespace __asan -#endif - // ---------------------- InitializeAsanInterceptors ---------------- {{{1 namespace __asan { void InitializeAsanInterceptors() { @@ -734,7 +737,6 @@ void InitializeAsanInterceptors() { InitializeCommonInterceptors(); // Intercept mem* functions. - ASAN_INTERCEPT_FUNC(memcmp); ASAN_INTERCEPT_FUNC(memmove); ASAN_INTERCEPT_FUNC(memset); if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { @@ -773,9 +775,8 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(sigaction); #if SANITIZER_ANDROID ASAN_INTERCEPT_FUNC(bsd_signal); -#else - ASAN_INTERCEPT_FUNC(signal); #endif + ASAN_INTERCEPT_FUNC(signal); #endif #if ASAN_INTERCEPT_SWAPCONTEXT ASAN_INTERCEPT_FUNC(swapcontext); @@ -794,8 +795,13 @@ void InitializeAsanInterceptors() { // Intercept threading-related functions #if ASAN_INTERCEPT_PTHREAD_CREATE +#if defined(ASAN_PTHREAD_CREATE_VERSION) + ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); +#else ASAN_INTERCEPT_FUNC(pthread_create); #endif + ASAN_INTERCEPT_FUNC(pthread_join); +#endif // Intercept atexit function. #if ASAN_INTERCEPT___CXA_ATEXIT @@ -806,12 +812,9 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(fork); #endif - // Some Windows-specific interceptors. -#if SANITIZER_WINDOWS - InitializeWindowsInterceptors(); -#endif + InitializePlatformInterceptors(); VReport(1, "AddressSanitizer: libc interceptors initialized\n"); } -} // namespace __asan +} // namespace __asan diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h index 95a75db4e02..46c74176ed3 100644 --- a/libsanitizer/asan/asan_interceptors.h +++ b/libsanitizer/asan/asan_interceptors.h @@ -13,7 +13,7 @@ #define ASAN_INTERCEPTORS_H #include "asan_internal.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include "sanitizer_common/sanitizer_platform_interceptors.h" // Use macro to describe if specific function should be @@ -90,9 +90,27 @@ struct sigaction; DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) +#if !SANITIZER_MAC +#define ASAN_INTERCEPT_FUNC(name) \ + do { \ + if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ + VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \ + } while (0) +#define ASAN_INTERCEPT_FUNC_VER(name, ver) \ + do { \ + if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \ + VReport( \ + 1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \ + } while (0) +#else +// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION. +#define ASAN_INTERCEPT_FUNC(name) +#endif // SANITIZER_MAC + namespace __asan { void InitializeAsanInterceptors(); +void InitializePlatformInterceptors(); #define ENSURE_ASAN_INITED() do { \ CHECK(!asan_init_is_running); \ diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h index 939327eb8a4..b512bf8bd7a 100644 --- a/libsanitizer/asan/asan_interface_internal.h +++ b/libsanitizer/asan/asan_interface_internal.h @@ -7,8 +7,11 @@ // // This file is a part of AddressSanitizer, an address sanity checker. 
// -// This header can be included by the instrumented program to fetch -// data (mostly allocator statistics) from ASan runtime library. +// This header declares the AddressSanitizer runtime interface functions. +// The runtime library has to define these functions so the instrumented program +// could call them. +// +// See also include/sanitizer/asan_interface.h //===----------------------------------------------------------------------===// #ifndef ASAN_INTERFACE_INTERNAL_H #define ASAN_INTERFACE_INTERNAL_H @@ -22,10 +25,14 @@ using __sanitizer::uptr; extern "C" { // This function should be called at the very beginning of the process, // before any instrumented code is executed and before any call to malloc. - // Please note that __asan_init is a macro that is replaced with - // __asan_init_vXXX at compile-time. SANITIZER_INTERFACE_ATTRIBUTE void __asan_init(); + // This function exists purely to get a linker/loader error when using + // incompatible versions of instrumentation and runtime library. Please note + // that __asan_version_mismatch_check is a macro that is replaced with + // __asan_version_mismatch_check_vXXX at compile-time. + SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check(); + // This structure is used to describe the source location of a place where // global was defined. struct __asan_global_source_location { @@ -123,10 +130,8 @@ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __asan_report_error(uptr pc, uptr bp, uptr sp, - uptr addr, int is_write, uptr access_size); + uptr addr, int is_write, uptr access_size, u32 exp); - SANITIZER_INTERFACE_ATTRIBUTE - int __asan_set_error_exit_code(int exit_code); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_death_callback(void (*callback)(void)); SANITIZER_INTERFACE_ATTRIBUTE @@ -160,6 +165,21 @@ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size, + u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size, + u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void* __asan_memcpy(void *dst, const void *src, uptr size); SANITIZER_INTERFACE_ATTRIBUTE @@ -175,6 +195,10 @@ extern "C" { void __asan_poison_intra_object_redzone(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unpoison_intra_object_redzone(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_alloca_poison(uptr addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_allocas_unpoison(uptr top, uptr bottom); } // extern "C" #endif // ASAN_INTERFACE_INTERNAL_H diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h index 8911575d84a..e31f2648b1a 100644 --- a/libsanitizer/asan/asan_internal.h +++ 
b/libsanitizer/asan/asan_internal.h @@ -19,8 +19,6 @@ #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_libc.h" -#define ASAN_DEFAULT_FAILURE_EXITCODE 1 - #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) # error "The AddressSanitizer run-time should not be" " instrumented by AddressSanitizer" @@ -73,13 +71,11 @@ void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp); -void AsanOnSIGSEGV(int, void *siginfo, void *context); +void AsanOnDeadlySignal(int, void *siginfo, void *context); +void DisableReexec(); void MaybeReexec(); -bool AsanInterceptsSignal(int signum); void ReadContextStack(void *context, uptr *stack, uptr *ssize); -void AsanPlatformThreadInit(); void StopInitOrderChecking(); // Wrapper for TLS/TSD. @@ -90,10 +86,10 @@ void PlatformTSDDtor(void *tsd); void AppendToErrorMessageBuffer(const char *buffer); -void ParseExtraActivationFlags(); - void *AsanDlSymNext(const char *sym); +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name); + // Platform-specific options. #if SANITIZER_MAC bool PlatformHasDifferentMemcpyAndMemmove(); @@ -134,6 +130,8 @@ const int kAsanGlobalRedzoneMagic = 0xf9; const int kAsanInternalHeapMagic = 0xfe; const int kAsanArrayCookieMagic = 0xac; const int kAsanIntraObjectRedzone = 0xbb; +const int kAsanAllocaLeftMagic = 0xca; +const int kAsanAllocaRightMagic = 0xcb; static const uptr kCurrentStackFrameMagic = 0x41B58AB3; static const uptr kRetiredStackFrameMagic = 0x45E0360E; diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc index c504168b614..4e47d5a0496 100644 --- a/libsanitizer/asan/asan_linux.cc +++ b/libsanitizer/asan/asan_linux.cc @@ -66,6 +66,12 @@ asan_rt_version_t __asan_rt_version; namespace __asan { +void InitializePlatformInterceptors() {} + +void DisableReexec() { + // No need to re-exec on Linux. +} + void MaybeReexec() { // No need to re-exec on Linux. } @@ -105,8 +111,11 @@ static void ReportIncompatibleRT() { } void AsanCheckDynamicRTPrereqs() { + if (!ASAN_DYNAMIC) + return; + // Ensure that dynamic RT is the first DSO in the list - const char *first_dso_name = 0; + const char *first_dso_name = nullptr; dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); if (first_dso_name && !IsDynamicRTName(first_dso_name)) { Report("ASan runtime does not come first in initial library list; " @@ -131,7 +140,8 @@ void AsanCheckIncompatibleRT() { // system libraries, causing crashes later in ASan initialization. 
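For illustration, the "dynamic runtime must be the first DSO" rule checked by AsanCheckDynamicRTPrereqs above can be expressed with plain dl_iterate_phdr; the callback and the vDSO filtering below are simplified stand-ins for the real internal helpers.

#include <link.h>     // dl_iterate_phdr (glibc; may need _GNU_SOURCE in C)
#include <string.h>

// Stop at the first loaded object with a real name (the main executable
// reports an empty name, and the vDSO is skipped) and remember it.
static int first_dso_cb(struct dl_phdr_info *info, size_t, void *data) {
  const char **name = (const char **)data;
  if (info->dlpi_name && info->dlpi_name[0] &&
      !strstr(info->dlpi_name, "vdso")) {
    *name = info->dlpi_name;
    return 1;                       // non-zero stops the iteration
  }
  return 0;
}

bool dynamic_rt_comes_first(const char *runtime_basename) {
  const char *first = nullptr;
  dl_iterate_phdr(first_dso_cb, &first);
  return first && strstr(first, runtime_basename) != nullptr;
}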
MemoryMappingLayout proc_maps(/*cache_enabled*/true); char filename[128]; - while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) { + while (proc_maps.Next(nullptr, nullptr, nullptr, filename, + sizeof(filename), nullptr)) { if (IsDynamicRTName(filename)) { Report("Your application is linked against " "incompatible ASan runtimes.\n"); @@ -144,87 +154,7 @@ void AsanCheckIncompatibleRT() { } } } -#endif // SANITIZER_ANDROID - -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { -#if defined(__arm__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.arm_pc; - *bp = ucontext->uc_mcontext.arm_fp; - *sp = ucontext->uc_mcontext.arm_sp; -#elif defined(__aarch64__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.pc; - *bp = ucontext->uc_mcontext.regs[29]; - *sp = ucontext->uc_mcontext.sp; -#elif defined(__hppa__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.sc_iaoq[0]; - /* GCC uses %r3 whenever a frame pointer is needed. */ - *bp = ucontext->uc_mcontext.sc_gr[3]; - *sp = ucontext->uc_mcontext.sc_gr[30]; -#elif defined(__x86_64__) -# if SANITIZER_FREEBSD - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.mc_rip; - *bp = ucontext->uc_mcontext.mc_rbp; - *sp = ucontext->uc_mcontext.mc_rsp; -# else - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[REG_RIP]; - *bp = ucontext->uc_mcontext.gregs[REG_RBP]; - *sp = ucontext->uc_mcontext.gregs[REG_RSP]; -# endif -#elif defined(__i386__) -# if SANITIZER_FREEBSD - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.mc_eip; - *bp = ucontext->uc_mcontext.mc_ebp; - *sp = ucontext->uc_mcontext.mc_esp; -# else - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[REG_EIP]; - *bp = ucontext->uc_mcontext.gregs[REG_EBP]; - *sp = ucontext->uc_mcontext.gregs[REG_ESP]; -# endif -#elif defined(__powerpc__) || defined(__powerpc64__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.regs->nip; - *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; - // The powerpc{,64}-linux ABIs do not specify r31 as the frame - // pointer, but GCC always uses r31 when we need a frame pointer. - *bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; -#elif defined(__sparc__) - ucontext_t *ucontext = (ucontext_t*)context; - uptr *stk_ptr; -# if defined (__arch64__) - *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; - *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; - stk_ptr = (uptr *) (*sp + 2047); - *bp = stk_ptr[15]; -# else - *pc = ucontext->uc_mcontext.gregs[REG_PC]; - *sp = ucontext->uc_mcontext.gregs[REG_O6]; - stk_ptr = (uptr *) *sp; - *bp = stk_ptr[15]; -# endif -#elif defined(__mips__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[31]; - *bp = ucontext->uc_mcontext.gregs[30]; - *sp = ucontext->uc_mcontext.gregs[29]; -#else -# error "Unsupported arch" -#endif -} - -bool AsanInterceptsSignal(int signum) { - return signum == SIGSEGV && common_flags()->handle_segv; -} - -void AsanPlatformThreadInit() { - // Nothing here for now. 
-} +#endif // SANITIZER_ANDROID #if !SANITIZER_ANDROID void ReadContextStack(void *context, uptr *stack, uptr *ssize) { @@ -242,6 +172,6 @@ void *AsanDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); } -} // namespace __asan +} // namespace __asan -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc index 70823bdef92..036d7ff65f1 100644 --- a/libsanitizer/asan/asan_mac.cc +++ b/libsanitizer/asan/asan_mac.cc @@ -22,7 +22,14 @@ #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mac.h" -#include // for _NSGetArgv +#if !SANITIZER_IOS +#include // for _NSGetArgv and _NSGetEnviron +#else +extern "C" { + extern char ***_NSGetArgv(void); +} +#endif + #include // for dladdr() #include #include @@ -38,19 +45,7 @@ namespace __asan { -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { - ucontext_t *ucontext = (ucontext_t*)context; -# if SANITIZER_WORDSIZE == 64 - *pc = ucontext->uc_mcontext->__ss.__rip; - *bp = ucontext->uc_mcontext->__ss.__rbp; - *sp = ucontext->uc_mcontext->__ss.__rsp; -# else - *pc = ucontext->uc_mcontext->__ss.__eip; - *bp = ucontext->uc_mcontext->__ss.__ebp; - *sp = ucontext->uc_mcontext->__ss.__esp; -# endif // SANITIZER_WORDSIZE -} - +void InitializePlatformInterceptors() {} bool PlatformHasDifferentMemcpyAndMemmove() { // On OS X 10.7 memcpy() and memmove() are both resolved @@ -72,35 +67,51 @@ LowLevelAllocator allocator_for_env; // otherwise the corresponding "NAME=value" string is replaced with // |name_value|. void LeakyResetEnv(const char *name, const char *name_value) { - char ***env_ptr = _NSGetEnviron(); - CHECK(env_ptr); - char **environ = *env_ptr; - CHECK(environ); + char **env = GetEnviron(); uptr name_len = internal_strlen(name); - while (*environ != 0) { - uptr len = internal_strlen(*environ); + while (*env != 0) { + uptr len = internal_strlen(*env); if (len > name_len) { - const char *p = *environ; + const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. if (name_value) { // Replace the old value with the new one. - *environ = const_cast(name_value); + *env = const_cast(name_value); } else { // Shift the subsequent pointers back. - char **del = environ; + char **del = env; do { del[0] = del[1]; } while (*del++); } } } - environ++; + env++; } } +static bool reexec_disabled = false; + +void DisableReexec() { + reexec_disabled = true; +} + +extern "C" double dyldVersionNumber; +static const double kMinDyldVersionWithAutoInterposition = 360.0; + +bool DyldNeedsEnvVariable() { + // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if + // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via + // GetMacosVersion() doesn't work for the simulator. Let's instead check + // `dyldVersionNumber`, which is exported by dyld, against a known version + // number from the first OS release where this appeared. + return dyldVersionNumber < kMinDyldVersionWithAutoInterposition; +} + void MaybeReexec() { - if (!flags()->allow_reexec) return; + if (reexec_disabled) return; + // Make sure the dynamic ASan runtime library is preloaded so that the // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec // ourselves. @@ -111,8 +122,12 @@ void MaybeReexec() { uptr old_env_len = dyld_insert_libraries ? 
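Aside: the LeakyResetEnv pattern above, re-expressed against the ordinary global environ (illustration only; as in the original, nothing is freed, hence "leaky", and at most one matching entry is expected).

#include <string.h>

extern char **environ;

// Replace the "NAME=value" slot in place, or delete it by shifting the
// following pointers down.  |name_value|, if given, must be a complete
// "NAME=value" string that outlives the environment.
void leaky_reset_env(const char *name, const char *name_value) {
  size_t name_len = strlen(name);
  for (char **env = environ; *env; ++env) {
    if (strncmp(*env, name, name_len) == 0 && (*env)[name_len] == '=') {
      if (name_value) {
        *env = const_cast<char *>(name_value);     // replace the entry
      } else {
        char **del = env;                          // delete: shift left
        do { del[0] = del[1]; } while (*del++);
      }
    }
  }
}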
internal_strlen(dyld_insert_libraries) : 0; uptr fname_len = internal_strlen(info.dli_fname); - if (!dyld_insert_libraries || - !REAL(strstr)(dyld_insert_libraries, info.dli_fname)) { + const char *dylib_name = StripModuleName(info.dli_fname); + uptr dylib_name_len = internal_strlen(dylib_name); + + bool lib_is_in_env = + dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name); + if (DyldNeedsEnvVariable() && !lib_is_in_env) { // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // library. char program_name[1024]; @@ -138,58 +153,77 @@ void MaybeReexec() { VReport(1, "exec()-ing the program with\n"); VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); VReport(1, "to enable ASan wrappers.\n"); - VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n"); execv(program_name, *_NSGetArgv()); - } else { - // DYLD_INSERT_LIBRARIES is set and contains the runtime library. - if (old_env_len == fname_len) { - // It's just the runtime library name - fine to unset the variable. - LeakyResetEnv(kDyldInsertLibraries, NULL); + + // We get here only if execv() failed. + Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, " + "which is required for ASan to work. ASan tried to set the " + "environment variable and re-execute itself, but execv() failed, " + "possibly because of sandbox restrictions. Make sure to launch the " + "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env); + CHECK("execv failed" && 0); + } + + if (!lib_is_in_env) + return; + + // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove + // the dylib from the environment variable, because interceptors are installed + // and we don't want our children to inherit the variable. + + uptr env_name_len = internal_strlen(kDyldInsertLibraries); + // Allocate memory to hold the previous env var name, its value, the '=' + // sign and the '\0' char. + char *new_env = (char*)allocator_for_env.Allocate( + old_env_len + 2 + env_name_len); + CHECK(new_env); + internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); + internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); + new_env[env_name_len] = '='; + char *new_env_pos = new_env + env_name_len + 1; + + // Iterate over colon-separated pieces of |dyld_insert_libraries|. + char *piece_start = dyld_insert_libraries; + char *piece_end = NULL; + char *old_env_end = dyld_insert_libraries + old_env_len; + do { + if (piece_start[0] == ':') piece_start++; + piece_end = REAL(strchr)(piece_start, ':'); + if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; + if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; + uptr piece_len = piece_end - piece_start; + + char *filename_start = + (char *)internal_memrchr(piece_start, '/', piece_len); + uptr filename_len = piece_len; + if (filename_start) { + filename_start += 1; + filename_len = piece_len - (filename_start - piece_start); } else { - uptr env_name_len = internal_strlen(kDyldInsertLibraries); - // Allocate memory to hold the previous env var name, its value, the '=' - // sign and the '\0' char. - char *new_env = (char*)allocator_for_env.Allocate( - old_env_len + 2 + env_name_len); - CHECK(new_env); - internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); - internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); - new_env[env_name_len] = '='; - char *new_env_pos = new_env + env_name_len + 1; - - // Iterate over colon-separated pieces of |dyld_insert_libraries|. 
- char *piece_start = dyld_insert_libraries; - char *piece_end = NULL; - char *old_env_end = dyld_insert_libraries + old_env_len; - do { - if (piece_start[0] == ':') piece_start++; - piece_end = REAL(strchr)(piece_start, ':'); - if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; - if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; - uptr piece_len = piece_end - piece_start; - - // If the current piece isn't the runtime library name, - // append it to new_env. - if ((piece_len != fname_len) || - (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) { - if (new_env_pos != new_env + env_name_len + 1) { - new_env_pos[0] = ':'; - new_env_pos++; - } - internal_strncpy(new_env_pos, piece_start, piece_len); - } - // Move on to the next piece. - new_env_pos += piece_len; - piece_start = piece_end; - } while (piece_start < old_env_end); - - // Can't use setenv() here, because it requires the allocator to be - // initialized. - // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in - // a separate function called after InitializeAllocator(). - LeakyResetEnv(kDyldInsertLibraries, new_env); + filename_start = piece_start; } - } + + // If the current piece isn't the runtime library name, + // append it to new_env. + if ((dylib_name_len != filename_len) || + (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) { + if (new_env_pos != new_env + env_name_len + 1) { + new_env_pos[0] = ':'; + new_env_pos++; + } + internal_strncpy(new_env_pos, piece_start, piece_len); + new_env_pos += piece_len; + } + // Move on to the next piece. + piece_start = piece_end; + } while (piece_start < old_env_end); + + // Can't use setenv() here, because it requires the allocator to be + // initialized. + // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in + // a separate function called after InitializeAllocator(). + if (new_env_pos == new_env + env_name_len + 1) new_env = NULL; + LeakyResetEnv(kDyldInsertLibraries, new_env); } // No-op. Mac does not support static linkage anyway. @@ -203,14 +237,6 @@ void AsanCheckDynamicRTPrereqs() {} // No-op. Mac does not support static linkage anyway. 
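In plain terms, the colon-by-colon rewrite above computes the following (restated with std::string purely for readability; the real code must avoid the allocator, which is why it works on raw buffers):

#include <sstream>
#include <string>

// Drop every DYLD_INSERT_LIBRARIES entry whose basename matches the ASan
// dylib and keep everything else in order.  An empty result means the
// variable can simply be removed from the environment.
std::string drop_runtime_dylib(const std::string &dyld_insert_libraries,
                               const std::string &dylib_basename) {
  std::string out, piece;
  std::stringstream ss(dyld_insert_libraries);
  while (std::getline(ss, piece, ':')) {
    std::string base = piece.substr(piece.find_last_of('/') + 1);
    if (base == dylib_basename) continue;   // skip the runtime itself
    if (!out.empty()) out += ':';
    out += piece;
  }
  return out;
}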
void AsanCheckIncompatibleRT() {} -bool AsanInterceptsSignal(int signum) { - return (signum == SIGSEGV || signum == SIGBUS) && - common_flags()->handle_segv; -} - -void AsanPlatformThreadInit() { -} - void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } @@ -262,9 +288,8 @@ ALWAYS_INLINE void asan_register_worker_thread(int parent_tid, StackTrace *stack) { AsanThread *t = GetCurrentThread(); if (!t) { - t = AsanThread::Create(0, 0); - CreateThreadContextArgs args = { t, stack }; - asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args); + t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, + parent_tid, stack, /* detached */ true); t->Init(); asanThreadRegistry().StartThread(t->tid(), 0, 0); SetCurrentThread(t); @@ -311,7 +336,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, dispatch_function_t func) { \ GET_STACK_TRACE_THREAD; \ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ - if (common_flags()->verbosity >= 2) { \ + if (Verbosity() >= 2) { \ Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ asan_ctxt, pthread_self()); \ PRINT_CURRENT_STACK(); \ @@ -329,7 +354,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { Report("dispatch_after_f: %p\n", asan_ctxt); PRINT_CURRENT_STACK(); } @@ -342,7 +367,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", asan_ctxt, pthread_self()); PRINT_CURRENT_STACK(); @@ -372,13 +397,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); work(); \ } -// Forces the compiler to generate a frame pointer in the function. -#define ENABLE_FRAME_POINTER \ - do { \ - volatile uptr enable_fp; \ - enable_fp = GET_CURRENT_FRAME(); \ - } while (0) - INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void(^work)(void)) { ENABLE_FRAME_POINTER; @@ -402,6 +420,10 @@ INTERCEPTOR(void, dispatch_after, INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds, void(^work)(void)) { + if (!work) { + REAL(dispatch_source_set_cancel_handler)(ds, work); + return; + } ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_source_set_cancel_handler)(ds, asan_block); diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc index 6a93ce1e808..fbe05490ce7 100644 --- a/libsanitizer/asan/asan_malloc_mac.cc +++ b/libsanitizer/asan/asan_malloc_mac.cc @@ -88,9 +88,9 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { ENSURE_ASAN_INITED(); // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. size_t buflen = 6 + (name ? 
internal_strlen(name) : 0); - InternalScopedBuffer new_name(buflen); + InternalScopedString new_name(buflen); if (name && zone->introspect == asan_zone.introspect) { - internal_snprintf(new_name.data(), buflen, "asan-%s", name); + new_name.append("asan-%s", name); name = new_name.data(); } @@ -150,13 +150,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { namespace { -// TODO(glider): the mz_* functions should be united with the Linux wrappers, -// as they are basically copied from there. -size_t mz_size(malloc_zone_t* zone, const void* ptr) { +// TODO(glider): the __asan_mz_* functions should be united with the Linux +// wrappers, as they are basically copied from there. +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) { return asan_mz_size(ptr); } -void *mz_malloc(malloc_zone_t *zone, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_malloc(system_malloc_zone, size); @@ -165,7 +169,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) { return asan_malloc(size, &stack); } -void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { if (UNLIKELY(!asan_inited)) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. const size_t kCallocPoolSize = 1024; @@ -181,7 +187,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { return asan_calloc(nmemb, size, &stack); } -void *mz_valloc(malloc_zone_t *zone, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_valloc(system_malloc_zone, size); @@ -208,11 +216,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) { } // TODO(glider): the allocation callbacks need to be refactored. -void mz_free(malloc_zone_t *zone, void *ptr) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_mz_free(malloc_zone_t *zone, void *ptr) { free_common(zone, ptr); } -void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (!ptr) { GET_STACK_TRACE_MALLOC; return asan_malloc(size, &stack); @@ -231,15 +243,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { } } -void mz_destroy(malloc_zone_t* zone) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_mz_destroy(malloc_zone_t* zone) { // A no-op -- we will not be destroyed! - Report("mz_destroy() called -- ignoring\n"); + Report("__asan_mz_destroy() called -- ignoring\n"); } - // from AvailabilityMacros.h -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 -void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_memalign(system_malloc_zone, align, size); @@ -250,12 +263,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { // This function is currently unused, and we build with -Werror. 
#if 0 -void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) { +void __asan_mz_free_definite_size( + malloc_zone_t* zone, void *ptr, size_t size) { // TODO(glider): check that |size| is valid. UNIMPLEMENTED(); } #endif -#endif kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask, vm_address_t zone_address, @@ -297,13 +310,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); } -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 boolean_t mi_zone_locked(malloc_zone_t *zone) { // UNIMPLEMENTED(); return false; } -#endif } // unnamed namespace @@ -322,32 +332,25 @@ void ReplaceSystemMalloc() { asan_introspection.force_lock = &mi_force_lock; asan_introspection.force_unlock = &mi_force_unlock; asan_introspection.statistics = &mi_statistics; + asan_introspection.zone_locked = &mi_zone_locked; internal_memset(&asan_zone, 0, sizeof(malloc_zone_t)); - // Start with a version 4 zone which is used for OS X 10.4 and 10.5. - asan_zone.version = 4; + // Use version 6 for OSX >= 10.6. + asan_zone.version = 6; asan_zone.zone_name = "asan"; - asan_zone.size = &mz_size; - asan_zone.malloc = &mz_malloc; - asan_zone.calloc = &mz_calloc; - asan_zone.valloc = &mz_valloc; - asan_zone.free = &mz_free; - asan_zone.realloc = &mz_realloc; - asan_zone.destroy = &mz_destroy; + asan_zone.size = &__asan_mz_size; + asan_zone.malloc = &__asan_mz_malloc; + asan_zone.calloc = &__asan_mz_calloc; + asan_zone.valloc = &__asan_mz_valloc; + asan_zone.free = &__asan_mz_free; + asan_zone.realloc = &__asan_mz_realloc; + asan_zone.destroy = &__asan_mz_destroy; asan_zone.batch_malloc = 0; asan_zone.batch_free = 0; - asan_zone.introspect = &asan_introspection; - - // from AvailabilityMacros.h -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 - // Switch to version 6 on OSX 10.6 to support memalign. - asan_zone.version = 6; asan_zone.free_definite_size = 0; - asan_zone.memalign = &mz_memalign; - asan_introspection.zone_locked = &mi_zone_locked; -#endif + asan_zone.memalign = &__asan_mz_memalign; + asan_zone.introspect = &asan_introspection; // Register the ASan zone. 
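Aside: the shape of the version-6 zone that ReplaceSystemMalloc fills in above, as a minimal OS X sketch. The hooks here just forward to libc instead of the asan_* allocator, and the zone is left unregistered because it is deliberately incomplete; the actual registration is the malloc_zone_register call just below.

#include <malloc/malloc.h>
#include <cstdlib>
#include <cstring>

static malloc_zone_t my_zone;

static size_t my_size(malloc_zone_t *, const void *ptr) {
  return malloc_size(ptr);
}
static void *my_malloc(malloc_zone_t *, size_t size) { return malloc(size); }
static void *my_calloc(malloc_zone_t *, size_t n, size_t size) {
  return calloc(n, size);
}
static void my_free(malloc_zone_t *, void *ptr) { free(ptr); }
static void *my_realloc(malloc_zone_t *, void *ptr, size_t size) {
  return realloc(ptr, size);
}
static void my_destroy(malloc_zone_t *) { /* never destroyed */ }

void setup_zone() {
  memset(&my_zone, 0, sizeof(my_zone));
  my_zone.version   = 6;          // version 6 adds memalign/free_definite_size
  my_zone.zone_name = "example";
  my_zone.size      = &my_size;
  my_zone.malloc    = &my_malloc;
  my_zone.calloc    = &my_calloc;
  my_zone.free      = &my_free;
  my_zone.realloc   = &my_realloc;
  my_zone.destroy   = &my_destroy;
  // A complete zone would also fill memalign and introspect, then call
  // malloc_zone_register(&my_zone) exactly as the patch does below.
}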
malloc_zone_register(&asan_zone); diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc index bbcf80e96ba..f2ab188bf5b 100644 --- a/libsanitizer/asan/asan_malloc_win.cc +++ b/libsanitizer/asan/asan_malloc_win.cc @@ -17,7 +17,7 @@ #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_stack.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h index 907704d7fc9..b158bd2979d 100644 --- a/libsanitizer/asan/asan_mapping.h +++ b/libsanitizer/asan/asan_mapping.h @@ -57,13 +57,34 @@ // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // -// Default Linux/MIPS mapping: +// Default Linux/MIPS32 mapping: // || `[0x2aaa0000, 0xffffffff]` || HighMem || // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || // || `[0x00000000, 0x0aa9ffff]` || LowMem || // +// Default Linux/MIPS64 mapping: +// || `[0x4000000000, 0xffffffffff]` || HighMem || +// || `[0x2800000000, 0x3fffffffff]` || HighShadow || +// || `[0x2400000000, 0x27ffffffff]` || ShadowGap || +// || `[0x2000000000, 0x23ffffffff]` || LowShadow || +// || `[0x0000000000, 0x1fffffffff]` || LowMem || +// +// Default Linux/AArch64 (39-bit VMA) mapping: +// || `[0x2000000000, 0x7fffffffff]` || highmem || +// || `[0x1400000000, 0x1fffffffff]` || highshadow || +// || `[0x1200000000, 0x13ffffffff]` || shadowgap || +// || `[0x1000000000, 0x11ffffffff]` || lowshadow || +// || `[0x0000000000, 0x0fffffffff]` || lowmem || +// +// Default Linux/AArch64 (42-bit VMA) mapping: +// || `[0x10000000000, 0x3ffffffffff]` || highmem || +// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || +// || `[0x09000000000, 0x09fffffffff]` || shadowgap || +// || `[0x08000000000, 0x08fffffffff]` || lowshadow || +// || `[0x00000000000, 0x07fffffffff]` || lowmem || +// // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x500000000000, 0x7fffffffffff]` || HighMem || // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || @@ -77,36 +98,56 @@ // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || +// +// Default Windows/i386 mapping: +// (the exact location of HighShadow/HighMem may vary depending +// on WoW64, /LARGEADDRESSAWARE, etc). +// || `[0x50000000, 0xffffffff]` || HighMem || +// || `[0x3a000000, 0x4fffffff]` || HighShadow || +// || `[0x36000000, 0x39ffffff]` || ShadowGap || +// || `[0x30000000, 0x35ffffff]` || LowShadow || +// || `[0x00000000, 0x2fffffff]` || LowMem || static const u64 kDefaultShadowScale = 3; static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 -static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. 
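A worked example of the mapping the tables above describe, for the default 64-bit short-offset case (scale 3, offset kDefaultShort64bitShadowOffset). This is illustration only; the runtime computes it through MEM_TO_SHADOW.

#include <cstdint>
#include <cstdio>

static const unsigned kScale  = 3;              // 8 app bytes -> 1 shadow byte
static const uint64_t kOffset = 0x7FFF8000ULL;  // kDefaultShort64bitShadowOffset

uint64_t mem_to_shadow(uint64_t addr) {
  return (addr >> kScale) + kOffset;             // same formula as MEM_TO_SHADOW
}

int main() {
  uint64_t addr = 0x602000000010ULL;             // an example application address
  printf("shadow(0x%llx) = 0x%llx\n",
         (unsigned long long)addr, (unsigned long long)mem_to_shadow(addr));
  return 0;
}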
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kIosShadowOffset64 = 0x130000000; +static const u64 kIosSimShadowOffset32 = 1ULL << 30; +static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; +#if SANITIZER_AARCH64_VMA == 39 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; +#elif SANITIZER_AARCH64_VMA == 42 +static const u64 kAArch64_ShadowOffset64 = 1ULL << 39; +#endif static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; -static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36; +static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 +static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 #define SHADOW_SCALE kDefaultShadowScale -#if SANITIZER_ANDROID -# define SHADOW_OFFSET (0) -#else -# if SANITIZER_WORDSIZE == 32 -# if defined(__mips__) + + +#if SANITIZER_WORDSIZE == 32 +# if SANITIZER_ANDROID +# define SHADOW_OFFSET (0) +# elif defined(__mips__) # define SHADOW_OFFSET kMIPS32_ShadowOffset32 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# elif SANITIZER_WINDOWS +# define SHADOW_OFFSET kWindowsShadowOffset32 +# elif SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset32 +# elif SANITIZER_IOS +# define SHADOW_OFFSET kIosShadowOffset32 # else -# if SANITIZER_IOS -# define SHADOW_OFFSET kIosShadowOffset32 -# else -# define SHADOW_OFFSET kDefaultShadowOffset32 -# endif +# define SHADOW_OFFSET kDefaultShadowOffset32 # endif -# else +#else # if defined(__aarch64__) # define SHADOW_OFFSET kAArch64_ShadowOffset64 # elif defined(__powerpc64__) @@ -117,10 +158,13 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 # define SHADOW_OFFSET kDefaultShadowOffset64 # elif defined(__mips64) # define SHADOW_OFFSET kMIPS64_ShadowOffset64 +# elif SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset64 +# elif SANITIZER_IOS +# define SHADOW_OFFSET kIosShadowOffset64 # else # define SHADOW_OFFSET kDefaultShort64bitShadowOffset # endif -# endif #endif #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) @@ -143,7 +187,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. -#define kZeroBaseShadowStart (1 << 18) +#define kZeroBaseShadowStart 0 +#define kZeroBaseMaxShadowStart (1 << 18) #define kShadowGapBeg (kLowShadowEnd ? 
kLowShadowEnd + 1 \ : kZeroBaseShadowStart) diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc index 9d6660ec7b4..719cdfa2bb9 100644 --- a/libsanitizer/asan/asan_new_delete.cc +++ b/libsanitizer/asan/asan_new_delete.cc @@ -14,7 +14,7 @@ #include "asan_internal.h" #include "asan_stack.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include @@ -88,11 +88,11 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { #if !SANITIZER_MAC CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr) throw() { +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr) throw() { +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE @@ -104,12 +104,12 @@ void operator delete[](void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) throw() { +void operator delete(void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) throw() { +void operator delete[](void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW_BR); } diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc index c0f3991c1f3..78604310fcb 100644 --- a/libsanitizer/asan/asan_poisoning.cc +++ b/libsanitizer/asan/asan_poisoning.cc @@ -13,13 +13,24 @@ #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_flags.h" namespace __asan { +static atomic_uint8_t can_poison_memory; + +void SetCanPoisonMemory(bool value) { + atomic_store(&can_poison_memory, value, memory_order_release); +} + +bool CanPoisonMemory() { + return atomic_load(&can_poison_memory, memory_order_acquire); +} + void PoisonShadow(uptr addr, uptr size, u8 value) { - if (!flags()->poison_heap) return; + if (!CanPoisonMemory()) return; CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); CHECK(AddrIsAlignedByGranularity(addr + size)); @@ -32,7 +43,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, uptr size, uptr redzone_size, u8 value) { - if (!flags()->poison_heap) return; + if (!CanPoisonMemory()) return; CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); @@ -61,10 +72,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { uptr end = ptr + size; - if (common_flags()->verbosity) { + if (Verbosity()) { Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", poison ? 
"" : "un", ptr, end, size); - if (common_flags()->verbosity >= 2) + if (Verbosity() >= 2) PRINT_CURRENT_STACK(); } CHECK(size); @@ -99,7 +110,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) { if (!flags()->allow_user_poisoning || size == 0) return; uptr beg_addr = (uptr)addr; uptr end_addr = beg_addr + size; - VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, + VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); @@ -139,7 +150,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { if (!flags()->allow_user_poisoning || size == 0) return; uptr beg_addr = (uptr)addr; uptr end_addr = beg_addr + size; - VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, + VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); @@ -205,7 +216,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) { __asan::AddressIsPoisoned(__p + __size - 1))) { \ GET_CURRENT_PC_BP_SP; \ uptr __bad = __asan_region_is_poisoned(__p, __size); \ - __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\ } \ } while (false); \ diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h index 9644b7d840e..30e39e9077c 100644 --- a/libsanitizer/asan/asan_poisoning.h +++ b/libsanitizer/asan/asan_poisoning.h @@ -17,6 +17,10 @@ namespace __asan { +// Enable/disable memory poisoning. +void SetCanPoisonMemory(bool value); +bool CanPoisonMemory(); + // Poisons the shadow memory for "size" bytes starting from "addr". void PoisonShadow(uptr addr, uptr size, u8 value); @@ -32,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, // performance-critical code with care. 
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, u8 value) { - DCHECK(flags()->poison_heap); + DCHECK(CanPoisonMemory()); uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); uptr shadow_end = MEM_TO_SHADOW( aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; @@ -58,15 +62,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, if (page_end != shadow_end) { REAL(memset)((void *)page_end, 0, shadow_end - page_end); } - void *res = MmapFixedNoReserve(page_beg, page_end - page_beg); - CHECK_EQ(page_beg, res); + ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr); } } } ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { - DCHECK(flags()->poison_heap); + DCHECK(CanPoisonMemory()); bool poison_partial = flags()->poison_partial; u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc index 06d24d4e0ee..5b532e950bb 100644 --- a/libsanitizer/asan/asan_posix.cc +++ b/libsanitizer/asan/asan_posix.cc @@ -19,6 +19,7 @@ #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include @@ -30,26 +31,52 @@ namespace __asan { -void AsanOnSIGSEGV(int, void *siginfo, void *context) { +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { ScopedDeadlySignal signal_scope(GetCurrentThread()); - uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr; int code = (int)((siginfo_t*)siginfo)->si_code; // Write the first message using the bullet-proof write. - if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die(); - uptr pc, sp, bp; - GetPcSpBp(context, &pc, &sp, &bp); + if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die(); + SignalContext sig = SignalContext::Create(siginfo, context); // Access at a reasonable offset above SP, or slightly below it (to account // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is // probably a stack overflow. + bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF; + +#if __powerpc__ + // Large stack frames can be allocated with e.g. + // lis r0,-10000 + // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000 + // If the store faults then sp will not have been updated, so test above + // will not work, becase the fault address will be more than just "slightly" + // below sp. + if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) { + u32 inst = *(unsigned *)sig.pc; + u32 ra = (inst >> 16) & 0x1F; + u32 opcd = inst >> 26; + u32 xo = (inst >> 1) & 0x3FF; + // Check for store-with-update to sp. The instructions we accept are: + // stbu rs,d(ra) stbux rs,ra,rb + // sthu rs,d(ra) sthux rs,ra,rb + // stwu rs,d(ra) stwux rs,ra,rb + // stdu rs,ds(ra) stdux rs,ra,rb + // where ra is r1 (the stack pointer). + if (ra == 1 && + (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 || + (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181)))) + IsStackAccess = true; + } +#endif // __powerpc__ + // We also check si_code to filter out SEGV caused by something else other // then hitting the guard page or unmapped memory, like, for example, // unaligned memory access. 
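Aside: the stack-overflow heuristic used above, isolated as a predicate. The constants mirror the patch; they are heuristic thresholds, not ABI guarantees.

#include <cstdint>

// An access at a reasonable offset above SP, or slightly below it (to cover
// red zones and multi-register pushes), is probably a stack overflow; a SEGV
// with si_code MAPERR/ACCERR on such an address is reported as one.
bool looks_like_stack_access(uintptr_t fault_addr, uintptr_t sp) {
  return fault_addr + 512 > sp && fault_addr < sp + 0xFFFF;
}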
- if (addr + 512 > sp && addr < sp + 0xFFFF && - (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) - ReportStackOverflow(pc, sp, bp, context, addr); + if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) + ReportStackOverflow(sig); + else if (signo == SIGFPE) + ReportDeadlySignal("FPE", sig); else - ReportSIGSEGV("SEGV", pc, sp, bp, context, addr); + ReportDeadlySignal("SEGV", sig); } // ---------------------- TSD ---------------- {{{1 diff --git a/libsanitizer/asan/asan_preinit.cc b/libsanitizer/asan/asan_preinit.cc index 2ce1fb9b666..6cb115bd369 100644 --- a/libsanitizer/asan/asan_preinit.cc +++ b/libsanitizer/asan/asan_preinit.cc @@ -11,9 +11,13 @@ //===----------------------------------------------------------------------===// #include "asan_internal.h" +using namespace __asan; + #if SANITIZER_CAN_USE_PREINIT_ARRAY // The symbol is called __local_asan_preinit, because it's not intended to be // exported. + // This code linked into the main executable when -fsanitize=address is in + // the link flags. It can only use exported interface functions. __attribute__((section(".preinit_array"), used)) void (*__local_asan_preinit)(void) = __asan_init; #endif diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc index fcccb70ff8b..cbfc6f548e4 100644 --- a/libsanitizer/asan/asan_report.cc +++ b/libsanitizer/asan/asan_report.cc @@ -9,6 +9,7 @@ // // This file contains error reporting code. //===----------------------------------------------------------------------===// + #include "asan_flags.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -25,7 +26,7 @@ namespace __asan { // -------------------- User-specified callbacks ----------------- {{{1 static void (*error_report_callback)(const char*); -static char *error_message_buffer = 0; +static char *error_message_buffer = nullptr; static uptr error_message_buffer_pos = 0; static uptr error_message_buffer_size = 0; @@ -51,7 +52,7 @@ void AppendToErrorMessageBuffer(const char *buffer) { buffer, remaining); error_message_buffer[error_message_buffer_size - 1] = '\0'; // FIXME: reallocate the buffer instead of truncating the message. - error_message_buffer_pos += remaining > length ? 
length : remaining; + error_message_buffer_pos += Min(remaining, length); } } @@ -85,6 +86,8 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator { return Cyan(); case kAsanUserPoisonedMemoryMagic: case kAsanContiguousContainerOOBMagic: + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: return Blue(); case kAsanStackUseAfterScopeMagic: return Magenta(); @@ -171,6 +174,8 @@ static void PrintLegend(InternalScopedString *str) { PrintShadowByte(str, " Intra object redzone: ", kAsanIntraObjectRedzone); PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); + PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); + PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); } void MaybeDumpInstructionBytes(uptr pc) { @@ -275,9 +280,8 @@ static void PrintGlobalLocation(InternalScopedString *str, str->append(":%d", g.location->column_no); } -bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, - const __asan_global &g) { - if (!IsAddressNearGlobal(addr, g)) return false; +static void DescribeAddressRelativeToGlobal(uptr addr, uptr size, + const __asan_global &g) { InternalScopedString str(4096); Decorator d; str.append("%s", d.Location()); @@ -300,6 +304,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, str.append("%s", d.EndLocation()); PrintGlobalNameIfASCII(&str, g); Printf("%s", str.data()); +} + +static bool DescribeAddressIfGlobal(uptr addr, uptr size, + const char *bug_type) { + // Assume address is close to at most four globals. + const int kMaxGlobalsInReport = 4; + __asan_global globals[kMaxGlobalsInReport]; + u32 reg_sites[kMaxGlobalsInReport]; + int globals_num = + GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals)); + if (globals_num == 0) + return false; + for (int i = 0; i < globals_num; i++) { + DescribeAddressRelativeToGlobal(addr, size, globals[i]); + if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") && + reg_sites[i]) { + Printf(" registered at:\n"); + StackDepotGet(reg_sites[i]).Print(); + } + } return true; } @@ -348,7 +372,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, uptr next_var_beg) { uptr var_end = var.beg + var.size; uptr addr_end = addr + access_size; - const char *pos_descr = 0; + const char *pos_descr = nullptr; // If the variable [var.beg, var_end) is the nearest variable to the // current memory access, indicate it in the log. if (addr >= var.beg) { @@ -519,7 +543,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) { StackTrace alloc_stack = chunk.GetAllocStack(); char tname[128]; Decorator d; - AsanThreadContext *free_thread = 0; + AsanThreadContext *free_thread = nullptr; if (chunk.FreeTid() != kInvalidTid) { free_thread = GetThreadContextByTidLocked(chunk.FreeTid()); Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), @@ -545,12 +569,12 @@ void DescribeHeapAddress(uptr addr, uptr access_size) { DescribeThread(alloc_thread); } -void DescribeAddress(uptr addr, uptr access_size) { +static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) { // Check if this is shadow or shadow gap. 
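The kAsanAllocaLeftMagic/kAsanAllocaRightMagic shadow values added to the legend above correspond to the "dynamic-stack-buffer-overflow" report type introduced later in this patch. A sketch of the kind of bug they flag, assuming the program is built with -fsanitize=address and a compiler that instruments alloca/VLA allocations (that instrumentation is optional in toolchains of this vintage):

  #include <alloca.h>
  #include <string.h>

  int main(int argc, char **argv) {
    char *buf = (char *)alloca(argc * 10);
    // One byte past the requested size lands in the freshly poisoned right
    // alloca redzone, so ASan should report dynamic-stack-buffer-overflow.
    memset(buf, 0, argc * 10 + 1);
    return buf[0];
  }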
if (DescribeAddressIfShadow(addr)) return; CHECK(AddrIsInMem(addr)); - if (DescribeAddressIfGlobal(addr, access_size)) + if (DescribeAddressIfGlobal(addr, access_size, bug_type)) return; if (DescribeAddressIfStack(addr, access_size)) return; @@ -572,6 +596,11 @@ void DescribeThread(AsanThreadContext *context) { InternalScopedString str(1024); str.append("Thread T%d%s", context->tid, ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); + if (context->parent_tid == kInvalidTid) { + str.append(" created by unknown thread\n"); + Printf("%s", str.data()); + return; + } str.append( " created by T%d%s here:\n", context->parent_tid, ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); @@ -609,7 +638,7 @@ class ScopedInErrorReport { } // If we're still not dead for some reason, use raw _exit() instead of // Die() to bypass any additional checks. - internal__exit(flags()->exitcode); + internal__exit(common_flags()->exitcode); } if (report) report_data = *report; report_happened = true; @@ -641,40 +670,39 @@ class ScopedInErrorReport { } }; -void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) { +void ReportStackOverflow(const SignalContext &sig) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: stack-overflow on address %p" " (pc %p bp %p sp %p T%d)\n", - (void *)addr, (void *)pc, (void *)bp, (void *)sp, + (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp, GetCurrentTidOrInvalid()); Printf("%s", d.EndWarning()); - GET_STACK_TRACE_SIGNAL(pc, bp, context); + GET_STACK_TRACE_SIGNAL(sig); stack.Print(); ReportErrorSummary("stack-overflow", &stack); } -void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, - void *context, uptr addr) { +void ReportDeadlySignal(const char *description, const SignalContext &sig) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: %s on unknown address %p" " (pc %p bp %p sp %p T%d)\n", - description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, - GetCurrentTidOrInvalid()); - if (pc < GetPageSizeCached()) { + description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, + (void *)sig.sp, GetCurrentTidOrInvalid()); + if (sig.pc < GetPageSizeCached()) { Report("Hint: pc points to the zero page.\n"); } Printf("%s", d.EndWarning()); - GET_STACK_TRACE_SIGNAL(pc, bp, context); + GET_STACK_TRACE_SIGNAL(sig); stack.Print(); - MaybeDumpInstructionBytes(pc); + MaybeDumpInstructionBytes(sig.pc); Printf("AddressSanitizer can not provide additional info.\n"); - ReportErrorSummary("SEGV", &stack); + ReportErrorSummary(description, &stack); } void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { @@ -800,8 +828,8 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function, bug_type, offset1, offset1 + length1, offset2, offset2 + length2); Printf("%s", d.EndWarning()); stack->Print(); - DescribeAddress((uptr)offset1, length1); - DescribeAddress((uptr)offset2, length2); + DescribeAddress((uptr)offset1, length1, bug_type); + DescribeAddress((uptr)offset2, length2, bug_type); ReportErrorSummary(bug_type, stack); } @@ -814,7 +842,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size, Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size); Printf("%s", d.EndWarning()); stack->Print(); - DescribeAddress(offset, size); + DescribeAddress(offset, size, bug_type); ReportErrorSummary(bug_type, stack); } @@ -829,6 +857,9 @@ void 
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, " old_mid : %p\n" " new_mid : %p\n", beg, end, old_mid, new_mid); + uptr granularity = SHADOW_GRANULARITY; + if (!IsAligned(beg, granularity)) + Report("ERROR: beg is not aligned by %d\n", granularity); stack->Print(); ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack); } @@ -866,15 +897,16 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1, static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) { ScopedInErrorReport in_report; + const char *bug_type = "invalid-pointer-pair"; Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2); Printf("%s", d.EndWarning()); GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); - DescribeAddress(a1, 1); - DescribeAddress(a2, 1); - ReportErrorSummary("invalid-pointer-pair", &stack); + DescribeAddress(a1, 1, bug_type); + DescribeAddress(a2, 1, bug_type); + ReportErrorSummary(bug_type, &stack); } static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { @@ -925,13 +957,24 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, DescribeHeapAddress(addr, 1); } -} // namespace __asan +} // namespace __asan // --------------------------- Interface --------------------- {{{1 using namespace __asan; // NOLINT void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, - uptr access_size) { + uptr access_size, u32 exp) { + ENABLE_FRAME_POINTER; + + // Optimization experiments. + // The experiments can be used to evaluate potential optimizations that remove + // instrumentation (assess false negatives). Instead of completely removing + // some instrumentation, compiler can emit special calls into runtime + // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass + // mask of experiments (exp). + // The reaction to a non-zero value of exp is to be defined. + (void)exp; + // Determine the error type. const char *bug_descr = "unknown-crash"; if (AddrIsInMem(addr)) { @@ -980,6 +1023,10 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, case kAsanIntraObjectRedzone: bug_descr = "intra-object-overflow"; break; + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + bug_descr = "dynamic-stack-buffer-overflow"; + break; } } @@ -1006,7 +1053,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); - DescribeAddress(addr, access_size); + DescribeAddress(addr, access_size, bug_descr); ReportErrorSummary(bug_descr, &stack); PrintShadowMemoryForAddress(addr); } @@ -1024,7 +1071,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { void __asan_describe_address(uptr addr) { // Thread registry must be locked while we're describing an address. 
asanThreadRegistry().Lock(); - DescribeAddress(addr, 1); + DescribeAddress(addr, 1, ""); asanThreadRegistry().Unlock(); } @@ -1069,7 +1116,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_ptr_cmp(void *a, void *b) { CheckForInvalidPointerPair(a, b); } -} // extern "C" +} // extern "C" #if !SANITIZER_SUPPORTS_WEAK_HOOKS // Provide default implementation of __asan_on_error that does nothing diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h index b31b86dd08b..6214979cf4c 100644 --- a/libsanitizer/asan/asan_report.h +++ b/libsanitizer/asan/asan_report.h @@ -31,29 +31,25 @@ struct AddressDescription { const char *region_kind; }; +// Returns the number of globals close to the provided address and copies +// them to "globals" array. +int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites, + int max_globals); +bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr); // The following functions prints address description depending // on the memory type (shadow/heap/stack/global). void DescribeHeapAddress(uptr addr, uptr access_size); -bool DescribeAddressIfGlobal(uptr addr, uptr access_size); -bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, - const __asan_global &g); -bool IsAddressNearGlobal(uptr addr, const __asan_global &g); -bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr); bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr, bool print = true); bool ParseFrameDescription(const char *frame_descr, InternalMmapVector *vars); bool DescribeAddressIfStack(uptr addr, uptr access_size); -// Determines memory type on its own. -void DescribeAddress(uptr addr, uptr access_size); - void DescribeThread(AsanThreadContext *context); // Different kinds of error reports. -void NORETURN - ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr); -void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, - void *context, uptr addr); +void NORETURN ReportStackOverflow(const SignalContext &sig); +void NORETURN ReportDeadlySignal(const char* description, + const SignalContext &sig); void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, BufferedStackTrace *free_stack); void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc index 2c599047dac..551fea5b0c7 100644 --- a/libsanitizer/asan/asan_rtl.cc +++ b/libsanitizer/asan/asan_rtl.cc @@ -9,6 +9,7 @@ // // Main file of the ASan run-time library. //===----------------------------------------------------------------------===// + #include "asan_activation.h" #include "asan_allocator.h" #include "asan_interceptors.h" @@ -19,12 +20,15 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "lsan/lsan_common.h" +#include "ubsan/ubsan_init.h" +#include "ubsan/ubsan_platform.h" int __asan_option_detect_stack_use_after_return; // Global interface symbol. uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. 
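For context on the error_message_buffer machinery above: the text accumulated by AppendToErrorMessageBuffer is handed to whatever callback was registered through __asan_set_error_report_callback, in addition to being printed. A minimal user-side sketch (the callback name and sink are made up for illustration):

  #include <sanitizer/asan_interface.h>
  #include <stdio.h>
  #include <string.h>

  // Invoked by ASan with the full text of a report before the process dies.
  static void SaveAsanReport(const char *report) {
    fprintf(stderr, "captured %zu bytes of ASan report\n", strlen(report));
  }

  int main() {
    __asan_set_error_report_callback(SaveAsanReport);
    // ... run the application as usual ...
    return 0;
  }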
@@ -51,13 +55,6 @@ static void AsanDie() { UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); } } - if (common_flags()->coverage) - __sanitizer_cov_dump(); - if (death_callback) - death_callback(); - if (flags()->abort_on_error) - Abort(); - internal__exit(flags()->exitcode); } static void AsanCheckFailed(const char *file, int line, const char *cond, @@ -69,265 +66,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, Die(); } -// -------------------------- Flags ------------------------- {{{1 -static const int kDefaultMallocContextSize = 30; - -Flags asan_flags_dont_use_directly; // use via flags(). - -static const char *MaybeCallAsanDefaultOptions() { - return (&__asan_default_options) ? __asan_default_options() : ""; -} - -static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { -#ifdef ASAN_DEFAULT_OPTIONS -// Stringize the macro value. -# define ASAN_STRINGIZE(x) #x -# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) - return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); -#else - return ""; -#endif -} - -static void ParseFlagsFromString(Flags *f, const char *str) { - CommonFlags *cf = common_flags(); - ParseCommonFlagsFromString(cf, str); - CHECK((uptr)cf->malloc_context_size <= kStackTraceMax); - // Please write meaningful flag descriptions when adding new flags. - ParseFlag(str, &f->quarantine_size, "quarantine_size", - "Size (in bytes) of quarantine used to detect use-after-free " - "errors. Lower value may reduce memory usage but increase the " - "chance of false negatives."); - ParseFlag(str, &f->redzone, "redzone", - "Minimal size (in bytes) of redzones around heap objects. " - "Requirement: redzone >= 16, is a power of two."); - ParseFlag(str, &f->max_redzone, "max_redzone", - "Maximal size (in bytes) of redzones around heap objects."); - CHECK_GE(f->redzone, 16); - CHECK_GE(f->max_redzone, f->redzone); - CHECK_LE(f->max_redzone, 2048); - CHECK(IsPowerOfTwo(f->redzone)); - CHECK(IsPowerOfTwo(f->max_redzone)); - - ParseFlag(str, &f->debug, "debug", - "If set, prints some debugging information and does additional checks."); - ParseFlag(str, &f->report_globals, "report_globals", - "Controls the way to handle globals (0 - don't detect buffer overflow on " - "globals, 1 - detect buffer overflow, 2 - print data about registered " - "globals)."); - - ParseFlag(str, &f->check_initialization_order, - "check_initialization_order", - "If set, attempts to catch initialization order issues."); - - ParseFlag(str, &f->replace_str, "replace_str", - "If set, uses custom wrappers and replacements for libc string functions " - "to find more errors."); - - ParseFlag(str, &f->replace_intrin, "replace_intrin", - "If set, uses custom wrappers for memset/memcpy/memmove intinsics."); - ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free", - "Ignore invalid free() calls to work around some bugs. Used on OS X " - "only."); - ParseFlag(str, &f->detect_stack_use_after_return, - "detect_stack_use_after_return", - "Enables stack-use-after-return checking at run-time."); - ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log", - "Minimum fake stack size log."); - ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log", - "Maximum fake stack size log."); - ParseFlag(str, &f->uar_noreserve, "uar_noreserve", - "Use mmap with 'norserve' flag to allocate fake stack."); - ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size", - "ASan allocator flag. 
max_malloc_fill_size is the maximal amount of " - "bytes that will be filled with malloc_fill_byte on malloc."); - ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte", - "Value used to fill the newly allocated memory."); - ParseFlag(str, &f->exitcode, "exitcode", - "Override the program exit status if the tool found an error."); - ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning", - "If set, user may manually mark memory regions as poisoned or " - "unpoisoned."); - ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying", - "Number of seconds to sleep between printing an error report and " - "terminating the program. Useful for debugging purposes (e.g. when one " - "needs to attach gdb)."); - - ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size", - "Allows the users to work around the bug in Nvidia drivers prior to " - "295.*."); - - ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit", - "If set, explicitly unmaps the (huge) shadow at exit."); - ParseFlag(str, &f->abort_on_error, "abort_on_error", - "If set, the tool calls abort() instead of _exit() after printing the " - "error report."); - ParseFlag(str, &f->print_stats, "print_stats", - "Print various statistics after printing an error message or if " - "atexit=1."); - ParseFlag(str, &f->print_legend, "print_legend", - "Print the legend for the shadow bytes."); - ParseFlag(str, &f->atexit, "atexit", - "If set, prints ASan exit stats even after program terminates " - "successfully."); - - ParseFlag(str, &f->allow_reexec, "allow_reexec", - "Allow the tool to re-exec the program. This may interfere badly with " - "the debugger."); - - ParseFlag(str, &f->print_full_thread_history, - "print_full_thread_history", - "If set, prints thread creation stacks for the threads involved in the " - "report and their ancestors up to the main thread."); - - ParseFlag(str, &f->poison_heap, "poison_heap", - "Poison (or not) the heap memory on [de]allocation. Zero value is useful " - "for benchmarking the allocator or instrumentator."); - - ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie", - "Poison (or not) the array cookie after operator new[]."); - - ParseFlag(str, &f->poison_partial, "poison_partial", - "If true, poison partially addressable 8-byte aligned words " - "(default=true). This flag affects heap and global buffers, but not " - "stack buffers."); - - ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch", - "Report errors on malloc/delete, new/free, new/delete[], etc."); - - ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch", - "Report errors on mismatch betwen size of new and delete."); - - ParseFlag(str, &f->strict_memcmp, "strict_memcmp", - "If true, assume that memcmp(p1, p2, n) always reads n bytes before " - "comparing p1 and p2."); - - ParseFlag(str, &f->strict_init_order, "strict_init_order", - "If true, assume that dynamic initializers can never access globals from " - "other modules, even if the latter are already initialized."); - - ParseFlag(str, &f->start_deactivated, "start_deactivated", - "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " - "poisoning) to reduce memory consumption as much as possible, and " - "restores them to original values when the first instrumented module is " - "loaded into the process. This is mainly intended to be used on " - "Android. 
"); - - ParseFlag(str, &f->detect_invalid_pointer_pairs, - "detect_invalid_pointer_pairs", - "If non-zero, try to detect operations like <, <=, >, >= and - on " - "invalid pointer pairs (e.g. when pointers belong to different objects). " - "The bigger the value the harder we try."); - - ParseFlag(str, &f->detect_container_overflow, - "detect_container_overflow", - "If true, honor the container overflow annotations. " - "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow"); - - ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation", - "If >=2, detect violation of One-Definition-Rule (ODR); " - "If ==1, detect ODR-violation only if the two variables " - "have different sizes"); - - ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes", - "If true, dump 16 bytes starting at the instruction that caused SEGV"); -} - -void InitializeFlags(Flags *f, const char *env) { - CommonFlags *cf = common_flags(); - SetCommonFlagsDefaults(cf); - cf->detect_leaks = CAN_SANITIZE_LEAKS; - cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); - cf->malloc_context_size = kDefaultMallocContextSize; - cf->intercept_tls_get_addr = true; - cf->coverage = false; - - internal_memset(f, 0, sizeof(*f)); - f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28; - f->redzone = 16; - f->max_redzone = 2048; - f->debug = false; - f->report_globals = 1; - f->check_initialization_order = false; - f->replace_str = true; - f->replace_intrin = true; - f->mac_ignore_invalid_free = false; - f->detect_stack_use_after_return = false; // Also needs the compiler flag. - f->min_uar_stack_size_log = 16; // We can't do smaller anyway. - f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread. - f->uar_noreserve = false; - f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K. - f->malloc_fill_byte = 0xbe; - f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE; - f->allow_user_poisoning = true; - f->sleep_before_dying = 0; - f->check_malloc_usable_size = true; - f->unmap_shadow_on_exit = false; - f->abort_on_error = false; - f->print_stats = false; - f->print_legend = true; - f->atexit = false; - f->allow_reexec = true; - f->print_full_thread_history = true; - f->poison_heap = true; - f->poison_array_cookie = true; - f->poison_partial = true; - // Turn off alloc/dealloc mismatch checker on Mac and Windows for now. - // https://code.google.com/p/address-sanitizer/issues/detail?id=131 - // https://code.google.com/p/address-sanitizer/issues/detail?id=309 - // TODO(glider,timurrrr): Fix known issues and enable this back. - f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0); - f->new_delete_type_mismatch = true; - f->strict_memcmp = true; - f->strict_init_order = false; - f->start_deactivated = false; - f->detect_invalid_pointer_pairs = 0; - f->detect_container_overflow = true; - f->detect_odr_violation = 2; - f->dump_instruction_bytes = false; - - // Override from compile definition. - ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition()); - - // Override from user-specified string. - ParseFlagsFromString(f, MaybeCallAsanDefaultOptions()); - VReport(1, "Using the defaults from __asan_default_options: %s\n", - MaybeCallAsanDefaultOptions()); - - // Override from command line. 
- ParseFlagsFromString(f, env); - if (common_flags()->help) { - PrintFlagDescriptions(); - } - - if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) { - Report("%s: detect_leaks is not supported on this platform.\n", - SanitizerToolName); - cf->detect_leaks = false; - } - - // Make "strict_init_order" imply "check_initialization_order". - // TODO(samsonov): Use a single runtime flag for an init-order checker. - if (f->strict_init_order) { - f->check_initialization_order = true; - } -} - -// Parse flags that may change between startup and activation. -// On Android they come from a system property. -// On other platforms this is no-op. -void ParseExtraActivationFlags() { - char buf[100]; - GetExtraActivationFlags(buf, sizeof(buf)); - ParseFlagsFromString(flags(), buf); - if (buf[0] != '\0') - VReport(1, "Extra activation flags: %s\n", buf); -} - // -------------------------- Globals --------------------- {{{1 int asan_inited; bool asan_init_is_running; -void (*death_callback)(void); #if !ASAN_FIXED_MAPPING uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; @@ -341,17 +82,22 @@ void ShowStatsAndAbort() { // ---------------------- mmap -------------------- {{{1 // Reserve memory range [beg, end]. -static void ReserveShadowMemoryRange(uptr beg, uptr end) { +// We need to use inclusive range because end+1 may not be representable. +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { CHECK_EQ((beg % GetPageSizeCached()), 0); CHECK_EQ(((end + 1) % GetPageSizeCached()), 0); uptr size = end - beg + 1; DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - void *res = MmapFixedNoReserve(beg, size); + void *res = MmapFixedNoReserve(beg, size, name); if (res != (void*)beg) { Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
" "Perhaps you're using ulimit -v\n", size); Abort(); } + if (common_flags()->no_huge_pages_for_shadow) + NoHugePagesInRegion(beg, size); + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(beg, size); } // --------------- LowLevelAllocateCallbac ---------- {{{1 @@ -362,11 +108,15 @@ static void OnLowLevelAllocate(uptr ptr, uptr size) { // -------------------------- Run-time entry ------------------- {{{1 // exported functions #define ASAN_REPORT_ERROR(type, is_write, size) \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## size(uptr addr); \ -void __asan_report_ ## type ## size(uptr addr) { \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## size(uptr addr) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp); \ } ASAN_REPORT_ERROR(load, false, 1) @@ -382,18 +132,20 @@ ASAN_REPORT_ERROR(store, true, 16) #define ASAN_REPORT_ERROR_N(type, is_write) \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## _n(uptr addr, uptr size); \ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp); \ } ASAN_REPORT_ERROR_N(load, false) ASAN_REPORT_ERROR_N(store, true) -#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ - extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \ - void __asan_##type##size(uptr addr) { \ +#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg) \ uptr sp = MEM_TO_SHADOW(addr); \ uptr s = size <= SHADOW_GRANULARITY ? 
*reinterpret_cast(sp) \ : *reinterpret_cast(sp); \ @@ -405,10 +157,19 @@ ASAN_REPORT_ERROR_N(store, true) *__asan_test_only_reported_buggy_pointer = addr; \ } else { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp_arg); \ } \ } \ - } \ + } + +#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_exp_##type##size(uptr addr, u32 exp) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp) \ } ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) @@ -423,18 +184,38 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) extern "C" -NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; - __asan_report_error(pc, bp, sp, addr, false, size); + __asan_report_error(pc, bp, sp, addr, false, size, 0); } } extern "C" -NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; - __asan_report_error(pc, bp, sp, addr, true, size); + __asan_report_error(pc, bp, sp, addr, false, size, exp); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, true, size, 0); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, true, size, exp); } } @@ -453,26 +234,39 @@ static NOINLINE void force_interface_symbols() { case 3: __asan_report_load4(0); break; case 4: __asan_report_load8(0); break; case 5: __asan_report_load16(0); break; - case 6: __asan_report_store1(0); break; - case 7: __asan_report_store2(0); break; - case 8: __asan_report_store4(0); break; - case 9: __asan_report_store8(0); break; - case 10: __asan_report_store16(0); break; - case 12: __asan_register_globals(0, 0); break; - case 13: __asan_unregister_globals(0, 0); break; - case 14: __asan_set_death_callback(0); break; - case 15: __asan_set_error_report_callback(0); break; - case 16: __asan_handle_no_return(); break; - case 17: __asan_address_is_poisoned(0); break; - case 25: __asan_poison_memory_region(0, 0); break; - case 26: __asan_unpoison_memory_region(0, 0); break; - case 27: __asan_set_error_exit_code(0); break; - case 30: __asan_before_dynamic_init(0); break; - case 31: __asan_after_dynamic_init(); break; - case 32: __asan_poison_stack_memory(0, 0); break; - case 33: __asan_unpoison_stack_memory(0, 0); break; - case 34: __asan_region_is_poisoned(0, 0); break; - case 35: __asan_describe_address(0); break; + case 6: __asan_report_load_n(0, 0); break; + case 7: __asan_report_store1(0); break; + case 8: __asan_report_store2(0); break; + case 9: __asan_report_store4(0); break; + case 10: __asan_report_store8(0); break; + case 11: __asan_report_store16(0); break; + case 12: __asan_report_store_n(0, 0); break; + case 13: __asan_report_exp_load1(0, 0); break; + case 14: 
__asan_report_exp_load2(0, 0); break; + case 15: __asan_report_exp_load4(0, 0); break; + case 16: __asan_report_exp_load8(0, 0); break; + case 17: __asan_report_exp_load16(0, 0); break; + case 18: __asan_report_exp_load_n(0, 0, 0); break; + case 19: __asan_report_exp_store1(0, 0); break; + case 20: __asan_report_exp_store2(0, 0); break; + case 21: __asan_report_exp_store4(0, 0); break; + case 22: __asan_report_exp_store8(0, 0); break; + case 23: __asan_report_exp_store16(0, 0); break; + case 24: __asan_report_exp_store_n(0, 0, 0); break; + case 25: __asan_register_globals(nullptr, 0); break; + case 26: __asan_unregister_globals(nullptr, 0); break; + case 27: __asan_set_death_callback(nullptr); break; + case 28: __asan_set_error_report_callback(nullptr); break; + case 29: __asan_handle_no_return(); break; + case 30: __asan_address_is_poisoned(nullptr); break; + case 31: __asan_poison_memory_region(nullptr, 0); break; + case 32: __asan_unpoison_memory_region(nullptr, 0); break; + case 34: __asan_before_dynamic_init(nullptr); break; + case 35: __asan_after_dynamic_init(); break; + case 36: __asan_poison_stack_memory(0, 0); break; + case 37: __asan_unpoison_stack_memory(0, 0); break; + case 38: __asan_region_is_poisoned(0, 0); break; + case 39: __asan_describe_address(0); break; } } @@ -496,8 +290,28 @@ static void InitializeHighMemEnd() { CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0); } -static void ProtectGap(uptr a, uptr size) { - CHECK_EQ(a, (uptr)Mprotect(a, size)); +static void ProtectGap(uptr addr, uptr size) { + void *res = MmapNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + // A few pages at the start of the address space can not be protected. + // But we really want to protect as much as possible, to prevent this memory + // being returned as a result of a non-FIXED mmap(). + if (addr == kZeroBaseShadowStart) { + uptr step = GetPageSizeCached(); + while (size > step && addr < kZeroBaseMaxShadowStart) { + addr += step; + size -= step; + void *res = MmapNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + } + } + + Report("ERROR: Failed to protect the shadow gap. " + "ASan cannot proceed correctly. ABORTING.\n"); + DumpProcessMap(); + Die(); } static void PrintAddressSpaceLayout() { @@ -536,13 +350,13 @@ static void PrintAddressSpaceLayout() { Printf("\n"); Printf("redzone=%zu\n", (uptr)flags()->redzone); Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); - Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20); + Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); Printf("malloc_context_size=%zu\n", (uptr)common_flags()->malloc_context_size); - Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); - Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); - Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); + Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); if (kMidMemBeg) CHECK(kMidShadowBeg > kLowShadowEnd && @@ -556,10 +370,19 @@ static void AsanInitInternal() { CHECK(!asan_init_is_running && "ASan init calls itself!"); asan_init_is_running = true; + CacheBinaryName(); + // Initialize flags. This must be done early, because most of the // initialization steps look at flags(). 
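The flag machinery deleted above now lives in asan_flags.cc and the shared sanitizer_common parser, but the layering an application sees should be unchanged: compiled-in defaults, then the __asan_default_options() hook, then the ASAN_OPTIONS environment variable. A minimal sketch of that hook (the flag values are chosen only for illustration):

  // Linked into the instrumented application; ASan consults this, if present,
  // while initializing flags, and ASAN_OPTIONS still overrides it at run time.
  extern "C" const char *__asan_default_options() {
    return "malloc_context_size=20:detect_stack_use_after_return=1";
  }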
- const char *options = GetEnv("ASAN_OPTIONS"); - InitializeFlags(flags(), options); + InitializeFlags(); + + CheckVMASize(); + + AsanCheckIncompatibleRT(); + AsanCheckDynamicRTPrereqs(); + + SetCanPoisonMemory(flags()->poison_heap); + SetMallocContextSize(common_flags()->malloc_context_size); InitializeHighMemEnd(); @@ -567,24 +390,15 @@ static void AsanInitInternal() { AsanDoesNotSupportStaticLinkage(); // Install tool-specific callbacks in sanitizer_common. - SetDieCallback(AsanDie); + AddDieCallback(AsanDie); SetCheckFailedCallback(AsanCheckFailed); SetPrintfAndReportCallback(AppendToErrorMessageBuffer); - if (!flags()->start_deactivated) - ParseExtraActivationFlags(); - __sanitizer_set_report_path(common_flags()->log_path); + + // Enable UAR detection, if required. __asan_option_detect_stack_use_after_return = flags()->detect_stack_use_after_return; - CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); - - if (options) { - VReport(1, "Parsed ASAN_OPTIONS: %s\n", options); - } - - if (flags()->start_deactivated) - AsanStartDeactivated(); // Re-exec ourselves if we need to set additional env or command line args. MaybeReexec(); @@ -615,17 +429,16 @@ static void AsanInitInternal() { } #endif - if (common_flags()->verbosity) - PrintAddressSpaceLayout(); + if (Verbosity()) PrintAddressSpaceLayout(); DisableCoreDumperIfNecessary(); if (full_shadow_is_available) { // mmap the low shadow plus at least one page at the left. if (kLowShadowBeg) - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gap. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); @@ -634,11 +447,11 @@ static void AsanInitInternal() { MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { CHECK(kLowShadowBeg != kLowShadowEnd); // mmap the low shadow plus at least one page at the left. - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the mid shadow. - ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd); + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gaps. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); @@ -646,14 +459,21 @@ static void AsanInitInternal() { } else { Report("Shadow memory range interleaves with an existing memory mapping. " "ASan cannot proceed correctly. ABORTING.\n"); + Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", + shadow_start, kHighShadowEnd); DumpProcessMap(); Die(); } AsanTSDInit(PlatformTSDDtor); - InstallDeadlySignalHandlers(AsanOnSIGSEGV); + InstallDeadlySignalHandlers(AsanOnDeadlySignal); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); - InitializeAllocator(); + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // should be set to 1 prior to initializing the threads. 
@@ -663,32 +483,40 @@ static void AsanInitInternal() { if (flags()->atexit) Atexit(asan_atexit); - if (common_flags()->coverage) { - __sanitizer_cov_init(); - Atexit(__sanitizer_cov_dump); - } + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + // Now that ASan runtime is (mostly) initialized, deactivate it if + // necessary, so that it can be re-activated when requested. + if (flags()->start_deactivated) + AsanDeactivate(); // interceptors InitTlsSize(); // Create main thread. - AsanThread *main_thread = AsanThread::Create(0, 0); - CreateThreadContextArgs create_main_args = { main_thread, 0 }; - u32 main_tid = asanThreadRegistry().CreateThread( - 0, true, 0, &create_main_args); - CHECK_EQ(0, main_tid); + AsanThread *main_thread = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, + /* stack */ nullptr, /* detached */ true); + CHECK_EQ(0, main_thread->tid()); SetCurrentThread(main_thread); - main_thread->ThreadStart(internal_getpid()); + main_thread->ThreadStart(internal_getpid(), + /* signal_thread_is_registered */ nullptr); force_interface_symbols(); // no-op. SanitizerInitializeUnwinder(); #if CAN_SANITIZE_LEAKS - __lsan::InitCommonLsan(false); + __lsan::InitCommonLsan(); if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { Atexit(__lsan::DoLeakCheck); } #endif // CAN_SANITIZE_LEAKS +#if CAN_SANITIZE_UB + __ubsan::InitAsPlugin(); +#endif + + InitializeSuppressions(); + VReport(1, "AddressSanitizer Init done\n"); } @@ -700,46 +528,38 @@ void AsanInitFromRtl() { #if ASAN_DYNAMIC // Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable -// (and thus normal initializer from .preinit_array haven't run). +// (and thus normal initializers from .preinit_array or modules haven't run). class AsanInitializer { public: // NOLINT AsanInitializer() { - AsanCheckIncompatibleRT(); - AsanCheckDynamicRTPrereqs(); - if (UNLIKELY(!asan_inited)) - __asan_init(); + AsanInitFromRtl(); } }; static AsanInitializer asan_initializer; #endif // ASAN_DYNAMIC -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -const char* __asan_default_options() { return ""; } -} // extern "C" -#endif - -int NOINLINE __asan_set_error_exit_code(int exit_code) { - int old = flags()->exitcode; - flags()->exitcode = exit_code; - return old; -} - void NOINLINE __asan_handle_no_return() { int local_stack; AsanThread *curr_thread = GetCurrentThread(); - CHECK(curr_thread); uptr PageSize = GetPageSizeCached(); - uptr top = curr_thread->stack_top(); - uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1); + uptr top, bottom; + if (curr_thread) { + top = curr_thread->stack_top(); + bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + } else { + // If we haven't seen this thread, try asking the OS for stack bounds. 
+ uptr tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, + &tls_size); + top = bottom + stack_size; + } static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M if (top - bottom > kMaxExpectedCleanupSize) { static bool reported_warning = false; @@ -755,18 +575,21 @@ void NOINLINE __asan_handle_no_return() { return; } PoisonShadow(bottom, top - bottom, 0); - if (curr_thread->has_fake_stack()) + if (curr_thread && curr_thread->has_fake_stack()) curr_thread->fake_stack()->HandleNoReturn(); } void NOINLINE __asan_set_death_callback(void (*callback)(void)) { - death_callback = callback; + SetUserDieCallback(callback); } // Initialize as requested from instrumented application code. // We use this call as a trigger to wake up ASan from deactivated state. void __asan_init() { - AsanCheckIncompatibleRT(); AsanActivate(); AsanInitInternal(); } + +void __asan_version_mismatch_check() { + // Do nothing. +} diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc index 96178e8247b..973c5ce59ef 100644 --- a/libsanitizer/asan/asan_stack.cc +++ b/libsanitizer/asan/asan_stack.cc @@ -11,6 +11,21 @@ //===----------------------------------------------------------------------===// #include "asan_internal.h" #include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" + +namespace __asan { + +static atomic_uint32_t malloc_context_size; + +void SetMallocContextSize(u32 size) { + atomic_store(&malloc_context_size, size, memory_order_release); +} + +u32 GetMallocContextSize() { + return atomic_load(&malloc_context_size, memory_order_acquire); +} + +} // namespace __asan // ------------------ Interface -------------- {{{1 diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h index 640c4cf4f7b..30dc5921303 100644 --- a/libsanitizer/asan/asan_stack.h +++ b/libsanitizer/asan/asan_stack.h @@ -9,6 +9,7 @@ // // ASan-private header for asan_stack.cc. //===----------------------------------------------------------------------===// + #ifndef ASAN_STACK_H #define ASAN_STACK_H @@ -19,6 +20,11 @@ namespace __asan { +static const u32 kDefaultMallocContextSize = 30; + +void SetMallocContextSize(u32 size); +u32 GetMallocContextSize(); + // Get the stack trace with the given pc and bp. // The pc will be in the position 0 of the resulting stack trace. // The bp may refer to the current frame or to the caller's frame. @@ -41,15 +47,15 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, uptr stack_bottom = t->stack_bottom(); ScopedUnwinding unwind_scope(t); stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast); - } else if (t == 0 && !fast) { + } else if (!t && !fast) { /* If GetCurrentThread() has failed, try to do slow unwind anyways. 
*/ stack->Unwind(max_depth, pc, bp, context, 0, 0, false); } } -#endif // SANITIZER_WINDOWS +#endif // SANITIZER_WINDOWS } -} // namespace __asan +} // namespace __asan // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors // as early as possible (in functions exposed to the user), as we generally @@ -76,9 +82,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \ common_flags()->fast_unwind_on_fatal) -#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \ +#define GET_STACK_TRACE_SIGNAL(sig) \ BufferedStackTrace stack; \ - GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \ + GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \ + (sig).pc, (sig).bp, (sig).context, \ common_flags()->fast_unwind_on_fatal) #define GET_STACK_TRACE_FATAL_HERE \ @@ -90,9 +97,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, #define GET_STACK_TRACE_THREAD \ GET_STACK_TRACE(kStackTraceMax, true) -#define GET_STACK_TRACE_MALLOC \ - GET_STACK_TRACE(common_flags()->malloc_context_size, \ - common_flags()->fast_unwind_on_malloc) +#define GET_STACK_TRACE_MALLOC \ + GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc) #define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC @@ -108,4 +114,4 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, stack.Print(); \ } -#endif // ASAN_STACK_H +#endif // ASAN_STACK_H diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc index fbd636ea558..64a788a6a5d 100644 --- a/libsanitizer/asan/asan_stats.cc +++ b/libsanitizer/asan/asan_stats.cc @@ -49,12 +49,8 @@ void AsanStats::Print() { (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, mmaps, munmaps); - PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size); PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); - PrintMallocStatsArray(" frees by size class: ", freed_by_size); - PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size); - Printf("Stats: malloc large: %zu small slow: %zu\n", - malloc_large, malloc_small_slow); + Printf("Stats: malloc large: %zu\n", malloc_large); } void AsanStats::MergeFrom(const AsanStats *stats) { @@ -159,8 +155,7 @@ uptr __sanitizer_get_free_bytes() { GetAccumulatedStats(&stats); uptr total_free = stats.mmaped - stats.munmaped - + stats.really_freed - + stats.really_freed_redzones; + + stats.really_freed; uptr total_used = stats.malloced + stats.malloced_redzones; // Return sane value if total_free < total_used due to racy diff --git a/libsanitizer/asan/asan_stats.h b/libsanitizer/asan/asan_stats.h index 5b04b1718eb..a48e3f916a9 100644 --- a/libsanitizer/asan/asan_stats.h +++ b/libsanitizer/asan/asan_stats.h @@ -30,20 +30,14 @@ struct AsanStats { uptr freed; uptr real_frees; uptr really_freed; - uptr really_freed_redzones; uptr reallocs; uptr realloced; uptr mmaps; uptr mmaped; uptr munmaps; uptr munmaped; - uptr mmaped_by_size[kNumberOfSizeClasses]; - uptr malloced_by_size[kNumberOfSizeClasses]; - uptr freed_by_size[kNumberOfSizeClasses]; - uptr really_freed_by_size[kNumberOfSizeClasses]; - uptr malloc_large; - uptr malloc_small_slow; + uptr malloced_by_size[kNumberOfSizeClasses]; // Ctor for global AsanStats (accumulated stats for dead threads). 
explicit AsanStats(LinkerInitialized) { } diff --git a/libsanitizer/asan/asan_suppressions.cc b/libsanitizer/asan/asan_suppressions.cc new file mode 100644 index 00000000000..c94fff0500a --- /dev/null +++ b/libsanitizer/asan/asan_suppressions.cc @@ -0,0 +1,108 @@ +//===-- asan_suppressions.cc ----------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Issue suppression and suppression-related functions. +//===----------------------------------------------------------------------===// + +#include "asan_suppressions.h" + +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kInterceptorName[] = "interceptor_name"; +static const char kInterceptorViaFunction[] = "interceptor_via_fun"; +static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; +static const char kODRViolation[] = "odr_violation"; +static const char *kSuppressionTypes[] = { + kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, + kODRViolation}; + +extern "C" { +#if SANITIZER_SUPPORTS_WEAK_HOOKS +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char *__asan_default_suppressions(); +#else +// No week hooks, provide empty implementation. +const char *__asan_default_suppressions() { return ""; } +#endif // SANITIZER_SUPPORTS_WEAK_HOOKS +} // extern "C" + +void InitializeSuppressions() { + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); + if (&__asan_default_suppressions) + suppression_ctx->Parse(__asan_default_suppressions()); +} + +bool IsInterceptorSuppressed(const char *interceptor_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "interceptor_name" suppressions. + return suppression_ctx->Match(interceptor_name, kInterceptorName, &s); +} + +bool HaveStackTraceBasedSuppressions() { + CHECK(suppression_ctx); + return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) || + suppression_ctx->HasSuppressionType(kInterceptorViaLibrary); +} + +bool IsODRViolationSuppressed(const char *global_var_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "odr_violation" suppressions. + return suppression_ctx->Match(global_var_name, kODRViolation, &s); +} + +bool IsStackTraceSuppressed(const StackTrace *stack) { + if (!HaveStackTraceBasedSuppressions()) + return false; + + CHECK(suppression_ctx); + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + Suppression *s; + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + + if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) { + // Match "interceptor_via_lib" suppressions. 
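For reference, the four suppression types registered above map onto entries of a plain-text file passed at run time, e.g. ASAN_OPTIONS=suppressions=asan.supp. A sketch of such a file (the names after the colons are invented examples; patterns may also use '*' wildcards):

  # asan.supp
  interceptor_name:strchr
  interceptor_via_fun:LegacyChecksum
  interceptor_via_lib:libvendor-blob.so
  odr_violation:kDuplicatedGlobal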
+ if (const char *module_name = symbolizer->GetModuleNameForPc(addr)) + if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) + return true; + } + + if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { + SymbolizedStack *frames = symbolizer->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match "interceptor_via_fun" suppressions. + if (suppression_ctx->Match(function_name, kInterceptorViaFunction, + &s)) { + frames->ClearAll(); + return true; + } + } + frames->ClearAll(); + } + } + return false; +} + +} // namespace __asan diff --git a/libsanitizer/asan/asan_suppressions.h b/libsanitizer/asan/asan_suppressions.h new file mode 100644 index 00000000000..331d7224548 --- /dev/null +++ b/libsanitizer/asan/asan_suppressions.h @@ -0,0 +1,28 @@ +//===-- asan_suppressions.h -------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_suppressions.cc. +//===----------------------------------------------------------------------===// +#ifndef ASAN_SUPPRESSIONS_H +#define ASAN_SUPPRESSIONS_H + +#include "asan_internal.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __asan { + +void InitializeSuppressions(); +bool IsInterceptorSuppressed(const char *interceptor_name); +bool HaveStackTraceBasedSuppressions(); +bool IsStackTraceSuppressed(const StackTrace *stack); +bool IsODRViolationSuppressed(const char *global_var_name); + +} // namespace __asan + +#endif // ASAN_SUPPRESSIONS_H diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc index 95ca6720090..92f968bdee4 100644 --- a/libsanitizer/asan/asan_thread.cc +++ b/libsanitizer/asan/asan_thread.cc @@ -25,6 +25,11 @@ namespace __asan { // AsanThreadContext implementation. +struct CreateThreadContextArgs { + AsanThread *thread; + StackTrace *stack; +}; + void AsanThreadContext::OnCreated(void *arg) { CreateThreadContextArgs *args = static_cast(arg); if (args->stack) @@ -35,7 +40,7 @@ void AsanThreadContext::OnCreated(void *arg) { void AsanThreadContext::OnFinished() { // Drop the link to the AsanThread object. - thread = 0; + thread = nullptr; } // MIPS requires aligned address @@ -73,13 +78,17 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { // AsanThread implementation. -AsanThread *AsanThread::Create(thread_callback_t start_routine, - void *arg) { +AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, + bool detached) { uptr PageSize = GetPageSizeCached(); uptr size = RoundUpTo(sizeof(AsanThread), PageSize); AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); thread->start_routine_ = start_routine; thread->arg_ = arg; + CreateThreadContextArgs args = { thread, stack }; + asanThreadRegistry().CreateThread(*reinterpret_cast(thread), detached, + parent_tid, &args); return thread; } @@ -114,7 +123,7 @@ void AsanThread::Destroy() { FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { uptr stack_size = this->stack_size(); if (stack_size == 0) // stack_size is not yet available, don't use FakeStack. 
- return 0; + return nullptr; uptr old_val = 0; // fake_stack_ has 3 states: // 0 -- not initialized @@ -135,11 +144,11 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { SetTLSFakeStack(fake_stack_); return fake_stack_; } - return 0; + return nullptr; } void AsanThread::Init() { - fake_stack_ = 0; // Will be initialized lazily if needed. + fake_stack_ = nullptr; // Will be initialized lazily if needed. CHECK_EQ(this->stack_size(), 0U); SetThreadStackAndTls(); CHECK_GT(this->stack_size(), 0U); @@ -150,12 +159,15 @@ void AsanThread::Init() { VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, &local); - AsanPlatformThreadInit(); } -thread_return_t AsanThread::ThreadStart(uptr os_id) { +thread_return_t AsanThread::ThreadStart( + uptr os_id, atomic_uintptr_t *signal_thread_is_registered) { Init(); - asanThreadRegistry().StartThread(tid(), os_id, 0); + asanThreadRegistry().StartThread(tid(), os_id, nullptr); + if (signal_thread_is_registered) + atomic_store(signal_thread_is_registered, 1, memory_order_release); + if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); if (!start_routine_) { @@ -262,7 +274,7 @@ AsanThread *GetCurrentThread() { return tctx->thread; } } - return 0; + return nullptr; } return context->thread; } @@ -287,7 +299,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) { AsanThreadContext *tctx = static_cast( asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, (void *)addr)); - return tctx ? tctx->thread : 0; + return tctx ? tctx->thread : nullptr; } void EnsureMainThreadIDIsCorrect() { @@ -300,10 +312,10 @@ void EnsureMainThreadIDIsCorrect() { __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) { __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); - if (!context) return 0; + if (!context) return nullptr; return context->thread; } -} // namespace __asan +} // namespace __asan // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan { @@ -340,4 +352,4 @@ void UnlockThreadRegistry() { void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); } -} // namespace __lsan +} // namespace __lsan diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h index f5ea53d051f..27a3367d8e7 100644 --- a/libsanitizer/asan/asan_thread.h +++ b/libsanitizer/asan/asan_thread.h @@ -9,6 +9,7 @@ // // ASan-private header for asan_thread.cc. //===----------------------------------------------------------------------===// + #ifndef ASAN_THREAD_H #define ASAN_THREAD_H @@ -32,19 +33,16 @@ class AsanThread; class AsanThreadContext : public ThreadContextBase { public: explicit AsanThreadContext(int tid) - : ThreadContextBase(tid), - announced(false), - destructor_iterations(kPthreadDestructorIterations), - stack_id(0), - thread(0) { - } + : ThreadContextBase(tid), announced(false), + destructor_iterations(GetPthreadDestructorIterations()), stack_id(0), + thread(nullptr) {} bool announced; u8 destructor_iterations; u32 stack_id; AsanThread *thread; - void OnCreated(void *arg); - void OnFinished(); + void OnCreated(void *arg) override; + void OnFinished() override; }; // AsanThreadContext objects are never freed, so we need many of them. @@ -53,12 +51,14 @@ COMPILER_CHECK(sizeof(AsanThreadContext) <= 256); // AsanThread are stored in TSD and destroyed when the thread dies. 
class AsanThread { public: - static AsanThread *Create(thread_callback_t start_routine, void *arg); + static AsanThread *Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, bool detached); static void TSDDtor(void *tsd); void Destroy(); void Init(); // Should be called from the thread itself. - thread_return_t ThreadStart(uptr os_id); + thread_return_t ThreadStart(uptr os_id, + atomic_uintptr_t *signal_thread_is_registered); uptr stack_top() { return stack_top_; } uptr stack_bottom() { return stack_bottom_; } @@ -83,8 +83,8 @@ class AsanThread { void DeleteFakeStack(int tid) { if (!fake_stack_) return; FakeStack *t = fake_stack_; - fake_stack_ = 0; - SetTLSFakeStack(0); + fake_stack_ = nullptr; + SetTLSFakeStack(nullptr); t->Destroy(tid); } @@ -94,7 +94,7 @@ class AsanThread { FakeStack *fake_stack() { if (!__asan_option_detect_stack_use_after_return) - return 0; + return nullptr; if (!has_fake_stack()) return AsyncSignalSafeLazyInitFakeStack(); return fake_stack_; @@ -164,11 +164,6 @@ class ScopedDeadlySignal { AsanThread *thread; }; -struct CreateThreadContextArgs { - AsanThread *thread; - StackTrace *stack; -}; - // Returns a single instance of registry. ThreadRegistry &asanThreadRegistry(); @@ -183,6 +178,6 @@ AsanThread *FindThreadByStackAddress(uptr addr); // Used to handle fork(). void EnsureMainThreadIDIsCorrect(); -} // namespace __asan +} // namespace __asan -#endif // ASAN_THREAD_H +#endif // ASAN_THREAD_H diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc index b0028763b11..caec9f83d68 100644 --- a/libsanitizer/asan/asan_win.cc +++ b/libsanitizer/asan/asan_win.cc @@ -14,27 +14,139 @@ #if SANITIZER_WINDOWS #include -#include #include #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_report.h" +#include "asan_stack.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mutex.h" +using namespace __asan; // NOLINT + extern "C" { - SANITIZER_INTERFACE_ATTRIBUTE - int __asan_should_detect_stack_use_after_return() { - __asan_init(); - return __asan_option_detect_stack_use_after_return; - } +SANITIZER_INTERFACE_ATTRIBUTE +int __asan_should_detect_stack_use_after_return() { + __asan_init(); + return __asan_option_detect_stack_use_after_return; +} + +// -------------------- A workaround for the abscence of weak symbols ----- {{{ +// We don't have a direct equivalent of weak symbols when using MSVC, but we can +// use the /alternatename directive to tell the linker to default a specific +// symbol to a specific value, which works nicely for allocator hooks and +// __asan_default_options(). 
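As a usage sketch (the flag string here is purely illustrative), a program linked against this runtime can still supply its own strong definition, which takes precedence over the /alternatename defaults that follow, because /alternatename only fills in a symbol the program leaves undefined:

  // User code: override the default ASan options for this binary.
  // No weak attribute is needed on Windows; the /alternatename directives
  // below only provide a fallback used when this symbol is absent.
  extern "C" const char *__asan_default_options() {
    return "verbosity=1";
  }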
+void __sanitizer_default_malloc_hook(void *ptr, uptr size) { } +void __sanitizer_default_free_hook(void *ptr) { } +const char* __asan_default_default_options() { return ""; } +const char* __asan_default_default_suppressions() { return ""; } +void __asan_default_on_error() {} +#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT +#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT +#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT +#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT +#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT +// }}} +} // extern "C" + +// ---------------------- Windows-specific inteceptors ---------------- {{{ +INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { + CHECK(REAL(RaiseException)); + __asan_handle_no_return(); + REAL(RaiseException)(a, b, c, d); +} + +INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler3)); + __asan_handle_no_return(); + return REAL(_except_handler3)(a, b, c, d); +} + +#if ASAN_DYNAMIC +// This handler is named differently in -MT and -MD CRTs. +#define _except_handler4 _except_handler4_common +#endif +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler4)); + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} + +static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { + AsanThread *t = (AsanThread*)arg; + SetCurrentThread(t); + return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); +} + +INTERCEPTOR_WINAPI(DWORD, CreateThread, + void* security, uptr stack_size, + DWORD (__stdcall *start_routine)(void*), void* arg, + DWORD thr_flags, void* tid) { + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) + StopInitOrderChecking(); + GET_STACK_TRACE_THREAD; + // FIXME: The CreateThread interceptor is not the same as a pthread_create + // one. This is a bandaid fix for PR22025. + bool detached = false; // FIXME: how can we determine it on Windows? + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + return REAL(CreateThread)(security, stack_size, + asan_thread_start, t, thr_flags, tid); +} + +namespace { +BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED); + +void EnsureWorkerThreadRegistered() { + // FIXME: GetCurrentThread relies on TSD, which might not play well with + // system thread pools. We might want to use something like reference + // counting to zero out GetCurrentThread() underlying storage when the last + // work item finishes? Or can we disable reclaiming of threads in the pool? 
+ BlockingMutexLock l(&mu_for_thread_tracking); + if (__asan::GetCurrentThread()) + return; + + AsanThread *t = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, + /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true); + t->Init(); + asanThreadRegistry().StartThread(t->tid(), 0, 0); + SetCurrentThread(t); +} +} // namespace + +INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) { + // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to + // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc. + // System worker pool threads are created at arbitraty point in time and + // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory + // instead and don't register a specific parent_tid/stack. + EnsureWorkerThreadRegistered(); + return REAL(NtWaitForWorkViaWorkerFactory)(a, b); } +// }}} + namespace __asan { -// ---------------------- TSD ---------------- {{{1 +void InitializePlatformInterceptors() { + ASAN_INTERCEPT_FUNC(CreateThread); + ASAN_INTERCEPT_FUNC(RaiseException); + ASAN_INTERCEPT_FUNC(_except_handler3); + ASAN_INTERCEPT_FUNC(_except_handler4); + + // NtWaitForWorkViaWorkerFactory is always linked dynamically. + CHECK(::__interception::OverrideFunction( + "NtWaitForWorkViaWorkerFactory", + (uptr)WRAP(NtWaitForWorkViaWorkerFactory), + (uptr *)&REAL(NtWaitForWorkViaWorkerFactory))); +} + +// ---------------------- TSD ---------------- {{{ static bool tsd_key_inited = false; static __declspec(thread) void *fake_tsd = 0; @@ -57,7 +169,13 @@ void AsanTSDSet(void *tsd) { void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); } -// ---------------------- Various stuff ---------------- {{{1 +// }}} + +// ---------------------- Various stuff ---------------- {{{ +void DisableReexec() { + // No need to re-exec on Windows. +} + void MaybeReexec() { // No need to re-exec on Windows. } @@ -73,15 +191,11 @@ void AsanCheckDynamicRTPrereqs() {} void AsanCheckIncompatibleRT() {} -void AsanPlatformThreadInit() { - // Nothing here for now. -} - void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } -void AsanOnSIGSEGV(int, void *siginfo, void *context) { +void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); } @@ -90,12 +204,6 @@ static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { EXCEPTION_RECORD *exception_record = info->ExceptionRecord; CONTEXT *context = info->ContextRecord; - uptr pc = (uptr)exception_record->ExceptionAddress; -#ifdef _WIN64 - uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp; -#else - uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp; -#endif if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { @@ -103,8 +211,8 @@ static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) ? "access-violation" : "in-page-error"; - uptr access_addr = exception_record->ExceptionInformation[1]; - ReportSIGSEGV(description, pc, sp, bp, context, access_addr); + SignalContext sig = SignalContext::Create(exception_record, context); + ReportDeadlySignal(description, sig); } // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. @@ -142,10 +250,10 @@ int __asan_set_seh_filter() { // Put a pointer to __asan_set_seh_filter at the end of the global list // of C initializers, after the default EH is set by the CRT. 
#pragma section(".CRT$XIZ", long, read) // NOLINT -static __declspec(allocate(".CRT$XIZ")) +__declspec(allocate(".CRT$XIZ")) int (*__intercept_seh)() = __asan_set_seh_filter; #endif - +// }}} } // namespace __asan #endif // _WIN32 diff --git a/libsanitizer/asan/asan_win_dll_thunk.cc b/libsanitizer/asan/asan_win_dll_thunk.cc index 6adb7d2e942..19d73f9f89e 100644 --- a/libsanitizer/asan/asan_win_dll_thunk.cc +++ b/libsanitizer/asan/asan_win_dll_thunk.cc @@ -19,7 +19,7 @@ // simplifies the build procedure. #ifdef ASAN_DLL_THUNK #include "asan_init_version.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" // ---------- Function interception helper functions and macros ----------- {{{1 extern "C" { @@ -28,8 +28,9 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name); void abort(); } -static void *getRealProcAddressOrDie(const char *name) { - void *ret = GetProcAddress(GetModuleHandleA(0), name); +static uptr getRealProcAddressOrDie(const char *name) { + uptr ret = + __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name); if (!ret) abort(); return ret; @@ -60,13 +61,12 @@ struct FunctionInterceptor<0> { }; #define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \ - template<> struct FunctionInterceptor<__LINE__> { \ + template <> struct FunctionInterceptor<__LINE__> { \ static void Execute() { \ - void *wrapper = getRealProcAddressOrDie(main_function); \ - if (!__interception::OverrideFunction((uptr)dll_function, \ - (uptr)wrapper, 0)) \ + uptr wrapper = getRealProcAddressOrDie(main_function); \ + if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \ abort(); \ - FunctionInterceptor<__LINE__-1>::Execute(); \ + FunctionInterceptor<__LINE__ - 1>::Execute(); \ } \ }; @@ -208,7 +208,7 @@ extern "C" { // __asan_init is expected to be called by only one thread. if (fn) return; - fn = (fntype)getRealProcAddressOrDie(__asan_init_name); + fn = (fntype)getRealProcAddressOrDie("__asan_init"); fn(); __asan_option_detect_stack_use_after_return = (__asan_should_detect_stack_use_after_return() != 0); @@ -217,6 +217,10 @@ extern "C" { } } +extern "C" void __asan_version_mismatch_check() { + // Do nothing. +} + INTERFACE_FUNCTION(__asan_handle_no_return) INTERFACE_FUNCTION(__asan_report_store1) @@ -292,7 +296,45 @@ INTERFACE_FUNCTION(__asan_stack_free_8) INTERFACE_FUNCTION(__asan_stack_free_9) INTERFACE_FUNCTION(__asan_stack_free_10) +// FIXME: we might want to have a sanitizer_win_dll_thunk? 
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container) +INTERFACE_FUNCTION(__sanitizer_cov) +INTERFACE_FUNCTION(__sanitizer_cov_dump) +INTERFACE_FUNCTION(__sanitizer_cov_indir_call16) +INTERFACE_FUNCTION(__sanitizer_cov_init) INTERFACE_FUNCTION(__sanitizer_cov_module_init) +INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block) +INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter) +INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp) +INTERFACE_FUNCTION(__sanitizer_cov_trace_switch) +INTERFACE_FUNCTION(__sanitizer_cov_with_check) +INTERFACE_FUNCTION(__sanitizer_get_allocated_size) +INTERFACE_FUNCTION(__sanitizer_get_coverage_guards) +INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes) +INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size) +INTERFACE_FUNCTION(__sanitizer_get_free_bytes) +INTERFACE_FUNCTION(__sanitizer_get_heap_size) +INTERFACE_FUNCTION(__sanitizer_get_ownership) +INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage) +INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes) +INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file) +INTERFACE_FUNCTION(__sanitizer_print_stack_trace) +INTERFACE_FUNCTION(__sanitizer_ptr_cmp) +INTERFACE_FUNCTION(__sanitizer_ptr_sub) +INTERFACE_FUNCTION(__sanitizer_report_error_summary) +INTERFACE_FUNCTION(__sanitizer_reset_coverage) +INTERFACE_FUNCTION(__sanitizer_get_number_of_counters) +INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters) +INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify) +INTERFACE_FUNCTION(__sanitizer_set_death_callback) +INTERFACE_FUNCTION(__sanitizer_set_report_path) +INTERFACE_FUNCTION(__sanitizer_unaligned_load16) +INTERFACE_FUNCTION(__sanitizer_unaligned_load32) +INTERFACE_FUNCTION(__sanitizer_unaligned_load64) +INTERFACE_FUNCTION(__sanitizer_unaligned_store16) +INTERFACE_FUNCTION(__sanitizer_unaligned_store32) +INTERFACE_FUNCTION(__sanitizer_unaligned_store64) +INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container) // TODO(timurrrr): Add more interface functions on the as-needed basis. @@ -342,11 +384,15 @@ INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT INTERCEPT_LIBRARY_FUNCTION(strchr); INTERCEPT_LIBRARY_FUNCTION(strcmp); INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strcspn); INTERCEPT_LIBRARY_FUNCTION(strlen); INTERCEPT_LIBRARY_FUNCTION(strncat); INTERCEPT_LIBRARY_FUNCTION(strncmp); INTERCEPT_LIBRARY_FUNCTION(strncpy); INTERCEPT_LIBRARY_FUNCTION(strnlen); +INTERCEPT_LIBRARY_FUNCTION(strpbrk); +INTERCEPT_LIBRARY_FUNCTION(strspn); +INTERCEPT_LIBRARY_FUNCTION(strstr); INTERCEPT_LIBRARY_FUNCTION(strtol); INTERCEPT_LIBRARY_FUNCTION(wcslen); diff --git a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc index 1b59677eeff..69da2a83c47 100644 --- a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc +++ b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc @@ -13,7 +13,8 @@ // // This includes: // - forwarding the detect_stack_use_after_return runtime option -// - installing a custom SEH handler +// - working around deficiencies of the MD runtime +// - installing a custom SEH handlerx // //===----------------------------------------------------------------------===// @@ -21,10 +22,15 @@ // Using #ifdef rather than relying on Makefiles etc. // simplifies the build procedure. 
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK -extern "C" { -__declspec(dllimport) int __asan_set_seh_filter(); -__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); +#include +// First, declare CRT sections we'll be using in this file +#pragma section(".CRT$XID", long, read) // NOLINT +#pragma section(".CRT$XIZ", long, read) // NOLINT +#pragma section(".CRT$XTW", long, read) // NOLINT +#pragma section(".CRT$XTY", long, read) // NOLINT + +//////////////////////////////////////////////////////////////////////////////// // Define a copy of __asan_option_detect_stack_use_after_return that should be // used when linking an MD runtime with a set of object files on Windows. // @@ -35,16 +41,55 @@ __declspec(dllimport) int __asan_should_detect_stack_use_after_return(); // with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows // just to work around this issue, let's clone the a variable that is // constant after initialization anyways. +extern "C" { +__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); int __asan_option_detect_stack_use_after_return = __asan_should_detect_stack_use_after_return(); +} + +//////////////////////////////////////////////////////////////////////////////// +// For some reason, the MD CRT doesn't call the C/C++ terminators during on DLL +// unload or on exit. ASan relies on LLVM global_dtors to call +// __asan_unregister_globals on these events, which unfortunately doesn't work +// with the MD runtime, see PR22545 for the details. +// To work around this, for each DLL we schedule a call to UnregisterGlobals +// using atexit() that calls a small subset of C terminators +// where LLVM global_dtors is placed. Fingers crossed, no other C terminators +// are there. +extern "C" void __cdecl _initterm(void *a, void *b); + +namespace { +__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0; +__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0; + +void UnregisterGlobals() { + _initterm(&before_global_dtors, &after_global_dtors); +} + +int ScheduleUnregisterGlobals() { + return atexit(UnregisterGlobals); +} + +// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after +// atexit() is initialized (.CRT$XIC). As this is executed before C++ +// initializers (think ctors for globals), UnregisterGlobals gets executed after +// dtors for C++ globals. +__declspec(allocate(".CRT$XID")) +int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals; + +} // namespace + +//////////////////////////////////////////////////////////////////////////////// +// ASan SEH handling. +// We need to set the ASan-specific SEH handler at the end of CRT initialization +// of each module (see also asan_win.cc). +extern "C" { +__declspec(dllimport) int __asan_set_seh_filter(); +static int SetSEHFilter() { return __asan_set_seh_filter(); } -// Set the ASan-specific SEH handler at the end of CRT initialization of each -// module (see asan_win.cc for the details). -// // Unfortunately, putting a pointer to __asan_set_seh_filter into // __asan_intercept_seh gets optimized out, so we have to use an extra function. 
-static int SetSEHFilter() { return __asan_set_seh_filter(); } -#pragma section(".CRT$XIZ", long, read) // NOLINT __declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter; } + #endif // ASAN_DYNAMIC_RUNTIME_THUNK diff --git a/libsanitizer/asan/libtool-version b/libsanitizer/asan/libtool-version index f18f40737c7..7e838a5740b 100644 --- a/libsanitizer/asan/libtool-version +++ b/libsanitizer/asan/libtool-version @@ -3,4 +3,4 @@ # a separate file so that version updates don't involve re-running # automake. # CURRENT:REVISION:AGE -2:0:0 +3:0:0 diff --git a/libsanitizer/configure b/libsanitizer/configure index 809f0d76bd3..80655f05e97 100755 --- a/libsanitizer/configure +++ b/libsanitizer/configure @@ -616,6 +616,8 @@ BACKTRACE_SUPPORTED FORMAT_FILE SANITIZER_SUPPORTED_FALSE SANITIZER_SUPPORTED_TRUE +USE_CXX_ABI_FLAG_FALSE +USE_CXX_ABI_FLAG_TRUE USING_MAC_INTERPOSE_FALSE USING_MAC_INTERPOSE_TRUE link_liblsan @@ -12027,7 +12029,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12030 "configure" +#line 12032 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -12133,7 +12135,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12136 "configure" +#line 12138 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -15514,7 +15516,7 @@ done # Common libraries that we need to link against for all sanitizer libs. -link_sanitizer_common='-lpthread -ldl -lm' +link_sanitizer_common='-lrt -lpthread -ldl -lm' # Set up the set of additional libraries that we need to link against for libasan. link_libasan=$link_sanitizer_common @@ -15532,58 +15534,9 @@ link_libubsan=$link_sanitizer_common link_liblsan=$link_sanitizer_common -# At least for glibc, clock_gettime is in librt. But don't pull that -# in if it still doesn't give us the function we want. This -# test is copied from libgomp. -if test $ac_cv_func_clock_gettime = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5 -$as_echo_n "checking for clock_gettime in -lrt... " >&6; } -if test "${ac_cv_lib_rt_clock_gettime+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lrt $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char clock_gettime (); -int -main () -{ -return clock_gettime (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_rt_clock_gettime=yes -else - ac_cv_lib_rt_clock_gettime=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_rt_clock_gettime" >&5 -$as_echo "$ac_cv_lib_rt_clock_gettime" >&6; } -if test "x$ac_cv_lib_rt_clock_gettime" = x""yes; then : - link_libasan="-lrt $link_libasan" -link_libtsan="-lrt $link_libtsan" -# Other sanitizers do not override clock_* API - -fi - -fi - case "$host" in - *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; - *) MAC_INTERPOSE=false ;; + *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;; + *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;; esac if $MAC_INTERPOSE; then USING_MAC_INTERPOSE_TRUE= @@ -15593,6 +15546,14 @@ else USING_MAC_INTERPOSE_FALSE= fi + if $CXX_ABI_NEEDED; then + USE_CXX_ABI_FLAG_TRUE= + USE_CXX_ABI_FLAG_FALSE='#' +else + USE_CXX_ABI_FLAG_TRUE='#' + USE_CXX_ABI_FLAG_FALSE= +fi + backtrace_supported=yes @@ -16550,6 +16511,10 @@ if test -z "${USING_MAC_INTERPOSE_TRUE}" && test -z "${USING_MAC_INTERPOSE_FALSE as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${USE_CXX_ABI_FLAG_TRUE}" && test -z "${USE_CXX_ABI_FLAG_FALSE}"; then + as_fn_error "conditional \"USE_CXX_ABI_FLAG\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${SANITIZER_SUPPORTED_TRUE}" && test -z "${SANITIZER_SUPPORTED_FALSE}"; then as_fn_error "conditional \"SANITIZER_SUPPORTED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 diff --git a/libsanitizer/configure.ac b/libsanitizer/configure.ac index 11e2d9961d6..ece15ed13dc 100644 --- a/libsanitizer/configure.ac +++ b/libsanitizer/configure.ac @@ -96,7 +96,7 @@ AM_CONDITIONAL(LSAN_SUPPORTED, [test "x$LSAN_SUPPORTED" = "xyes"]) AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime) # Common libraries that we need to link against for all sanitizer libs. -link_sanitizer_common='-lpthread -ldl -lm' +link_sanitizer_common='-lrt -lpthread -ldl -lm' # Set up the set of additional libraries that we need to link against for libasan. link_libasan=$link_sanitizer_common @@ -114,22 +114,12 @@ AC_SUBST(link_libubsan) link_liblsan=$link_sanitizer_common AC_SUBST(link_liblsan) -# At least for glibc, clock_gettime is in librt. But don't pull that -# in if it still doesn't give us the function we want. This -# test is copied from libgomp. 
-if test $ac_cv_func_clock_gettime = no; then - AC_CHECK_LIB(rt, clock_gettime, - [link_libasan="-lrt $link_libasan" -link_libtsan="-lrt $link_libtsan" -# Other sanitizers do not override clock_* API -]) -fi - case "$host" in - *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; - *) MAC_INTERPOSE=false ;; + *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;; + *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;; esac AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE) +AM_CONDITIONAL(USE_CXX_ABI_FLAG, $CXX_ABI_NEEDED) backtrace_supported=yes diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt index ac0eb7a4460..1ba3718cdbe 100644 --- a/libsanitizer/configure.tgt +++ b/libsanitizer/configure.tgt @@ -35,6 +35,9 @@ case "${target}" in arm*-*-linux*) ;; aarch64*-*-linux*) + if test x$ac_cv_sizeof_void_p = x8; then + TSAN_SUPPORTED=yes + fi ;; x86_64-*-darwin[1]* | i?86-*-darwin[1]*) TSAN_SUPPORTED=no diff --git a/libsanitizer/include/sanitizer/asan_interface.h b/libsanitizer/include/sanitizer/asan_interface.h index 023fa29c60f..448a0bcba01 100644 --- a/libsanitizer/include/sanitizer/asan_interface.h +++ b/libsanitizer/include/sanitizer/asan_interface.h @@ -108,12 +108,7 @@ extern "C" { void __asan_report_error(void *pc, void *bp, void *sp, void *addr, int is_write, size_t access_size); - // Sets the exit code to use when reporting an error. - // Returns the old value. - int __asan_set_error_exit_code(int exit_code); - - // Sets the callback to be called right before death on error. - // Passing 0 will unset the callback. + // Deprecated. Call __sanitizer_set_death_callback instead. void __asan_set_death_callback(void (*callback)(void)); void __asan_set_error_report_callback(void (*callback)(const char*)); diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h index 3aba519327e..6a97567fea6 100644 --- a/libsanitizer/include/sanitizer/common_interface_defs.h +++ b/libsanitizer/include/sanitizer/common_interface_defs.h @@ -60,15 +60,6 @@ extern "C" { void __sanitizer_unaligned_store32(void *p, uint32_t x); void __sanitizer_unaligned_store64(void *p, uint64_t x); - // Initialize coverage. - void __sanitizer_cov_init(); - // Record and dump coverage info. - void __sanitizer_cov_dump(); - // Open .sancov.packed in the coverage directory and return the file - // descriptor. Returns -1 on failure, or if coverage dumping is disabled. - // This is intended for use by sandboxing code. - intptr_t __sanitizer_maybe_open_cov_file(const char *name); - // Annotate the current state of a contiguous container, such as // std::vector, std::string or similar. // A contiguous container is a container that keeps all of its elements @@ -115,6 +106,20 @@ extern "C" { // Print the stack trace leading to this call. Useful for debugging user code. void __sanitizer_print_stack_trace(); + // Sets the callback to be called right before death on error. + // Passing 0 will unset the callback. + void __sanitizer_set_death_callback(void (*callback)(void)); + + // Interceptor hooks. + // Whenever a libc function interceptor is called it checks if the + // corresponding weak hook is defined, and it so -- calls it. + // The primary use case is data-flow-guided fuzzing, where the fuzzer needs + // to know what is being passed to libc functions, e.g. memcmp. + // FIXME: implement more hooks. 
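As a consumer-side sketch (the hook prototypes are declared just below), a fuzzer can provide a strong definition of one of these hooks, and the corresponding libc interceptor will then invoke it:

  #include <sanitizer/common_interface_defs.h>

  // Called from the memcmp interceptor with the caller PC and the compared
  // buffers; a fuzzer would typically record (s1, s2, n) to guide mutations.
  extern "C" void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
                                               const void *s2, size_t n) {
    (void)called_pc; (void)s1; (void)s2; (void)n;  // sketch: observe only
  }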
+ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1, + const void *s2, size_t n); + void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1, + const char *s2, size_t n); #ifdef __cplusplus } // extern "C" #endif diff --git a/libsanitizer/include/sanitizer/coverage_interface.h b/libsanitizer/include/sanitizer/coverage_interface.h new file mode 100644 index 00000000000..1b7e2a4cdd8 --- /dev/null +++ b/libsanitizer/include/sanitizer/coverage_interface.h @@ -0,0 +1,61 @@ +//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Public interface for sanitizer coverage. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_COVERAG_INTERFACE_H +#define SANITIZER_COVERAG_INTERFACE_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + + // Initialize coverage. + void __sanitizer_cov_init(); + // Record and dump coverage info. + void __sanitizer_cov_dump(); + // Open .sancov.packed in the coverage directory and return the file + // descriptor. Returns -1 on failure, or if coverage dumping is disabled. + // This is intended for use by sandboxing code. + intptr_t __sanitizer_maybe_open_cov_file(const char *name); + // Get the number of total unique covered entities (blocks, edges, calls). + // This can be useful for coverage-directed in-process fuzzers. + uintptr_t __sanitizer_get_total_unique_coverage(); + + // Reset the basic-block (edge) coverage to the initial state. + // Useful for in-process fuzzing to start collecting coverage from scratch. + // Experimental, will likely not work for multi-threaded process. + void __sanitizer_reset_coverage(); + // Set *data to the array of covered PCs and return the size of that array. + // Some of the entries in *data will be zero. + uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data); + + // The coverage instrumentation may optionally provide imprecise counters. + // Rather than exposing the counter values to the user we instead map + // the counters to a bitset. + // Every counter is associated with 8 bits in the bitset. + // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+ + // The i-th bit is set to 1 if the counter value is in the i-th range. + // This counter-based coverage implementation is *not* thread-safe. + + // Returns the number of registered coverage counters. + uintptr_t __sanitizer_get_number_of_counters(); + // Updates the counter 'bitset', clears the counters and returns the number of + // new bits in 'bitset'. + // If 'bitset' is nullptr, only clears the counters. + // Otherwise 'bitset' should be at least + // __sanitizer_get_number_of_counters bytes long and 8-aligned. + uintptr_t + __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset); +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SANITIZER_COVERAG_INTERFACE_H diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h index c1b160205a7..0cebccf945e 100644 --- a/libsanitizer/include/sanitizer/dfsan_interface.h +++ b/libsanitizer/include/sanitizer/dfsan_interface.h @@ -83,6 +83,24 @@ size_t dfsan_get_label_count(void); /// callback executes. Pass in NULL to remove any callback. 
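A consumer-side sketch for the declaration that follows, assuming dfsan_write_callback_t carries the usual write() triple (fd, buffer, count) as typedef'd earlier in this header:

  #include <sanitizer/dfsan_interface.h>
  #include <stdio.h>

  // Invoked by DFSan before each write() performed by the instrumented program.
  static void log_write(int fd, const void *buf, size_t count) {
    (void)buf;  // buffer contents are not inspected in this sketch
    fprintf(stderr, "labeled write: fd=%d, %zu bytes\n", fd, count);
  }

  int main() {
    dfsan_set_write_callback(log_write);
    // ... run the workload; dfsan_set_write_callback(NULL) removes the hook ...
    return 0;
  }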
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback); +/// Writes the labels currently used by the program to the given file +/// descriptor. The lines of the output have the following format: +/// +///