-b638b63b99d66786cb37336292604a2ae3490cfd
+51ff04567b2f8d06b2062bd3ed72eab2e93e4466
The first line of this file holds the git revision number of the
last merge done from the master library sources.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_quarantine.h"
-#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
static AsanAllocator &get_allocator();
+static void AtomicContextStore(volatile atomic_uint64_t *atomic_context,
+ u32 tid, u32 stack) {
+ u64 context = tid;
+ context <<= 32;
+ context += stack;
+ atomic_store(atomic_context, context, memory_order_relaxed);
+}
+
+static void AtomicContextLoad(const volatile atomic_uint64_t *atomic_context,
+ u32 &tid, u32 &stack) {
+ u64 context = atomic_load(atomic_context, memory_order_relaxed);
+ stack = context;
+ context >>= 32;
+ tid = context;
+}
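+
+// A quick illustration of the packing (hypothetical values): tid occupies
+// the high 32 bits and stack the low 32 bits of one relaxed 64-bit atomic,
+// so readers always observe a matched pair:
+//   atomic_uint64_t ctx = {};
+//   AtomicContextStore(&ctx, /*tid=*/7, /*stack=*/42);
+//   u32 tid, stack;
+//   AtomicContextLoad(&ctx, tid, stack);  // now tid == 7, stack == 42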
+
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
// ---------------------|
// M -- magic value kAllocBegMagic
// B -- address of ChunkHeader pointing to the first 'H'
-static const uptr kAllocBegMagic = 0xCC6E96B9;
-
-struct ChunkHeader {
- // 1-st 8 bytes.
- u32 chunk_state : 8; // Must be first.
- u32 alloc_tid : 24;
-
- u32 free_tid : 24;
- u32 from_memalign : 1;
- u32 alloc_type : 2;
- u32 rz_log : 3;
- u32 lsan_tag : 2;
- // 2-nd 8 bytes
- // This field is used for small sizes. For large sizes it is equal to
- // SizeClassMap::kMaxSize and the actual size is stored in the
- // SecondaryAllocator's metadata.
- u32 user_requested_size : 29;
+
+class ChunkHeader {
+ public:
+ atomic_uint8_t chunk_state;
+ u8 alloc_type : 2;
+ u8 lsan_tag : 2;
+
// align < 8 -> 0
// else -> log2(min(align, 512)) - 2
- u32 user_requested_alignment_log : 3;
- u32 alloc_context_id;
+ u8 user_requested_alignment_log : 3;
+
+ private:
+ u16 user_requested_size_hi;
+ u32 user_requested_size_lo;
+ atomic_uint64_t alloc_context_id;
+
+ public:
+ uptr UsedSize() const {
+ uptr R = user_requested_size_lo;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo))
+ R += (uptr)user_requested_size_hi << (8 * sizeof(user_requested_size_lo));
+ return R;
+ }
+
+ void SetUsedSize(uptr size) {
+ user_requested_size_lo = size;
+ if (sizeof(uptr) > sizeof(user_requested_size_lo)) {
+ size >>= (8 * sizeof(user_requested_size_lo));
+ user_requested_size_hi = size;
+ CHECK_EQ(user_requested_size_hi, size);
+ }
+ }
+
+ void SetAllocContext(u32 tid, u32 stack) {
+ AtomicContextStore(&alloc_context_id, tid, stack);
+ }
+
+ void GetAllocContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&alloc_context_id, tid, stack);
+ }
};
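
// Illustration (hypothetical value): on a 64-bit target,
// SetUsedSize(0x123456789a) stores user_requested_size_lo == 0x3456789a and
// user_requested_size_hi == 0x12; UsedSize() reassembles 0x123456789a.
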
-struct ChunkBase : ChunkHeader {
- // Header2, intersects with user memory.
- u32 free_context_id;
+class ChunkBase : public ChunkHeader {
+ atomic_uint64_t free_context_id;
+
+ public:
+ void SetFreeContext(u32 tid, u32 stack) {
+ AtomicContextStore(&free_context_id, tid, stack);
+ }
+
+ void GetFreeContext(u32 &tid, u32 &stack) const {
+ AtomicContextLoad(&free_context_id, tid, stack);
+ }
};
static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 16);
COMPILER_CHECK(kChunkHeader2Size <= 16);
-// Every chunk of memory allocated by this allocator can be in one of 3 states:
-// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
-// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
-// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
enum {
- CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
- CHUNK_ALLOCATED = 2,
- CHUNK_QUARANTINE = 3
+ // Either just allocated by the underlying allocator, but the AsanChunk is
+ // not yet ready, or almost returned to the underlying allocator and the
+ // AsanChunk is already meaningless.
+ CHUNK_INVALID = 0,
+ // The chunk is allocated and not yet freed.
+ CHUNK_ALLOCATED = 2,
+ // The chunk was freed and put into quarantine zone.
+ CHUNK_QUARANTINE = 3,
};
-struct AsanChunk: ChunkBase {
+class AsanChunk : public ChunkBase {
+ public:
uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
- uptr UsedSize(bool locked_version = false) {
- if (user_requested_size != SizeClassMap::kMaxSize)
- return user_requested_size;
- return *reinterpret_cast<uptr *>(
- get_allocator().GetMetaData(AllocBeg(locked_version)));
+ bool AddrIsInside(uptr addr) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize());
}
- void *AllocBeg(bool locked_version = false) {
- if (from_memalign) {
- if (locked_version)
- return get_allocator().GetBlockBeginFastLocked(
- reinterpret_cast<void *>(this));
- return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
- }
- return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+};
+
+class LargeChunkHeader {
+ static constexpr uptr kAllocBegMagic =
+ FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
+ atomic_uintptr_t magic;
+ AsanChunk *chunk_header;
+
+ public:
+ AsanChunk *Get() const {
+ return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
+ ? chunk_header
+ : nullptr;
}
- bool AddrIsInside(uptr addr, bool locked_version = false) {
- return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+
+ void Set(AsanChunk *p) {
+ if (p) {
+ chunk_header = p;
+ atomic_store(&magic, kAllocBegMagic, memory_order_release);
+ return;
+ }
+
+ uptr old = kAllocBegMagic;
+ if (!atomic_compare_exchange_strong(&magic, &old, 0,
+ memory_order_release)) {
+ CHECK_EQ(old, kAllocBegMagic);
+ }
}
};
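
// Publication order matters above: Set(p) writes chunk_header before the
// release store of kAllocBegMagic, so a Get() whose acquire load observes
// the magic also observes the matching chunk_header. Sketch of the intended
// use (hypothetical values):
//   LargeChunkHeader *h = reinterpret_cast<LargeChunkHeader *>(alloc_beg);
//   h->Set(m);                // publish m
//   AsanChunk *p = h->Get();  // either nullptr or exactly m
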
}
void Recycle(AsanChunk *m) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
- CHECK_NE(m->alloc_tid, kInvalidTid);
- CHECK_NE(m->free_tid, kInvalidTid);
- PoisonShadow(m->Beg(),
- RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
- kAsanHeapLeftRedzoneMagic);
- void *p = reinterpret_cast<void *>(m->AllocBeg());
+ void *p = get_allocator().GetBlockBegin(m);
if (p != m) {
- uptr *alloc_magic = reinterpret_cast<uptr *>(p);
- CHECK_EQ(alloc_magic[0], kAllocBegMagic);
// Clear the magic value, as allocator internals may overwrite the
// contents of deallocated chunk, confusing GetAsanChunk lookup.
- alloc_magic[0] = 0;
- CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ reinterpret_cast<LargeChunkHeader *>(p)->Set(nullptr);
+ }
+
+ u8 old_chunk_state = CHUNK_QUARANTINE;
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+ CHUNK_INVALID, memory_order_acquire)) {
+ CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
}
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+
// Statistics.
AsanStats &thread_stats = GetCurrentThreadStats();
thread_stats.real_frees++;
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
- uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
- uptr beg = ac->Beg();
- uptr end = ac->Beg() + ac->UsedSize(true);
- uptr chunk_end = chunk + allocated_size;
- if (chunk < beg && beg < end && end <= chunk_end &&
- ac->chunk_state == CHUNK_ALLOCATED) {
- // Looks like a valid AsanChunk in use, poison redzones only.
- PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
- uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
- FastPoisonShadowPartialRightRedzone(
- end_aligned_down, end - end_aligned_down,
- chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
- } else {
- // This is either not an AsanChunk or freed or quarantined AsanChunk.
- // In either case, poison everything.
- PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)chunk);
+ if (ac && atomic_load(&ac->chunk_state, memory_order_acquire) ==
+ CHUNK_ALLOCATED) {
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize();
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk in use, poison redzones only.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ return;
+ }
}
+
+ // This is either not an AsanChunk or freed or quarantined AsanChunk.
+ // In either case, poison everything.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
void ReInitialize(const AllocatorOptions &options) {
// -------------------- Helper methods. -------------------------
uptr ComputeRZLog(uptr user_requested_size) {
- u32 rz_log =
- user_requested_size <= 64 - 16 ? 0 :
- user_requested_size <= 128 - 32 ? 1 :
- user_requested_size <= 512 - 64 ? 2 :
- user_requested_size <= 4096 - 128 ? 3 :
- user_requested_size <= (1 << 14) - 256 ? 4 :
- user_requested_size <= (1 << 15) - 512 ? 5 :
- user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
- u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
- u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
- return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ u32 rz_log = user_requested_size <= 64 - 16 ? 0
+ : user_requested_size <= 128 - 32 ? 1
+ : user_requested_size <= 512 - 64 ? 2
+ : user_requested_size <= 4096 - 128 ? 3
+ : user_requested_size <= (1 << 14) - 256 ? 4
+ : user_requested_size <= (1 << 15) - 512 ? 5
+ : user_requested_size <= (1 << 16) - 1024 ? 6
+ : 7;
+ u32 hdr_log = RZSize2Log(RoundUpToPowerOfTwo(sizeof(ChunkHeader)));
+ u32 min_log = RZSize2Log(atomic_load(&min_redzone, memory_order_acquire));
+ u32 max_log = RZSize2Log(atomic_load(&max_redzone, memory_order_acquire));
+ return Min(Max(rz_log, Max(min_log, hdr_log)), Max(max_log, hdr_log));
}
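
// Worked example (hypothetical size): user_requested_size == 100 falls into
// the 512 - 64 bucket, so the ladder picks rz_log == 2, i.e. a 64-byte
// redzone (RZLog2Size(2) == 64). The result is then clamped to at least
// Max(min_log, hdr_log) and at most Max(max_log, hdr_log), so the redzone is
// never smaller than the ChunkHeader.
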
static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
+ if (!left_chunk)
+ return right_chunk;
+ if (!right_chunk)
+ return left_chunk;
// Prefer an allocated chunk over freed chunk and freed chunk
// over available chunk.
- if (left_chunk->chunk_state != right_chunk->chunk_state) {
- if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ u8 left_state = atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
+ u8 right_state =
+ atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
+ if (left_state != right_state) {
+ if (left_state == CHUNK_ALLOCATED)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ if (right_state == CHUNK_ALLOCATED)
return right_chunk;
- if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (left_state == CHUNK_QUARANTINE)
return left_chunk;
- if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ if (right_state == CHUNK_QUARANTINE)
return right_chunk;
}
// Same chunk_state: choose based on offset.
bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
AsanChunk *m = GetAsanChunkByAddr(addr);
if (!m) return false;
- if (m->chunk_state != CHUNK_ALLOCATED) return false;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return false;
if (m->Beg() != addr) return false;
- atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
- memory_order_relaxed);
+ AsanThread *t = GetCurrentThread();
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
return true;
}
uptr needed_size = rounded_size + rz_size;
if (alignment > min_alignment)
needed_size += alignment;
- bool using_primary_allocator = true;
// If we are allocating from the secondary allocator, there will be no
// automatic right redzone, so add the right redzone manually.
- if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment))
needed_size += rz_size;
- using_primary_allocator = false;
- }
CHECK(IsAligned(needed_size, min_alignment));
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
size > max_user_defined_malloc_size) {
uptr alloc_beg = reinterpret_cast<uptr>(allocated);
uptr alloc_end = alloc_beg + needed_size;
- uptr beg_plus_redzone = alloc_beg + rz_size;
- uptr user_beg = beg_plus_redzone;
+ uptr user_beg = alloc_beg + rz_size;
if (!IsAligned(user_beg, alignment))
user_beg = RoundUpTo(user_beg, alignment);
uptr user_end = user_beg + size;
uptr chunk_beg = user_beg - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
m->alloc_type = alloc_type;
- m->rz_log = rz_log;
- u32 alloc_tid = t ? t->tid() : 0;
- m->alloc_tid = alloc_tid;
- CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
- m->free_tid = kInvalidTid;
- m->from_memalign = user_beg != beg_plus_redzone;
- if (alloc_beg != chunk_beg) {
- CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg);
- reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
- reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
- }
- if (using_primary_allocator) {
- CHECK(size);
- m->user_requested_size = size;
- CHECK(allocator.FromPrimary(allocated));
- } else {
- CHECK(!allocator.FromPrimary(allocated));
- m->user_requested_size = SizeClassMap::kMaxSize;
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
- meta[0] = size;
- meta[1] = chunk_beg;
- }
+ CHECK(size);
+ m->SetUsedSize(size);
m->user_requested_alignment_log = user_requested_alignment_log;
- m->alloc_context_id = StackDepotPut(*stack);
+ m->SetAllocContext(t ? t->tid() : 0, StackDepotPut(*stack));
uptr size_rounded_down_to_granularity =
RoundDownTo(size, SHADOW_GRANULARITY);
: __lsan::kDirectlyLeaked;
#endif
// Must be the last mutation of metadata in this function.
- atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
+ reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
+ }
ASAN_MALLOC_HOOK(res, size);
return res;
}
// Set quarantine flag if chunk is allocated, issue ASan error report on
// available and quarantined chunks. Return true on success, false otherwise.
bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
- BufferedStackTrace *stack) {
+ BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+ if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
CHUNK_QUARANTINE,
memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
return false;
}
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ // Until now this field overlapped user data; reset it so readers do not
+ // mistake leftover user bytes for a free context.
+ m->SetFreeContext(kInvalidTid, 0);
return true;
}
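
// Consequence for double frees: two threads racing to free the same pointer
// both try to flip chunk_state from CHUNK_ALLOCATED to CHUNK_QUARANTINE;
// exactly one CAS succeeds, and the loser sees CHUNK_QUARANTINE in
// old_chunk_state and reports the invalid free.
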
// Expects the chunk to already be marked as quarantined by using
// AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
- CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
- CHECK_GE(m->alloc_tid, 0);
- if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
- CHECK_EQ(m->free_tid, kInvalidTid);
+ CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
+ CHUNK_QUARANTINE);
AsanThread *t = GetCurrentThread();
- m->free_tid = t ? t->tid() : 0;
- m->free_context_id = StackDepotPut(*stack);
+ m->SetFreeContext(t ? t->tid() : 0, StackDepotPut(*stack));
Flags &fl = *flags();
if (fl.max_free_fill_size > 0) {
void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
if (new_ptr) {
- u8 chunk_state = m->chunk_state;
+ u8 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
if (chunk_state != CHUNK_ALLOCATED)
ReportInvalidFree(old_ptr, chunk_state, stack);
CHECK_NE(REAL(memcpy), nullptr);
// -------------------------- Chunk lookup ----------------------
// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ // Returns nullptr if the AsanChunk is not yet initialized (just after
+ // get_allocator().Allocate()) or is already being destroyed (just before
+ // get_allocator().Deallocate()).
AsanChunk *GetAsanChunk(void *alloc_beg) {
- if (!alloc_beg) return nullptr;
- if (!allocator.FromPrimary(alloc_beg)) {
- uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
- AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
- return m;
+ if (!alloc_beg)
+ return nullptr;
+ AsanChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
+ if (!p) {
+ if (!allocator.FromPrimary(alloc_beg))
+ return nullptr;
+ p = reinterpret_cast<AsanChunk *>(alloc_beg);
}
- uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
- if (alloc_magic[0] == kAllocBegMagic)
- return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
- return reinterpret_cast<AsanChunk *>(alloc_beg);
+ u8 state = atomic_load(&p->chunk_state, memory_order_relaxed);
+ // This does not guarantee that the chunk is initialized, but the pointer
+ // is definitely not a valid chunk for any other state value.
+ if (state == CHUNK_ALLOCATED || state == CHUNK_QUARANTINE)
+ return p;
+ return nullptr;
}
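
// Lookup summary: if a LargeChunkHeader magic is present at alloc_beg it
// points at the AsanChunk; otherwise the AsanChunk can only be at alloc_beg
// itself, which happens for primary allocations whose left redzone is just
// the ChunkHeader. A chunk_state other than CHUNK_ALLOCATED or
// CHUNK_QUARANTINE means the chunk is not (or not yet) valid.
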
AsanChunk *GetAsanChunkByAddr(uptr p) {
uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
- if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+ return 0;
if (m->Beg() != p) return 0;
return m->UsedSize();
}
AsanChunkView FindHeapChunkByAddress(uptr addr) {
AsanChunk *m1 = GetAsanChunkByAddr(addr);
- if (!m1) return AsanChunkView(m1);
sptr offset = 0;
- if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+ if (!m1 || AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
// The address is in the chunk's left redzone, so maybe it is actually
// a right buffer overflow from the other chunk to the left.
// Search a bit to the left to see if there is another chunk.
}
bool AsanChunkView::IsValid() const {
- return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
+ CHUNK_INVALID;
}
bool AsanChunkView::IsAllocated() const {
- return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_ALLOCATED;
}
bool AsanChunkView::IsQuarantined() const {
- return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+ return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+ CHUNK_QUARANTINE;
}
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
-uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
-uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+
+uptr AsanChunkView::AllocTid() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return tid;
+}
+
+uptr AsanChunkView::FreeTid() const {
+ if (!IsQuarantined())
+ return kInvalidTid;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return tid;
+}
+
AllocType AsanChunkView::GetAllocType() const {
return (AllocType)chunk_->alloc_type;
}
return res;
}
-u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
-u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
+u32 AsanChunkView::GetAllocStackId() const {
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetAllocContext(tid, stack);
+ return stack;
+}
+
+u32 AsanChunkView::GetFreeStackId() const {
+ if (!IsQuarantined())
+ return 0;
+ u32 tid = 0;
+ u32 stack = 0;
+ chunk_->GetFreeContext(tid, stack);
+ return stack;
+}
StackTrace AsanChunkView::GetAllocStack() const {
return GetStackTraceFromId(GetAllocStackId());
instance.SetRssLimitExceeded(limit_exceeded);
}
-} // namespace __asan
+} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
*end = *begin + sizeof(__asan::get_allocator());
}
-uptr PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
- if (!m) return 0;
- uptr chunk = m->Beg();
- if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED)
return 0;
- if (m->AddrIsInside(addr, /*locked_version=*/true))
+ uptr chunk = m->Beg();
+ if (m->AddrIsInside(addr))
return chunk;
- if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
- addr))
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(), addr))
return chunk;
return 0;
}
uptr GetUserBegin(uptr chunk) {
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
- CHECK(m);
- return m->Beg();
+ return m ? m->Beg() : 0;
}
LsanMetadata::LsanMetadata(uptr chunk) {
- metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+ metadata_ = chunk ? reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize)
+ : nullptr;
}
bool LsanMetadata::allocated() const {
+ if (!metadata_)
+ return false;
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->chunk_state == __asan::CHUNK_ALLOCATED;
+ return atomic_load(&m->chunk_state, memory_order_relaxed) ==
+ __asan::CHUNK_ALLOCATED;
}
ChunkTag LsanMetadata::tag() const {
uptr LsanMetadata::requested_size() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->UsedSize(/*locked_version=*/true);
+ return m->UsedSize();
}
u32 LsanMetadata::stack_trace_id() const {
__asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
- return m->alloc_context_id;
+ u32 tid = 0;
+ u32 stack = 0;
+ m->GetAllocContext(tid, stack);
+ return stack;
}
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
uptr addr = reinterpret_cast<uptr>(p);
__asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
- if (!m) return kIgnoreObjectInvalid;
- if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
- if (m->lsan_tag == kIgnored)
- return kIgnoreObjectAlreadyIgnored;
- m->lsan_tag = __lsan::kIgnored;
- return kIgnoreObjectSuccess;
- } else {
+ if (!m ||
+ (atomic_load(&m->chunk_state, memory_order_acquire) !=
+ __asan::CHUNK_ALLOCATED) ||
+ !m->AddrIsInside(addr)) {
return kIgnoreObjectInvalid;
}
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
}
} // namespace __lsan
#define ASAN_ALLOCATOR_H
#include "asan_flags.h"
-#include "asan_internal.h"
#include "asan_interceptors.h"
+#include "asan_internal.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_platform.h"
namespace __asan {
FROM_NEW_BR = 3 // Memory block came from operator new [ ]
};
-struct AsanChunk;
+class AsanChunk;
struct AllocatorOptions {
u32 quarantine_size_mb;
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
typedef VeryCompactSizeClassMap SizeClassMap;
+#elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryDenseSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = 16;
+ static const uptr kMetadataSize = 0;
typedef __asan::SizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = AddressSpaceViewTy;
Flags asan_flags_dont_use_directly; // use via flags().
-static const char *MaybeCallAsanDefaultOptions() {
- return (&__asan_default_options) ? __asan_default_options() : "";
-}
-
static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
#ifdef ASAN_DEFAULT_OPTIONS
return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
asan_parser.ParseString(asan_compile_def);
// Override from user-specified string.
- const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ const char *asan_default_options = __asan_default_options();
asan_parser.ParseString(asan_default_options);
#if CAN_SANITIZE_UB
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
- const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
+ const char *lsan_default_options = __lsan_default_options();
lsan_parser.ParseString(lsan_default_options);
#endif
UNIMPLEMENTED();
}
+bool PlatformUnpoisonStacks() { return false; }
+
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
return false;
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ __sanitizer_fill_shadow(p, size, 0, 0);
+}
+
} // namespace __asan
// These are declared (in extern "C") by <zircon/sanitizer.h>.
}
}
+// Check ODR violation for a given global G by checking if it's already
+// poisoned. We use this method in case the compiler doesn't use private
+// aliases for global variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+ // This check may not be enough: if the first global is much larger, the
+ // entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+}
+
// Clang provides two different ways for global variables protection:
// it can poison the global itself or its private alias. In former
// case we may poison same symbol multiple times, that can help us to
// where two globals with the same name are defined in different modules.
if (UseODRIndicator(g))
CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
#ifndef ASAN_INTERCEPTORS_H
#define ASAN_INTERCEPTORS_H
-#include "asan_internal.h"
#include "asan_interceptors_memintrinsics.h"
+#include "asan_internal.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
namespace __asan {
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
!SANITIZER_NETBSD
# define ASAN_INTERCEPT___CXA_THROW 1
-# if ! defined(ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION) \
- || ASAN_HAS_CXA_RETHROW_PRIMARY_EXCEPTION
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
-# else
-# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
-# endif
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
# else
# define ASAN_INTERCEPT___STRDUP 0
#endif
-#if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \
- defined(__i386__) || defined(__x86_64__))
+#if SANITIZER_LINUX && \
+ (defined(__arm__) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__x86_64__) || SANITIZER_RISCV64)
# define ASAN_INTERCEPT_VFORK 1
#else
# define ASAN_INTERCEPT_VFORK 0
#define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork
#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
-#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_riscv64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
#endif
NO_EXEC_STACK_DIRECTIVE
SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
- const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char *__asan_default_options();
SANITIZER_INTERFACE_ATTRIBUTE
extern uptr __asan_shadow_memory_dynamic_address;
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
+// Unpoisons platform-specific stacks.
+// Returns true if all stacks have been unpoisoned.
+bool PlatformUnpoisonStacks();
+
+// asan_rtl.cpp
+// Unpoison a region containing a stack.
+// Performs a sanity check and warns if the bounds don't look right.
+// The warning contains the type string to identify the stack type.
+void UnpoisonStack(uptr bottom, uptr top, const char *type);
+
// asan_thread.cpp
AsanThread *CreateMainThread();
void *AsanDlSymNext(const char *sym);
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-
// Returns `true` iff most of the ASan init process should be skipped due to
// the ASan library being loaded via `dlopen()`. Platforms may perform any
// `dlopen()` specific initialization inside this function.
return &_DYNAMIC; // defined in link.h
}
-static void UnmapFromTo(uptr from, uptr to) {
- CHECK(to >= from);
- if (to == from) return;
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report(
- "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
- "%p\n",
- to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
-}
-
#if ASAN_PREMAP_SHADOW
-uptr FindPremappedShadowStart() {
+uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
#endif
uptr FindDynamicShadowStart() {
+ uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
- return FindPremappedShadowStart();
+ return FindPremappedShadowStart(shadow_size_bytes);
#endif
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
+ return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
-
- uptr largest_gap_found = 0;
- uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- uptr shadow_start =
- FindAvailableMemoryRange(space_size, alignment, granularity,
- &largest_gap_found, &max_occupied_addr);
- // If the shadow doesn't fit, restrict the address space to make it fit.
- if (shadow_start == 0) {
- VReport(
- 2,
- "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
- uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
- if (new_max_vm < max_occupied_addr) {
- Report("Unable to find a memory range for dynamic shadow.\n");
- Report(
- "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
- "new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
- CHECK(0 && "cannot place shadow");
- }
- RestrictMemoryToMaxAddress(new_max_vm);
- kHighMemEnd = new_max_vm - 1;
- space_size = kHighShadowEnd + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
- nullptr, nullptr);
- if (shadow_start == 0) {
- Report("Unable to find a memory range after restricting VM.\n");
- CHECK(0 && "cannot place shadow after restricting vm");
- }
- }
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
// No-op. Mac does not support static linkage anyway.
op(globals, size / sizeof(__asan_global));
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
-static INLINE bool IsInDlsymAllocPool(const void *ptr) {
+static inline bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
}
#endif
-static INLINE bool MaybeInDlsym() {
+static inline bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
-static INLINE bool UseLocalPool() {
+static inline bool UseLocalPool() {
return EarlyMalloc() || MaybeInDlsym();
}
}
INTERCEPTOR(void, free, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
DeallocateFromLocalPool(ptr);
return;
}
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *ptr) {
- GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return;
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack, FROM_MALLOC);
}
#endif // SANITIZER_INTERCEPT_CFREE
#include "sanitizer_common/sanitizer_platform.h"
#include "asan_internal.h"
-static INLINE bool EarlyMalloc() {
+static inline bool EarlyMalloc() {
return SANITIZER_RTEMS &&
(!__asan::asan_inited || __asan::asan_init_is_running);
}
// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
//
+// RISC-V has only 38 bits for task size.
+// Low mem size is set with kRiscv64_ShadowOffset64 in
+// compiler-rt/lib/asan/asan_allocator.h and in
+// llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp with
+// kRiscv64_ShadowOffset64. The high mem top border is set with
+// GetMaxVirtualAddress() in
+// compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp.
+// Default Linux/RISCV64 Sv39/Sv48 mapping:
+// || `[0x000820000000, 0x003fffffffff]` || HighMem ||
+// || `[0x000124000000, 0x00081fffffff]` || HighShadow ||
+// || `[0x000024000000, 0x000123ffffff]` || ShadowGap ||
+// || `[0x000020000000, 0x000023ffffff]` || LowShadow ||
+// || `[0x000000000000, 0x00001fffffff]` || LowMem ||
+//
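+// Shadow arithmetic check (illustrative): with SHADOW_SCALE == 3 and
+// SHADOW_OFFSET == kRiscv64_ShadowOffset64 == 0x20000000,
+// MEM_TO_SHADOW(0x00001fffffff) == (0x1fffffff >> 3) + 0x20000000 ==
+// 0x23ffffff, i.e. the LowMem top maps to the LowShadow top above.
+//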
// Default Linux/AArch64 (42-bit VMA) mapping:
// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kRiscv64_ShadowOffset64 = 0x20000000;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
-static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
#else
# if SANITIZER_IOS
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_MAC && defined(__aarch64__)
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_RISCV64
+# define SHADOW_OFFSET kRiscv64_ShadowOffset64
# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
namespace __asan {
+static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
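+// E.g. (illustrative): with SHADOW_SCALE == 3, MemToShadowSize(0x4000) ==
+// 0x800 -- every 8 application bytes are covered by one shadow byte.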
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
}
};
-void FlushUnneededASanShadowMemory(uptr p, uptr size) {
- // Since asan's mapping is compacting, the shadow chunk may be
- // not page-aligned, so we only flush the page-aligned portion.
- ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
-}
-
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
uptr end = ptr + size;
if (Verbosity()) {
#include "asan_internal.h"
#include "asan_interceptors.h"
#include "asan_mapping.h"
+#include "asan_poisoning.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h>
+#include <signal.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
ReportDeadlySignal(sig);
}
+bool PlatformUnpoisonStacks() {
+ stack_t signal_stack;
+ CHECK_EQ(0, sigaltstack(nullptr, &signal_stack));
+ uptr sigalt_bottom = (uptr)signal_stack.ss_sp;
+ uptr sigalt_top = (uptr)((char *)signal_stack.ss_sp + signal_stack.ss_size);
+ // If we're executing on the signal alternate stack AND the Linux flag
+ // SS_AUTODISARM was used, then we cannot get the signal alternate stack
+ // bounds from sigaltstack -- sigaltstack's output looks just as if no
+ // alternate stack has ever been set up.
+ // We always unpoison the signal alternate stack to support jumping between
+ // the default stack and the signal alternate stack.
+ if (signal_stack.ss_flags != SS_DISABLE)
+ UnpoisonStack(sigalt_bottom, sigalt_top, "sigalt");
+
+ if (signal_stack.ss_flags != SS_ONSTACK)
+ return false;
+
+ // Since we're on the signal alternate stack, we cannot find the DEFAULT
+ // stack bottom using a local variable.
+ uptr default_bottom, tls_addr, tls_size, stack_size;
+ GetThreadStackAndTls(/*main=*/false, &default_bottom, &stack_size, &tls_addr,
+ &tls_size);
+ UnpoisonStack(default_bottom, default_bottom + stack_size, "default");
+ return true;
+}
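+
+// Contract with the caller: returning false above makes
+// __asan_handle_no_return fall back to UnpoisonDefaultStack(), while
+// returning true means the default stack has already been handled here via
+// GetThreadStackAndTls.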
+
// ---------------------- TSD ---------------- {{{1
#if SANITIZER_NETBSD && !ASAN_DYNAMIC
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = PremapShadowSize();
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- uptr shadow_end = shadow_start + shadow_size;
- internal_munmap(reinterpret_cast<void *>(map_start),
- shadow_start - left_padding - map_start);
- internal_munmap(reinterpret_cast<void *>(shadow_end),
- map_start + map_size - shadow_end);
- return shadow_start;
+ return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
bool PremapShadowFailed() {
return false;
}
-static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+static inline void CheckForInvalidPointerPair(void *p1, void *p2) {
switch (flags()->detect_invalid_pointer_pairs) {
case 0:
return;
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
UNIMPLEMENTED();
}
+bool PlatformUnpoisonStacks() { return false; }
+
void EarlyInit() {
// Provide early initialization of shadow memory so that
// instrumented code running before full initialization will not
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
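+ // E.g. (illustrative, 4K mmap granularity, SHADOW_SCALE == 3): the mask is
+ // (0x1000 << 3) - 1 == 0x7fff, so kHighMemEnd + 1 is 32K-aligned and its
+ // shadow remains aligned to the 4K mmap granularity.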
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
static AsanInitializer asan_initializer;
#endif // ASAN_DYNAMIC
-} // namespace __asan
-
-// ---------------------- Interface ---------------- {{{1
-using namespace __asan;
-
-void NOINLINE __asan_handle_no_return() {
- if (asan_init_is_running)
+void UnpoisonStack(uptr bottom, uptr top, const char *type) {
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report(
+ "WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack type: %s top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "https://github.com/google/sanitizers/issues/189\n",
+ type, top, bottom, top - bottom, top - bottom);
return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+}
- int local_stack;
- AsanThread *curr_thread = GetCurrentThread();
- uptr PageSize = GetPageSizeCached();
- uptr top, bottom;
- if (curr_thread) {
+static void UnpoisonDefaultStack() {
+ uptr bottom, top;
+
+ if (AsanThread *curr_thread = GetCurrentThread()) {
+ int local_stack;
+ const uptr page_size = GetPageSizeCached();
top = curr_thread->stack_top();
- bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
+ bottom = ((uptr)&local_stack - page_size) & ~(page_size - 1);
} else if (SANITIZER_RTEMS) {
// Give up on RTEMS.
return;
&tls_size);
top = bottom + stack_size;
}
- static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
- if (top - bottom > kMaxExpectedCleanupSize) {
- static bool reported_warning = false;
- if (reported_warning)
- return;
- reported_warning = true;
- Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
- "stack top: %p; bottom %p; size: %p (%zd)\n"
- "False positive error reports may follow\n"
- "For details see "
- "https://github.com/google/sanitizers/issues/189\n",
- top, bottom, top - bottom, top - bottom);
- return;
- }
- PoisonShadow(bottom, top - bottom, 0);
+
+ UnpoisonStack(bottom, top, "default");
+}
+
+static void UnpoisonFakeStack() {
+ AsanThread *curr_thread = GetCurrentThread();
if (curr_thread && curr_thread->has_fake_stack())
curr_thread->fake_stack()->HandleNoReturn();
}
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
+
+void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
+ if (!PlatformUnpoisonStacks())
+ UnpoisonDefaultStack();
+
+ UnpoisonFakeStack();
+}
+
extern "C" void *__asan_extra_spill_area() {
AsanThread *t = GetCurrentThread();
CHECK(t);
namespace __asan {
-// ---------------------- mmap -------------------- {{{1
-// Reserve memory range [beg, end].
-// We need to use inclusive range because end+1 may not be representable.
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedSuperNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
- if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
-}
-
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
"unprotected gap shadow");
return;
}
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == kZeroBaseShadowStart) {
- uptr step = GetMmapGranularity();
- while (size > step && addr < kZeroBaseMaxShadowStart) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- }
- }
-
- Report(
- "ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
- DumpProcessMap();
- Die();
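+ // The shared __sanitizer::ProtectGap implements the stepwise fallback for
+ // the first pages of the address space, between kZeroBaseShadowStart and
+ // kZeroBaseMaxShadowStart, which cannot always be protected.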
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
static void MaybeReportLinuxPIEBug() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
+#if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__aarch64__) || SANITIZER_RISCV64)
Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
Report(
"See https://github.com/google/sanitizers/issues/856 for possible "
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
- __asan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}
stack.Unwind(pc, bp, nullptr, \
common_flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_SIGNAL(sig) \
- BufferedStackTrace stack; \
- stack.Unwind((sig).pc, (sig).bp, (sig).context, \
- common_flags()->fast_unwind_on_fatal)
-
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
UNIMPLEMENTED();
}
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+ // Since asan's mapping is compacting, the shadow chunk may not be
+ // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
// ---------------------- TSD ---------------- {{{
static bool tsd_key_inited = false;
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
- uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
- granularity, nullptr, nullptr);
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanCheckDynamicRTPrereqs() {}
void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }
+bool PlatformUnpoisonStacks() { return false; }
+
#if SANITIZER_WINDOWS64
// Exception handler for dealing with shadow memory.
static LONG CALLBACK
/// \param addr Address to locate.
/// \param name Buffer to store the variable's name.
/// \param name_size Size in bytes of the variable's name buffer.
-/// \param region_address [out] Address of the region.
-/// \param region_size [out] Size of the region in bytes.
+/// \param[out] region_address Address of the region.
+/// \param[out] region_size Size of the region in bytes.
///
/// \returns Returns the category of the given pointer as a constant string.
const char *__asan_locate_address(void *addr, char *name, size_t name_size,
/// \param addr A heap address.
/// \param trace A buffer to store the stack trace.
/// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
/// \param addr A heap address.
/// \param trace A buffer to store the stack trace.
/// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
///
/// \returns Returns the number of stored frames or 0 on error.
size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
/// Gets the current shadow memory mapping (useful for calling from the
/// debugger).
///
-/// \param shadow_scale [out] Shadow scale value.
-/// \param shadow_offset [out] Offset value.
+/// \param[out] shadow_scale Shadow scale value.
+/// \param[out] shadow_offset Offset value.
void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
/// This is an internal function that is called to report an error. However,
///
/// \param fake_stack An opaque handler to a fake stack.
/// \param addr Address to test.
-/// \param beg [out] Beginning of fake frame.
-/// \param end [out] End of fake frame.
+/// \param[out] beg Beginning of fake frame.
+/// \param[out] end End of fake frame.
/// \returns Stack address or NULL.
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);
/// signal callback runs during the switch, it will not benefit from stack
/// use-after-return detection.
///
-/// \param fake_stack_save [out] Fake stack save location.
+/// \param[out] fake_stack_save Fake stack save location.
/// \param bottom Bottom address of stack.
/// \param size Size of stack in bytes.
void __sanitizer_start_switch_fiber(void **fake_stack_save,
/// <c>__sanitizer_start_switch_fiber()</c>.
///
/// \param fake_stack_save Fake stack save location.
-/// \param bottom_old [out] Bottom address of old stack.
-/// \param size_old [out] Size of old stack in bytes.
+/// \param[out] bottom_old Bottom address of old stack.
+/// \param[out] size_old Size of old stack in bytes.
void __sanitizer_finish_switch_fiber(void *fake_stack_save,
const void **bottom_old,
size_t *size_old);
size_t dfsan_get_label_count(void);
/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory. Will work only if there are no other
-/// threads executing DFSan-instrumented code concurrently.
-/// Use this call to start over the taint tracking within the same procces.
+/// with the application memory. Use this call to start over the taint tracking
+/// within the same process.
+///
+/// Note: If another thread is working with tainted data during the flush, that
+/// taint could still be written to shadow after the flush.
void dfsan_flush(void);
/// Sets a callback to be invoked on calls to write(). The callback is invoked
call to __msan_scoped_disable_interceptor_checks. */
void __msan_scoped_enable_interceptor_checks(void);
+ void __msan_start_switch_fiber(const void *bottom, size_t size);
+ void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
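+ /* A sketch of the intended pairing (assuming it mirrors the fiber-switch
+ APIs above): call __msan_start_switch_fiber(bottom, size) with the
+ destination stack bounds before switching to a fiber, and
+ __msan_finish_switch_fiber(&old_bottom, &old_size) right after switching
+ back. */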
+
#ifdef __cplusplus
} // extern "C"
#endif
// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
//
// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
__sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
#define __sanitizer_syscall_post_dup2(res, from, to) \
__sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
-/* syscall 91 has been skipped */
+#define __sanitizer_syscall_pre_getrandom(buf, buflen, flags) \
+ __sanitizer_syscall_pre_impl_getrandom( \
+ (long long)(buf), (long long)(buflen), (long long)(flags))
+#define __sanitizer_syscall_post_getrandom(res, buf, buflen, flags) \
+ __sanitizer_syscall_post_impl_getrandom( \
+ res, (long long)(buf), (long long)(buflen), (long long)(flags))
#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
__sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd), \
(long long)(arg))
#define __sanitizer_syscall_post_sysarch(res, op, parms) \
__sanitizer_syscall_post_impl_sysarch(res, (long long)(op), \
(long long)(parms))
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+#define __sanitizer_syscall_pre___futex(uaddr, op, val, timeout, uaddr2, val2, \
+ val3) \
+ __sanitizer_syscall_pre_impl___futex((long long)(uaddr), (long long)(op), \
+ (long long)(val), (long long)(timeout), \
+ (long long)(uaddr2), (long long)(val2), \
+ (long long)(val3))
+#define __sanitizer_syscall_post___futex(res, uaddr, op, val, timeout, uaddr2, \
+ val2, val3) \
+ __sanitizer_syscall_post_impl___futex( \
+ res, (long long)(uaddr), (long long)(op), (long long)(val), \
+ (long long)(timeout), (long long)(uaddr2), (long long)(val2), \
+ (long long)(val3))
+#define __sanitizer_syscall_pre___futex_set_robust_list(head, len) \
+ __sanitizer_syscall_pre_impl___futex_set_robust_list((long long)(head), \
+ (long long)(len))
+#define __sanitizer_syscall_post___futex_set_robust_list(res, head, len) \
+ __sanitizer_syscall_post_impl___futex_set_robust_list( \
+ res, (long long)(head), (long long)(len))
+#define __sanitizer_syscall_pre___futex_get_robust_list(lwpid, headp, lenp) \
+ __sanitizer_syscall_pre_impl___futex_get_robust_list( \
+ (long long)(lwpid), (long long)(headp), (long long)(lenp))
+#define __sanitizer_syscall_post___futex_get_robust_list(res, lwpid, headp, \
+ lenp) \
+ __sanitizer_syscall_post_impl___futex_get_robust_list( \
+ res, (long long)(lwpid), (long long)(headp), (long long)(lenp))
#if !defined(_LP64)
#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5) \
__sanitizer_syscall_pre_impl_compat_10_osemsys( \
__sanitizer_syscall_post_impl___fhstatvfs190( \
res, (long long)(fhp), (long long)(fh_size), (long long)(buf), \
(long long)(flags))
+#define __sanitizer_syscall_pre___acl_get_link(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_get_link( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_link(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_get_link( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_link(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_set_link( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_link(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_set_link( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_link(path, type) \
+ __sanitizer_syscall_pre_impl___acl_delete_link((long long)(path), \
+ (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_link(res, path, type) \
+ __sanitizer_syscall_post_impl___acl_delete_link(res, (long long)(path), \
+ (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_link(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_aclcheck_link( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_link(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_aclcheck_link( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_file(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_get_file( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_file(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_get_file( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_file(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_set_file( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_file(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_set_file( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_fd(filedes, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_get_fd( \
+ (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_fd(res, filedes, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_get_fd( \
+ res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_fd(filedes, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_set_fd( \
+ (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_fd(res, filedes, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_set_fd( \
+ res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_file(path, type) \
+ __sanitizer_syscall_pre_impl___acl_delete_file((long long)(path), \
+ (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_file(res, path, type) \
+ __sanitizer_syscall_post_impl___acl_delete_file(res, (long long)(path), \
+ (long long)(type))
+#define __sanitizer_syscall_pre___acl_delete_fd(filedes, type) \
+ __sanitizer_syscall_pre_impl___acl_delete_fd((long long)(filedes), \
+ (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_fd(res, filedes, type) \
+ __sanitizer_syscall_post_impl___acl_delete_fd(res, (long long)(filedes), \
+ (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_file(path, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_aclcheck_file( \
+ (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_file(res, path, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_aclcheck_file( \
+ res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_aclcheck_fd(filedes, type, aclp) \
+ __sanitizer_syscall_pre_impl___acl_aclcheck_fd( \
+ (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_fd(res, filedes, type, aclp) \
+ __sanitizer_syscall_post_impl___acl_aclcheck_fd( \
+ res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre_lpathconf(path, name) \
+ __sanitizer_syscall_pre_impl_lpathconf((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_lpathconf(res, path, name) \
+ __sanitizer_syscall_post_impl_lpathconf(res, (long long)(path), \
+ (long long)(name))
/* Compat with older releases */
#define __sanitizer_syscall_pre_getvfsstat \
void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
long long to);
-/* syscall 91 has been skipped */
+void __sanitizer_syscall_pre_impl_getrandom(long long buf, long long buflen,
+ long long flags);
+void __sanitizer_syscall_post_impl_getrandom(long long res, long long buf,
+ long long buflen, long long flags);
void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
long long arg);
void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
long long parms);
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+void __sanitizer_syscall_pre_impl___futex(long long uaddr, long long op,
+ long long val, long long timeout,
+ long long uaddr2, long long val2,
+ long long val3);
+void __sanitizer_syscall_post_impl___futex(long long res, long long uaddr,
+ long long op, long long val,
+ long long timeout, long long uaddr2,
+ long long val2, long long val3);
+void __sanitizer_syscall_pre_impl___futex_set_robust_list(long long head,
+ long long len);
+void __sanitizer_syscall_post_impl___futex_set_robust_list(long long res,
+ long long head,
+ long long len);
+void __sanitizer_syscall_pre_impl___futex_get_robust_list(long long lwpid,
+ long long headp,
+ long long lenp);
+void __sanitizer_syscall_post_impl___futex_get_robust_list(long long res,
+ long long lwpid,
+ long long headp,
+ long long lenp);
#if !defined(_LP64)
void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
long long a2, long long a3,
long long fh_size,
long long buf,
long long flags);
+void __sanitizer_syscall_pre_impl___acl_get_link(long long path, long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_link(long long res, long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_link(long long path, long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_link(long long res, long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_link(long long path,
+ long long type);
+void __sanitizer_syscall_post_impl___acl_delete_link(long long res,
+ long long path,
+ long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_link(long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_link(long long res,
+ long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_file(long long path, long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_file(long long res, long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_file(long long path, long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_file(long long res, long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_fd(long long filedes,
+ long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_fd(long long res,
+ long long filedes,
+ long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_fd(long long filedes,
+ long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_fd(long long res,
+ long long filedes,
+ long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_file(long long path,
+ long long type);
+void __sanitizer_syscall_post_impl___acl_delete_file(long long res,
+ long long path,
+ long long type);
+void __sanitizer_syscall_pre_impl___acl_delete_fd(long long filedes,
+ long long type);
+void __sanitizer_syscall_post_impl___acl_delete_fd(long long res,
+ long long filedes,
+ long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_file(long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_file(long long res,
+ long long path,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_fd(long long filedes,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_fd(long long res,
+ long long filedes,
+ long long type,
+ long long aclp);
+void __sanitizer_syscall_pre_impl_lpathconf(long long path, long long name);
+void __sanitizer_syscall_post_impl_lpathconf(long long res, long long path,
+ long long name);
#ifdef __cplusplus
} // extern "C"
RegisterCommonFlags(&parser);
// Override from user-specified string.
- const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+ const char *lsan_default_options = __lsan_default_options();
parser.ParseString(lsan_default_options);
parser.ParseStringFromEnv("LSAN_OPTIONS");
template <typename AddressSpaceView>
using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
-#elif defined(__x86_64__) || defined(__powerpc64__)
+#elif defined(__x86_64__) || defined(__powerpc64__) || defined(__s390x__)
# if SANITIZER_FUCHSIA
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+#elif defined(__s390x__)
+const uptr kAllocatorSpace = 0x40000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
Flags lsan_flags;
+
void DisableCounterUnderflow() {
if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}
-const char *MaybeCallLsanDefaultOptions() {
- return (&__lsan_default_options) ? __lsan_default_options() : "";
-}
-
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
- uptr registers_begin = reinterpret_cast<uptr>(registers.data());
- uptr registers_end =
- reinterpret_cast<uptr>(registers.data() + registers.size());
+ InternalMmapVector<uptr> registers;
for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
}
uptr sp;
PtraceRegistersStatus have_registers =
- suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ suspended_threads.GetRegistersAndSP(i, &registers, &sp);
if (have_registers != REGISTERS_AVAILABLE) {
Report("Unable to get registers from thread %d.\n", os_id);
// If unable to get SP, consider the entire stack to be reachable unless
sp = stack_begin;
}
- if (flags()->use_registers && have_registers)
+ if (flags()->use_registers && have_registers) {
+ uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
ScanRangeForPointers(registers_begin, registers_end, frontier,
"REGISTERS", kReachable);
+ }
if (flags()->use_stacks) {
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
return 0;
}
-#if !SANITIZER_SUPPORTS_WEAK_HOOKS
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-const char * __lsan_default_options() {
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
return "";
}
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
// To enable LeakSanitizer on a new architecture, one needs to implement the
// internal_clone function as well as (probably) adjust the TLS machinery for
// the new architecture inside the sanitizer library.
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
- (SANITIZER_WORDSIZE == 64) && \
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
(defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
- defined(__powerpc64__))
+ defined(__powerpc64__) || defined(__s390x__))
#define CAN_SANITIZE_LEAKS 1
#elif defined(__i386__) && \
(SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
return res;
}
+INTERCEPTOR(int, pthread_detach, void *th) {
+ ENSURE_LSAN_INITED;
+ int tid = ThreadTid((uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0)
+ ThreadDetach(tid);
+ return res;
+}
+
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
LSAN_MAYBE_INTERCEPT_MALLINFO;
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_detach);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
return thread_registry->FindThread(FindThreadByUid, (void *)uid);
}
+void ThreadDetach(u32 tid) {
+ CHECK_NE(tid, kInvalidTid);
+ thread_registry->DetachThread(tid, /* arg */ nullptr);
+}
+
void ThreadJoin(u32 tid) {
CHECK_NE(tid, kInvalidTid);
thread_registry->JoinThread(tid, /* arg */ nullptr);
u32 ThreadCreate(u32 tid, uptr uid, bool detached, void *arg = nullptr);
void ThreadFinish();
+void ThreadDetach(u32 tid);
void ThreadJoin(u32 tid);
u32 ThreadTid(uptr uid);
#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+namespace {
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+struct BlockHeader {
+ u64 magic;
+};
+} // namespace
+
static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
SetAllocatorOutOfMemory();
Report("FATAL: %s: internal allocator is out of memory trying to allocate "
}
void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
- if (size + sizeof(u64) < size)
+ uptr s = size + sizeof(BlockHeader);
+ if (s < size)
return nullptr;
- void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
+ BlockHeader *p = (BlockHeader *)RawInternalAlloc(s, cache, alignment);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
- ((u64*)p)[0] = kBlockMagic;
- return (char*)p + sizeof(u64);
+ ReportInternalAllocatorOutOfMemory(s);
+ p->magic = kBlockMagic;
+ return p + 1;
}
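+// Layout produced by InternalAlloc (sketch):
+//   [ BlockHeader.magic | user data ... ]
+//   ^ raw allocation     ^ p + 1, the pointer returned to the caller
+// InternalRealloc and InternalFree step back one BlockHeader and check the
+// magic value to catch pointers that did not come from this allocator.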
void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
if (!addr)
return InternalAlloc(size, cache);
- if (size + sizeof(u64) < size)
+ uptr s = size + sizeof(BlockHeader);
+ if (s < size)
return nullptr;
- addr = (char*)addr - sizeof(u64);
- size = size + sizeof(u64);
- CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
- void *p = RawInternalRealloc(addr, size, cache);
+ BlockHeader *p = (BlockHeader *)addr - 1;
+ CHECK_EQ(kBlockMagic, p->magic);
+ p = (BlockHeader *)RawInternalRealloc(p, s, cache);
if (UNLIKELY(!p))
- ReportInternalAllocatorOutOfMemory(size);
- return (char*)p + sizeof(u64);
+ ReportInternalAllocatorOutOfMemory(s);
+ return p + 1;
}
void *InternalReallocArray(void *addr, uptr count, uptr size,
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
- addr = (char*)addr - sizeof(u64);
- CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
- ((u64*)addr)[0] = 0;
- RawInternalFree(addr, cache);
+ BlockHeader *p = (BlockHeader *)addr - 1;
+ CHECK_EQ(kBlockMagic, p->magic);
+ p->magic = 0;
+ RawInternalFree(p, cache);
}
// LowLevelAllocator
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+inline u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
return (*state = *state * 1103515245 + 12345) >> 16;
}
-INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+inline u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
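+// Note: Rand() keeps only the high bits (>> 16) because the low-order bits
+// of a power-of-two-modulus LCG have very short periods. Illustrative use:
+//   u32 state = 42;            // hypothetical seed
+//   u32 r = RandN(&state, n);  // r is in [0, n)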
template<typename T>
-INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
+inline void RandomShuffle(T *a, u32 n, u32 *rand_state) {
if (n <= 1) return;
u32 state = *rand_state;
for (u32 i = n - 1; i > 0; i--)
void SetErrnoToENOMEM();
// A common errno setting logic shared by almost all sanitizer allocator APIs.
-INLINE void *SetErrnoOnNull(void *ptr) {
+inline void *SetErrnoOnNull(void *ptr) {
if (UNLIKELY(!ptr))
SetErrnoToENOMEM();
return ptr;
// two and that the size is a multiple of alignment for POSIX implementation,
// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
// of alignment.
-INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+inline bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
return alignment != 0 && IsPowerOfTwo(alignment) &&
(size & (alignment - 1)) == 0;
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
-INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+inline bool CheckPosixMemalignAlignment(uptr alignment) {
return alignment != 0 && IsPowerOfTwo(alignment) &&
(alignment % sizeof(void *)) == 0;
}
// Returns true if calloc(size, n) call overflows on size*n calculation.
-INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+inline bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size)
return false;
uptr max = (uptr)-1L;
// Returns true if the size passed to pvalloc overflows when rounded to the next
// multiple of page_size.
-INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+inline bool CheckForPvallocOverflow(uptr size, uptr page_size) {
return RoundUpTo(size, page_size) < size;
}
}
void *GetMetaData(const void *p) {
+ CHECK(kMetadataSize);
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
+ if (class_id >= kNumClasses) return nullptr;
uptr size = ClassIdToSize(class_id);
if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = GetRegionBegin(p);
uptr beg = chunk_idx * size;
uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
if (region->mapped_user >= next_beg)
return reinterpret_cast<void*>(reg_beg + beg);
static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(const void *p) {
+ CHECK(kMetadataSize);
uptr class_id = GetSizeClass(p);
uptr size = ClassIdToSize(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
// (currently, 32 bits and internal allocator).
class LargeMmapAllocatorPtrArrayStatic {
public:
- INLINE void *Init() { return &p_[0]; }
- INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ inline void *Init() { return &p_[0]; }
+ inline void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
private:
static const int kMaxNumChunks = 1 << 15;
uptr p_[kMaxNumChunks];
// same functionality in Fuchsia case, which does not support MAP_NORESERVE.
class LargeMmapAllocatorPtrArrayDynamic {
public:
- INLINE void *Init() {
+ inline void *Init() {
uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
SecondaryAllocatorName);
CHECK(p);
return reinterpret_cast<void*>(p);
}
- INLINE void EnsureSpace(uptr n) {
+ inline void EnsureSpace(uptr n) {
CHECK_LT(n, kMaxNumChunks);
DCHECK(n <= n_reserved_);
if (UNLIKELY(n == n_reserved_)) {
// Clutter-reducing helpers.
template<typename T>
-INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+inline typename T::Type atomic_load_relaxed(const volatile T *a) {
return atomic_load(a, memory_order_relaxed);
}
template<typename T>
-INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+inline void atomic_store_relaxed(volatile T *a, typename T::Type v) {
atomic_store(a, v, memory_order_relaxed);
}
// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
__asm__ __volatile__("" ::: "memory");
}
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
__sync_synchronize();
}
template<typename T>
-INLINE typename T::Type atomic_fetch_add(volatile T *a,
+inline typename T::Type atomic_fetch_add(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
}
template<typename T>
-INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+inline typename T::Type atomic_fetch_sub(volatile T *a,
typename T::Type v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
}
template<typename T>
-INLINE typename T::Type atomic_exchange(volatile T *a,
+inline typename T::Type atomic_exchange(volatile T *a,
typename T::Type v, memory_order mo) {
DCHECK(!((uptr)a % sizeof(*a)));
if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
}
template <typename T>
-INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
+inline bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
typedef typename T::Type Type;
}
template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
} __attribute__((aligned(32))) lock = {0, {0}};
template <>
-INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
DCHECK(mo &
}
template <>
-INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type val,
memory_order mo) {
return atomic_fetch_add(ptr, -val, mo);
}
template <>
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
atomic_uint64_t::Type *cmp,
atomic_uint64_t::Type xchg,
memory_order mo) {
}
template <>
-INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+inline atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
}
template <>
-INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+inline void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
memory_order mo) {
DCHECK(mo &
(memory_order_relaxed | memory_order_release | memory_order_seq_cst));
namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
namespace __sanitizer {
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
__asm__ __volatile__("" ::: "memory");
for (int i = 0; i < cnt; i++)
__asm__ __volatile__("pause");
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
namespace __sanitizer {
-INLINE void atomic_signal_fence(memory_order) {
+inline void atomic_signal_fence(memory_order) {
_ReadWriteBarrier();
}
-INLINE void atomic_thread_fence(memory_order) {
+inline void atomic_thread_fence(memory_order) {
_mm_mfence();
}
-INLINE void proc_yield(int cnt) {
+inline void proc_yield(int cnt) {
for (int i = 0; i < cnt; i++)
_mm_pause();
}
template<typename T>
-INLINE typename T::Type atomic_load(
+inline typename T::Type atomic_load(
const volatile T *a, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_consume
| memory_order_acquire | memory_order_seq_cst));
}
template<typename T>
-INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
DCHECK(mo & (memory_order_relaxed | memory_order_release
| memory_order_seq_cst));
DCHECK(!((uptr)a % sizeof(*a)));
atomic_thread_fence(memory_order_seq_cst);
}
-INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_add(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
(long)v);
}
-INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#endif
}
-INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+inline u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
-(long)v);
}
-INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+inline uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
uptr v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
#endif
}
-INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+inline u8 atomic_exchange(volatile atomic_uint8_t *a,
u8 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
}
-INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+inline u16 atomic_exchange(volatile atomic_uint16_t *a,
u16 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
}
-INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+inline u32 atomic_exchange(volatile atomic_uint32_t *a,
u32 v, memory_order mo) {
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
memory_order mo) {
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
memory_order mo) {
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
u16 *cmp,
u16 xchg,
memory_order mo) {
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
u32 *cmp,
u32 xchg,
memory_order mo) {
return false;
}
-INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+inline bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
u64 *cmp,
u64 xchg,
memory_order mo) {
}
template<typename T>
-INLINE bool atomic_compare_exchange_weak(volatile T *a,
+inline bool atomic_compare_exchange_weak(volatile T *a,
typename T::Type *cmp,
typename T::Type xchg,
memory_order mo) {
extern const char *SanitizerToolName; // Can be changed by the tool.
extern atomic_uint32_t current_verbosity;
-INLINE void SetVerbosity(int verbosity) {
+inline void SetVerbosity(int verbosity) {
atomic_store(¤t_verbosity, verbosity, memory_order_relaxed);
}
-INLINE int Verbosity() {
+inline int Verbosity() {
return atomic_load(¤t_verbosity, memory_order_relaxed);
}
#if SANITIZER_ANDROID
-INLINE uptr GetPageSize() {
+inline uptr GetPageSize() {
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
return 4096;
}
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
-INLINE uptr GetPageSizeCached() {
+inline uptr GetPageSizeCached() {
if (!PageSizeCached)
PageSizeCached = GetPageSize();
return PageSizeCached;
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
-INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void MprotectMallocZones(void *addr, int prot);
+#if SANITIZER_LINUX
+// Unmap memory. Currently only used on Linux.
+void UnmapFromTo(uptr from, uptr to);
+#endif
+
+// Maps shadow_size_bytes of shadow memory and returns shadow address. It will
+// be aligned to the mmap granularity * 2^shadow_scale, or to
+// 2^min_shadow_base_alignment if that is larger. The returned address will
+// have max(2^min_shadow_base_alignment, mmap granularity) bytes on the left
+// and shadow_size_bytes bytes on the right, which on Linux is mapped with no
+// access. high_mem_end may be updated if the original shadow size doesn't fit.
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end);
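+//
+// Sketch of the alignment computation (illustrative, assuming a 4 KiB mmap
+// granularity; Max/GetMmapGranularity are the existing sanitizer helpers):
+//   uptr granularity = GetMmapGranularity();        // e.g. 4096
+//   uptr alignment = granularity << shadow_scale;   // 32768 for scale 3
+//   alignment = Max(alignment, (uptr)1 << min_shadow_base_alignment);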
+
+// Reserve memory range [beg, end]. If madvise_shadow is true, also apply the
+// madvise hints (e.g. hugepages, core dumping) requested by options.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow = true);
+
+// Protect size bytes of memory starting at addr. A few pages at the very
+// start of the address space may be unprotectable; if addr equals
+// zero_base_shadow_start, the protection is retried past them, up to
+// zero_base_max_shadow_start.
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start);
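+//
+// Illustrative call (the k* constants are hypothetical, tool-specific layout
+// values, not part of this interface):
+//   ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1,
+//              /*zero_base_shadow_start=*/0,
+//              /*zero_base_max_shadow_start=*/1 << 18);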
+
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found, uptr *max_occupied_addr);
}
#endif
-INLINE uptr MostSignificantSetBitIndex(uptr x) {
+inline uptr MostSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
return up;
}
-INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+inline uptr LeastSignificantSetBitIndex(uptr x) {
CHECK_NE(x, 0U);
unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
return up;
}
-INLINE bool IsPowerOfTwo(uptr x) {
+inline bool IsPowerOfTwo(uptr x) {
return (x & (x - 1)) == 0;
}
-INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+inline uptr RoundUpToPowerOfTwo(uptr size) {
CHECK(size);
if (IsPowerOfTwo(size)) return size;
return 1ULL << (up + 1);
}
-INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+inline uptr RoundUpTo(uptr size, uptr boundary) {
RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
-INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+inline uptr RoundDownTo(uptr x, uptr boundary) {
return x & ~(boundary - 1);
}
-INLINE bool IsAligned(uptr a, uptr alignment) {
+inline bool IsAligned(uptr a, uptr alignment) {
return (a & (alignment - 1)) == 0;
}
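+// Examples: RoundUpTo(13, 8) == 16, RoundDownTo(13, 8) == 8,
+// IsAligned(16, 8) == true.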
-INLINE uptr Log2(uptr x) {
+inline uptr Log2(uptr x) {
CHECK(IsPowerOfTwo(x));
return LeastSignificantSetBitIndex(x);
}
}
// Char handling
-INLINE bool IsSpace(int c) {
+inline bool IsSpace(int c) {
return (c == ' ') || (c == '\n') || (c == '\t') ||
(c == '\f') || (c == '\r') || (c == '\v');
}
-INLINE bool IsDigit(int c) {
+inline bool IsDigit(int c) {
return (c >= '0') && (c <= '9');
}
-INLINE int ToLower(int c) {
+inline int ToLower(int c) {
return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
kModuleArchARMV7,
kModuleArchARMV7S,
kModuleArchARMV7K,
- kModuleArchARM64
+ kModuleArchARM64,
+ kModuleArchRISCV64
};
// Opens the file 'file_name' and reads up to 'max_len' bytes.
return "armv7k";
case kModuleArchARM64:
return "arm64";
+ case kModuleArchRISCV64:
+ return "riscv64";
}
CHECK(0 && "Invalid module arch");
return "";
#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
-INLINE void LogFullErrorReport(const char *buffer) {}
+inline void LogFullErrorReport(const char *buffer) {}
#endif
#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
-INLINE void WriteOneLineToSyslog(const char *s) {}
-INLINE void LogMessageOnPrintf(const char *str) {}
+inline void WriteOneLineToSyslog(const char *s) {}
+inline void LogMessageOnPrintf(const char *str) {}
#endif
#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
-INLINE void AndroidLogInit() {}
+inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
-INLINE void SetAbortMessage(const char *) {}
+inline void SetAbortMessage(const char *) {}
#endif
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
-INLINE void AndroidLogWrite(const char *buffer_unused) {}
-INLINE void SanitizerInitializeUnwinder() {}
-INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+inline void AndroidLogWrite(const char *buffer_unused) {}
+inline void SanitizerInitializeUnwinder() {}
+inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
-INLINE uptr GetPthreadDestructorIterations() {
+inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
-INLINE void AvoidCVE_2016_2143() {}
+inline void AvoidCVE_2016_2143() {}
#endif
struct StackDepotStats {
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
-INLINE u32 GetNumberOfCPUsCached() {
+inline u32 GetNumberOfCPUsCached() {
if (!NumberOfCPUsCached)
NumberOfCPUsCached = GetNumberOfCPUs();
return NumberOfCPUsCached;
}
+template <typename T>
+class ArrayRef {
+ public:
+ ArrayRef() {}
+ ArrayRef(T *begin, T *end) : begin_(begin), end_(end) {}
+
+ T *begin() { return begin_; }
+ T *end() { return end_; }
+
+ private:
+ T *begin_ = nullptr;
+ T *end_ = nullptr;
+};
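+
+// Illustrative use (not from the source):
+//   uptr regs[4] = {};
+//   ArrayRef<uptr> r(regs, regs + 4);
+//   for (uptr *p = r.begin(); p != r.end(); ++p) *p = 0;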
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
- COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ if (common_flags()->intercept_strcmp) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ }
int result = CharCmpX(c1, c2);
CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
s2, result);
#define INIT_CLOCK_GETTIME
#endif
+#if SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID
+INTERCEPTOR(int, clock_getcpuclockid, pid_t pid,
+ __sanitizer_clockid_t *clockid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_getcpuclockid, pid, clockid);
+ int res = REAL(clock_getcpuclockid)(pid, clockid);
+ if (!res && clockid) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, clockid, sizeof *clockid);
+ }
+ return res;
+}
+
+#define INIT_CLOCK_GETCPUCLOCKID \
+ COMMON_INTERCEPT_FUNCTION(clock_getcpuclockid);
+#else
+#define INIT_CLOCK_GETCPUCLOCKID
+#endif
+
#if SANITIZER_INTERCEPT_GETITIMER
INTERCEPTOR(int, getitimer, int which, void *curr_value) {
void *ctx;
#define INIT_SENDMMSG
#endif
+#if SANITIZER_INTERCEPT_SYSMSG
+INTERCEPTOR(int, msgsnd, int msqid, const void *msgp, SIZE_T msgsz,
+ int msgflg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, msgsnd, msqid, msgp, msgsz, msgflg);
+ if (msgp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msgp, sizeof(long) + msgsz);
+ int res = REAL(msgsnd)(msqid, msgp, msgsz, msgflg);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, msgrcv, int msqid, void *msgp, SIZE_T msgsz,
+ long msgtyp, int msgflg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, msgrcv, msqid, msgp, msgsz, msgtyp, msgflg);
+ SSIZE_T len = REAL(msgrcv)(msqid, msgp, msgsz, msgtyp, msgflg);
+ if (len != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msgp, sizeof(long) + len);
+ return len;
+}
+
+#define INIT_SYSMSG \
+ COMMON_INTERCEPT_FUNCTION(msgsnd); \
+ COMMON_INTERCEPT_FUNCTION(msgrcv);
+#else
+#define INIT_SYSMSG
+#endif
+
#if SANITIZER_INTERCEPT_GETPEERNAME
INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
void *ctx;
#define INIT_SIGSETOPS
#endif
+#if SANITIZER_INTERCEPT_SIGSET_LOGICOPS
+INTERCEPTOR(int, sigandset, __sanitizer_sigset_t *dst,
+ __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigandset, dst, src1, src2);
+ if (src1)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
+ if (src2)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
+ int res = REAL(sigandset)(dst, src1, src2);
+ if (!res && dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+ return res;
+}
+
+INTERCEPTOR(int, sigorset, __sanitizer_sigset_t *dst,
+ __sanitizer_sigset_t *src1, __sanitizer_sigset_t *src2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigorset, dst, src1, src2);
+ if (src1)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src1, sizeof(*src1));
+ if (src2)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src2, sizeof(*src2));
+ int res = REAL(sigorset)(dst, src1, src2);
+ if (!res && dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+ return res;
+}
+#define INIT_SIGSET_LOGICOPS \
+ COMMON_INTERCEPT_FUNCTION(sigandset); \
+ COMMON_INTERCEPT_FUNCTION(sigorset);
+#else
+#define INIT_SIGSET_LOGICOPS
+#endif
+
#if SANITIZER_INTERCEPT_SIGPENDING
INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
void *ctx;
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_PTSNAME
+INTERCEPTOR(char *, ptsname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptsname, fd);
+ char *res = REAL(ptsname)(fd);
+ if (res != nullptr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_PTSNAME COMMON_INTERCEPT_FUNCTION(ptsname);
+#else
+#define INIT_PTSNAME
+#endif
+
+#if SANITIZER_INTERCEPT_PTSNAME_R
+INTERCEPTOR(int, ptsname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptsname_r, fd, name, namesize);
+ int res = REAL(ptsname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_PTSNAME_R COMMON_INTERCEPT_FUNCTION(ptsname_r);
+#else
+#define INIT_PTSNAME_R
+#endif
+
#if SANITIZER_INTERCEPT_TTYNAME
INTERCEPTOR(char *, ttyname, int fd) {
void *ctx;
#define INIT_XDR
#endif // SANITIZER_INTERCEPT_XDR
+#if SANITIZER_INTERCEPT_XDRREC
+typedef int (*xdrrec_cb)(char*, char*, int);
+struct XdrRecWrapper {
+ char *handle;
+ xdrrec_cb rd, wr;
+};
+typedef AddrHashMap<XdrRecWrapper *, 11> XdrRecWrapMap;
+static XdrRecWrapMap *xdrrec_wrap_map;
+
+static int xdrrec_wr_wrap(char *handle, char *buf, int count) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(buf, count);
+ XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
+ return wrap->wr(wrap->handle, buf, count);
+}
+
+static int xdrrec_rd_wrap(char *handle, char *buf, int count) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ XdrRecWrapper *wrap = (XdrRecWrapper *)handle;
+ return wrap->rd(wrap->handle, buf, count);
+}
+
+// This doesn't apply to the Solaris version, which has a different function
+// signature.
+INTERCEPTOR(void, xdrrec_create, __sanitizer_XDR *xdr, unsigned sndsize,
+ unsigned rcvsize, char *handle, int (*rd)(char*, char*, int),
+ int (*wr)(char*, char*, int)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrrec_create, xdr, sndsize, rcvsize,
+ handle, rd, wr);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &xdr->x_op, sizeof xdr->x_op);
+
+ // We can't allocate a wrapper on the stack, as the handle is used outside
+ // this stack frame. So we put it on the heap, and keep track of it with
+ // the HashMap (keyed by x_private). When we later need to xdr_destroy,
+ // we can index the map, free the wrapper, and then clean the map entry.
+ XdrRecWrapper *wrap_data =
+ (XdrRecWrapper *)InternalAlloc(sizeof(XdrRecWrapper));
+ wrap_data->handle = handle;
+ wrap_data->rd = rd;
+ wrap_data->wr = wr;
+ if (wr)
+ wr = xdrrec_wr_wrap;
+ if (rd)
+ rd = xdrrec_rd_wrap;
+ handle = (char *)wrap_data;
+
+ REAL(xdrrec_create)(xdr, sndsize, rcvsize, handle, rd, wr);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdr, sizeof *xdr);
+
+ XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, false, true);
+ *wrap = wrap_data;
+}
+
+// We have to intercept xdr_destroy to be able to free the wrapper memory;
+// otherwise intercepting it would not be necessary.
+INTERCEPTOR(void, xdr_destroy, __sanitizer_XDR *xdr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_destroy, xdr);
+
+ XdrRecWrapMap::Handle wrap(xdrrec_wrap_map, xdr->x_private, true);
+ InternalFree(*wrap);
+ REAL(xdr_destroy)(xdr);
+}
+#define INIT_XDRREC_LINUX \
+ static u64 xdrrec_wrap_mem[sizeof(XdrRecWrapMap) / sizeof(u64) + 1]; \
+ xdrrec_wrap_map = new ((void *)&xdrrec_wrap_mem) XdrRecWrapMap(); \
+ COMMON_INTERCEPT_FUNCTION(xdrrec_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_destroy);
+#else
+#define INIT_XDRREC_LINUX
+#endif
+
#if SANITIZER_INTERCEPT_TSEARCH
INTERCEPTOR(void *, tsearch, void *key, void **rootp,
int (*compar)(const void *, const void *)) {
#endif
#if SANITIZER_INTERCEPT_PROTOENT
-INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
- struct __sanitizer_protoent *p = REAL(getprotoent)();
- if (p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+static void write_protoent(void *ctx, struct __sanitizer_protoent *p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
- SIZE_T pp_size = 1; // One handles the trailing \0
+ SIZE_T pp_size = 1; // One handles the trailing \0
- for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+ for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
- }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
+ pp_size * sizeof(char **));
+}
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
+ struct __sanitizer_protoent *p = REAL(getprotoent)();
+ if (p)
+ write_protoent(ctx, p);
return p;
}
if (name)
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
- if (p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
-
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
-
- SIZE_T pp_size = 1; // One handles the trailing \0
-
- for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
-
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
- }
+ if (p)
+ write_protoent(ctx, p);
return p;
}
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto);
struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto);
- if (p) {
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
-
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
-
- SIZE_T pp_size = 1; // One handles the trailing \0
-
- for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
-
- COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
- pp_size * sizeof(char **));
- }
+ if (p)
+ write_protoent(ctx, p);
return p;
}
#define INIT_PROTOENT \
#define INIT_PROTOENT
#endif
+#if SANITIZER_INTERCEPT_PROTOENT_R
+INTERCEPTOR(int, getprotoent_r, struct __sanitizer_protoent *result_buf,
+ char *buf, SIZE_T buflen, struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent_r, result_buf, buf, buflen,
+ result);
+ int res = REAL(getprotoent_r)(result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+INTERCEPTOR(int, getprotobyname_r, const char *name,
+ struct __sanitizer_protoent *result_buf, char *buf, SIZE_T buflen,
+ struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname_r, name, result_buf, buf,
+ buflen, result);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ int res = REAL(getprotobyname_r)(name, result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+INTERCEPTOR(int, getprotobynumber_r, int num,
+ struct __sanitizer_protoent *result_buf, char *buf,
+ SIZE_T buflen, struct __sanitizer_protoent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber_r, num, result_buf, buf,
+ buflen, result);
+ int res = REAL(getprotobynumber_r)(num, result_buf, buf, buflen, result);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof *result);
+ if (!res && *result)
+ write_protoent(ctx, *result);
+ return res;
+}
+
+#define INIT_PROTOENT_R \
+ COMMON_INTERCEPT_FUNCTION(getprotoent_r); \
+ COMMON_INTERCEPT_FUNCTION(getprotobyname_r); \
+ COMMON_INTERCEPT_FUNCTION(getprotobynumber_r);
+#else
+#define INIT_PROTOENT_R
+#endif
+
#if SANITIZER_INTERCEPT_NETENT
INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
void *ctx;
}
}
qsort_compar_f old_compar = qsort_compar;
- qsort_compar = compar;
SIZE_T old_size = qsort_size;
- qsort_size = size;
+ // Handle qsort() implementations that recurse using an
+ // interposable function call:
+ bool already_wrapped = compar == wrapped_qsort_compar;
+ if (already_wrapped) {
+ // This case should only happen if the qsort() implementation calls itself
+ // using a preemptible function call (e.g. the FreeBSD libc version).
+ // Check that the size and comparator arguments are as expected.
+ CHECK_NE(compar, qsort_compar);
+ CHECK_EQ(qsort_size, size);
+ } else {
+ qsort_compar = compar;
+ qsort_size = size;
+ }
REAL(qsort)(base, nmemb, size, wrapped_qsort_compar);
- qsort_compar = old_compar;
- qsort_size = old_size;
+ if (!already_wrapped) {
+ qsort_compar = old_compar;
+ qsort_size = old_size;
+ }
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT COMMON_INTERCEPT_FUNCTION(qsort)
}
}
qsort_r_compar_f old_compar = qsort_r_compar;
- qsort_r_compar = compar;
SIZE_T old_size = qsort_r_size;
- qsort_r_size = size;
+ // Handle qsort_r() implementations that recurse using an
+ // interposable function call:
+ bool already_wrapped = compar == wrapped_qsort_r_compar;
+ if (already_wrapped) {
+ // This case should only happen if the qsort_r() implementation calls itself
+ // using a preemptible function call (e.g. the FreeBSD libc version).
+ // Check that the size and comparator arguments are as expected.
+ CHECK_NE(compar, qsort_r_compar);
+ CHECK_EQ(qsort_r_size, size);
+ } else {
+ qsort_r_compar = compar;
+ qsort_r_size = size;
+ }
REAL(qsort_r)(base, nmemb, size, wrapped_qsort_r_compar, arg);
- qsort_r_compar = old_compar;
- qsort_r_size = old_size;
+ if (!already_wrapped) {
+ qsort_r_compar = old_compar;
+ qsort_r_size = old_size;
+ }
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, base, nmemb * size);
}
#define INIT_QSORT_R COMMON_INTERCEPT_FUNCTION(qsort_r)
INIT_FGETGRENT_R;
INIT_SETPWENT;
INIT_CLOCK_GETTIME;
+ INIT_CLOCK_GETCPUCLOCKID;
INIT_GETITIMER;
INIT_TIME;
INIT_GLOB;
INIT_SENDMSG;
INIT_RECVMMSG;
INIT_SENDMMSG;
+ INIT_SYSMSG;
INIT_GETPEERNAME;
INIT_IOCTL;
INIT_INET_ATON;
INIT_SIGWAITINFO;
INIT_SIGTIMEDWAIT;
INIT_SIGSETOPS;
+ INIT_SIGSET_LOGICOPS;
INIT_SIGPENDING;
INIT_SIGPROCMASK;
INIT_PTHREAD_SIGMASK;
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_PTSNAME;
+ INIT_PTSNAME_R;
INIT_TTYNAME;
INIT_TTYNAME_R;
INIT_TEMPNAM;
INIT_BZERO;
INIT_FTIME;
INIT_XDR;
+ INIT_XDRREC_LINUX;
INIT_TSEARCH;
INIT_LIBIO_INTERNALS;
INIT_FOPEN;
INIT_STRMODE;
INIT_TTYENT;
INIT_PROTOENT;
+ INIT_PROTOENT_R;
INIT_NETENT;
INIT_GETMNTINFO;
INIT_MI_VECTOR_HASH;
size = 0;
}
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ // For %ms/%mc, write the allocated output buffer as well.
+ if (dir.allocate) {
+ char *buf = *(char **)argp;
+ if (buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, internal_strlen(buf) + 1);
+ }
}
}
--- /dev/null
+#if (defined(__riscv) && (__riscv_xlen == 64)) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+
+.comm _ZN14__interception10real_vforkE,8,8
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Save ra in the off-stack spill area.
+ // allocate space on stack
+ addi sp, sp, -16
+ // store ra value
+ sd ra, 8(sp)
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ // restore previous values from stack
+ ld ra, 8(sp)
+ // adjust stack
+ addi sp, sp, 16
+ // Store ra in the spill area returned in x10.
+ sd ra, 0(x10)
+
+ // Call real vfork. This may return twice. User code that runs between the
+ // first and the second return may clobber the stack frame of the
+ // interceptor; that's why it does not have a frame.
+ la x10, _ZN14__interception10real_vforkE
+ ld x10, 0(x10)
+ jalr x10
+
+ // adjust stack
+ addi sp, sp, -16
+ // Save the return value (x10) on the stack.
+ sd x10, 8(sp)
+ // jump to exit label if x10 is 0
+ beqz x10, .L_exit
+
+ // x10 != 0 => parent process. Clear stack shadow.
+ // put old sp to x10
+ addi x10, sp, 16
+ call COMMON_INTERCEPTOR_HANDLE_VFORK
+
+.L_exit:
+ // Restore ra
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ ld ra, 0(x10)
+ // Reload the saved return value (x10) from the stack.
+ ld x10, 8(sp)
+ // adjust stack
+ addi sp, sp, 16
+ ret
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
return start;
}
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+// Reserve memory range [beg, end].
+// We need to use inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
+ : !MmapFixedNoReserve(beg, size, name)) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+ if (madvise_shadow && common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
+}
+
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start) {
+ if (!size)
+ return;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ // A few pages at the start of the address space cannot be protected.
+ // But we really want to protect as much as possible, to prevent this memory
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == zero_base_shadow_start) {
+ uptr step = GetMmapGranularity();
+ while (size > step && addr < zero_base_max_shadow_start) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect the shadow gap. "
+ "%s cannot proceed correctly. ABORTING.\n",
+ SanitizerToolName);
+ DumpProcessMap();
+ Die();
+}
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ SANITIZER_RISCV64)
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
}
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
-#if !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \
+ SANITIZER_RISCV64)
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
#define errno_ENOMEM 12
#define errno_EBUSY 16
#define errno_EINVAL 22
+#define errno_ENAMETOOLONG 36
// Those might not present or their value differ on different platforms.
extern const int errno_EOWNERDEAD;
}
return parser_->ParseFile(value, ignore_missing_);
}
- bool Format(char *buffer, uptr size) {
+ bool Format(char *buffer, uptr size) override {
// Note `original_path_` isn't actually what's parsed due to `%`
// substitutions. Printing the substituted path would require holding onto
// mmap'ed memory.
COMMON_FLAG(bool, fast_unwind_on_fatal, false,
"If available, use the fast frame-pointer-based unwinder on fatal "
"errors.")
-COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+// ARM Thumb/Thumb2 frame pointers are inconsistent between GCC and Clang [1],
+// and the fast unwinder is also unreliable when ARM and Thumb code are
+// mixed [2].
+// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92172
+// [2] https://bugs.llvm.org/show_bug.cgi?id=44158
+COMMON_FLAG(bool, fast_unwind_on_malloc,
+ !(SANITIZER_LINUX && !SANITIZER_ANDROID && SANITIZER_ARM),
"If available, use the fast frame-pointer-based unwinder on "
"malloc/free.")
COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
+COMMON_FLAG(
+ bool, intercept_strcmp, true,
+ "If set, uses custom wrappers for strcmp functions to find more errors.")
COMMON_FLAG(bool, intercept_strlen, true,
"If set, uses custom wrappers for strlen and strnlen functions "
"to find more errors.")
#if SANITIZER_LINUX || SANITIZER_FUCHSIA
-# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
- SANITIZER_FUCHSIA
+# if (__GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
+ SANITIZER_FUCHSIA) && \
+ !SANITIZER_GO
# define SANITIZER_USE_GETAUXVAL 1
# else
# define SANITIZER_USE_GETAUXVAL 0
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
-#ifndef INLINE
-#define INLINE inline
-#endif
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
#define SANITIZER_WEAK_DEFAULT_IMPL \
extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
-#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+#define COMPILER_CHECK(pred) static_assert(pred, "")
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-#define IMPL_PASTE(a, b) a##b
-#define IMPL_COMPILER_ASSERT(pred, line) \
- typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
-
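+// Example: COMPILER_CHECK(sizeof(u64) == 8); a failure is now reported by
+// static_assert at compile time instead of the old negative-size-array trick.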
// Limits for integral types. We have to redefine them in case we don't
// have stdint.h (like in Visual Studio 9).
#undef __INT64_C
#if SANITIZER_LINUX && defined(__x86_64__)
#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && SANITIZER_RISCV64
+#include "sanitizer_syscall_linux_riscv64.inc"
#elif SANITIZER_LINUX && defined(__aarch64__)
#include "sanitizer_syscall_linux_aarch64.inc"
#elif SANITIZER_LINUX && defined(__arm__)
int internal_mprotect(void *addr, uptr length, int prot) {
return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
}
+
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return internal_syscall(SYSCALL(madvise), addr, length, advice);
+}
#endif
uptr internal_close(fd_t fd) {
return internal_syscall(SYSCALL(sched_yield));
}
-void internal__exit(int exitcode) {
-#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
- internal_syscall(SYSCALL(exit), exitcode);
-#else
- internal_syscall(SYSCALL(exit_group), exitcode);
-#endif
- Die(); // Unreachable.
-}
-
unsigned int internal_sleep(unsigned int seconds) {
struct timespec ts;
ts.tv_sec = seconds;
}
#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if !SANITIZER_NETBSD
+void internal__exit(int exitcode) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
+ Die(); // Unreachable.
+}
+#endif // !SANITIZER_NETBSD
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
if (ShouldMockFailureToOpen(filename))
};
#else
struct linux_dirent {
-#if SANITIZER_X32 || defined(__aarch64__)
+#if SANITIZER_X32 || defined(__aarch64__) || SANITIZER_RISCV64
u64 d_ino;
u64 d_off;
#else
unsigned long d_off;
#endif
unsigned short d_reclen;
-#ifdef __aarch64__
+#if defined(__aarch64__) || SANITIZER_RISCV64
unsigned char d_type;
#endif
char d_name[256];
#if SANITIZER_FREEBSD
int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
const void *newp, uptr newlen) {
- static decltype(sysctlbyname) *real = nullptr;
- if (!real)
- real = (decltype(sysctlbyname) *)dlsym(RTLD_NEXT, "sysctlbyname");
- CHECK(real);
- return real(sname, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
+ // Note: this function can be called during startup, so we need to avoid
+ // calling any interceptable functions. On FreeBSD >= 1300045 sysctlbyname()
+ // is a real syscall, but for older versions it calls sysctlnametomib()
+ // followed by sysctl(). To avoid calling the intercepted version and
+ // asserting if this happens during startup, call the real sysctlnametomib()
+ // followed by internal_sysctl() if the syscall is not available.
+#ifdef SYS___sysctlbyname
+ return internal_syscall(SYSCALL(__sysctlbyname), sname,
+ internal_strlen(sname), oldp, (size_t *)oldlenp, newp,
+ (size_t)newlen);
+#else
+ static decltype(sysctlnametomib) *real_sysctlnametomib = nullptr;
+ if (!real_sysctlnametomib)
+ real_sysctlnametomib =
+ (decltype(sysctlnametomib) *)dlsym(RTLD_NEXT, "sysctlnametomib");
+ CHECK(real_sysctlnametomib);
+
+ int oid[CTL_MAXNAME];
+ size_t len = CTL_MAXNAME;
+ if (real_sysctlnametomib(sname, oid, &len) == -1)
+ return (-1);
+ return internal_sysctl(oid, len, oldp, oldlenp, newp, newlen);
+#endif
}
#endif
#endif
#else
__sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
__sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
- return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
- (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
- sizeof(__sanitizer_kernel_sigset_t));
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how, (uptr)k_set,
+ (uptr)k_oldset, sizeof(__sanitizer_kernel_sigset_t));
#endif
}
// This should (does) work for both PowerPC64 Endian modes.
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+#elif SANITIZER_RISCV64
+ return (1ULL << 38) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
# elif defined(__s390x__)
: "memory", "$29" );
return res;
}
+#elif SANITIZER_RISCV64
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+ register int (*__fn)(void *) __asm__("a0") = fn;
+ register void *__stack __asm__("a1") = child_stack;
+ register int __flags __asm__("a2") = flags;
+ register void *__arg __asm__("a3") = arg;
+ register int *__ptid __asm__("a4") = parent_tidptr;
+ register void *__tls __asm__("a5") = newtls;
+ register int *__ctid __asm__("a6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mv a0,a2\n" /* flags */
+ "mv a2,a4\n" /* ptid */
+ "mv a3,a5\n" /* tls */
+ "mv a4,a6\n" /* ctid */
+ "addi a7, zero, %9\n" /* clone */
+
+ "ecall\n"
+
+      /* if (a0 != 0)
+       *   return a0;
+       */
+ "bnez a0, 1f\n"
+
+      /* In the child now; call fn(arg). fn and arg were stored at
+       * child_stack[0] and child_stack[1] above, and sp == child_stack. */
+      "ld a0, 8(sp)\n" /* arg */
+      "ld a1, 0(sp)\n" /* fn */
+ "jalr a1\n"
+
+ /* Call _exit(%r0). */
+ "addi a7, zero, %10\n"
+ "ecall\n"
+ "1:\n"
+
+ : "=r"(res)
+ : "i"(-EINVAL), "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid), "i"(__NR_clone), "i"(__NR_exit)
+ : "ra", "memory");
+ return res;
+}
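For orientation, a minimal usage sketch of internal_clone() (editor's
illustration, not part of the patch; ChildMain, the stack size, and the flag
set are assumptions, with the CLONE_* constants coming from <sched.h>):

  static int ChildMain(void *arg) {
    return *(int *)arg;  // Runs on the stack passed to internal_clone().
  }

  static void SpawnChildExample() {
    static char stack[64 * 1024];
    static int arg = 42;
    // RISC-V mandates a 16-byte-aligned stack pointer; stacks grow down.
    uptr sp = ((uptr)stack + sizeof(stack)) & ~(uptr)15;
    uptr pid = internal_clone(ChildMain, (void *)sp,
                              CLONE_VM | CLONE_FS | CLONE_FILES, &arg,
                              /*parent_tidptr=*/nullptr, /*newtls=*/nullptr,
                              /*child_tidptr=*/nullptr);
    (void)pid;  // Task id of the child on success, negative errno on failure.
  }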
#elif defined(__aarch64__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr) {
if (flag & RTLD_DEEPBIND) {
Report(
"You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
- " which is incompatibe with sanitizer runtime "
+ " which is incompatible with sanitizer runtime "
"(see https://github.com/google/sanitizers/issues/611 for details"
"). If you want to run %s library under sanitizers please remove "
"RTLD_DEEPBIND from dlopen flags.\n",
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
- || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
- || defined(__arm__)
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
// Releases memory pages entirely within the [beg, end] address range.
// The pages no longer count toward RSS; reads are guaranteed to return 0.
// Requires (but does not verify!) that pages are MAP_PRIVATE.
-INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
+inline void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
// man madvise on Linux promises zero-fill for anonymous private pages.
// Testing shows the same behaviour for private (but not anonymous) mappings
// of shm_open() files, as long as the underlying file is untouched.
#include "sanitizer_platform.h"
-#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
#include "sanitizer_allocator_internal.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
+#if SANITIZER_NETBSD
+#define _RTLD_SOURCE // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
+#endif
+
#include <dlfcn.h> // for dlsym()
#include <link.h>
#include <pthread.h>
CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
-#endif // SANITIZER_SOLARIS
+#endif // SANITIZER_SOLARIS
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
#endif
}
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
!SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
static uptr g_tls_size;
#ifdef __i386__
-# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
+#define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
#else
-# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
+#define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif
#if CHECK_GET_TLS_STATIC_INFO_VERSION
-# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
-# define DL_INTERNAL_FUNCTION
+#define DL_INTERNAL_FUNCTION
#endif
namespace {
}
#else
void InitTlsSize() { }
-#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
- // !SANITIZER_NETBSD && !SANITIZER_SOLARIS
+#endif
-#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
- defined(__arm__)) && \
+#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
+ defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
+ defined(__arm__) || SANITIZER_RISCV64) && \
SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;
#elif defined(__mips__)
// TODO(sagarthakur): add more values as per different glibc versions.
val = FIRST_32_SECOND_64(1152, 1776);
+#elif SANITIZER_RISCV64
+ int major;
+ int minor;
+ int patch;
+ if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
+ // TODO: consider adding an optional runtime check for an unknown (untested)
+ // glibc version
+ if (minor <= 28) // WARNING: the highest tested version is 2.29
+ val = 1772; // no guarantees for this one
+ else if (minor <= 31)
+ val = 1772; // tested against glibc 2.29, 2.31
+ else
+ val = 1936; // tested against glibc 2.32
+ }
+
#elif defined(__aarch64__)
// The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
val = 1776;
return kThreadSelfOffset;
}
-#if defined(__mips__) || defined(__powerpc64__)
+#if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
static uptr TlsPreTcbSize() {
-# if defined(__mips__)
+#if defined(__mips__)
const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
const uptr kTcbHead = 88; // sizeof (tcbhead_t)
-# endif
+#elif SANITIZER_RISCV64
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+#endif
const uptr kTlsAlign = 16;
const uptr kTlsPreTcbSize =
RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
uptr ThreadSelf() {
uptr descr_addr;
-# if defined(__i386__)
+#if defined(__i386__)
asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
-# elif defined(__mips__)
+#elif defined(__mips__)
// MIPS uses TLS variant I. The thread pointer (in hardware register $29)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
rdhwr %0,$29;\
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
-# elif defined(__aarch64__) || defined(__arm__)
+#elif defined(__aarch64__) || defined(__arm__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
ThreadDescriptorSize();
-# elif defined(__s390__)
+#elif SANITIZER_RISCV64
+ uptr tcb_end;
+ asm volatile("mv %0, tp;\n" : "=r"(tcb_end));
+ // https://github.com/riscv/riscv-elf-psabi-doc/issues/53
+ const uptr kTlsTcbOffset = 0x800;
+ descr_addr =
+ reinterpret_cast<uptr>(tcb_end - kTlsTcbOffset - TlsPreTcbSize());
+#elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
-# elif defined(__powerpc64__)
+#elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
// points to the end of the TCB + 0x7000. The pthread_descr structure is
// immediately in front of the TCB. TlsPreTcbSize() includes the size of the
uptr thread_pointer;
asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
descr_addr = thread_pointer - TlsPreTcbSize();
-# else
-# error "unsupported CPU arch"
-# endif
+#else
+#error "unsupported CPU arch"
+#endif
return descr_addr;
}
#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX
#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
void **segbase = 0;
-# if defined(__i386__)
+#if defined(__i386__)
// sysarch(I386_GET_GSBASE, segbase);
__asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
-# elif defined(__x86_64__)
+#elif defined(__x86_64__)
// sysarch(AMD64_GET_FSBASE, segbase);
__asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
-# else
-# error "unsupported CPU arch"
-# endif
+#else
+#error "unsupported CPU arch"
+#endif
return segbase;
}
#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
- return (struct tls_tcb *)_lwp_getprivate();
+ struct tls_tcb *tcb = nullptr;
+#ifdef __HAVE___LWP_GETTCB_FAST
+ tcb = (struct tls_tcb *)__lwp_gettcb_fast();
+#elif defined(__HAVE___LWP_GETPRIVATE_FAST)
+ tcb = (struct tls_tcb *)__lwp_getprivate_fast();
+#endif
+ return tcb;
}
uptr ThreadSelf() {
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
*addr = ThreadSelf();
*size = GetTlsSize();
*addr -= *size;
*addr += ThreadDescriptorSize();
-# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
- || defined(__arm__)
+#elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__arm__) || SANITIZER_RISCV64
*addr = ThreadSelf();
*size = GetTlsSize();
-# else
+#else
*addr = 0;
*size = 0;
-# endif
+#endif
#elif SANITIZER_FREEBSD
void** segbase = ThreadSelfSegbase();
*addr = 0;
*addr = 0;
*size = 0;
#else
-# error "Unknown OS"
+#error "Unknown OS"
#endif
}
#endif
#if !SANITIZER_GO
uptr GetTlsSize() {
-#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
SANITIZER_OPENBSD || SANITIZER_SOLARIS
uptr addr, size;
GetTls(&addr, &size);
return size;
-#elif defined(__mips__) || defined(__powerpc64__)
+#elif defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
return g_tls_size;
#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
typedef ElfW(Phdr) Elf_Phdr;
-#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
-#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
struct DlIteratePhdrData {
InternalMmapVectorNoCtor<LoadedModule> *modules;
#elif SANITIZER_SOLARIS
return sysconf(_SC_NPROCESSORS_ONLN);
#else
-#if defined(CPU_COUNT)
cpu_set_t CPUs;
CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
return CPU_COUNT(&CPUs);
-#else
- return 1;
-#endif
#endif
}
#if SANITIZER_LINUX
-# if SANITIZER_ANDROID
+#if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
if (&android_set_abort_message)
android_set_abort_message(str);
}
-# else
+#else
void AndroidLogInit() {}
static bool ShouldLogAfterPrintf() { return true; }
void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
void SetAbortMessage(const char *str) {}
-# endif // SANITIZER_ANDROID
+#endif // SANITIZER_ANDROID
void LogMessageOnPrintf(const char *str) {
if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
// initialized after the vDSO function pointers, so if it exists, is not null
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
-INLINE bool CanUseVDSO() {
+inline bool CanUseVDSO() {
// Bionic is safe, it checks for the vDSO function pointers to be initialized.
if (SANITIZER_ANDROID)
return true;
}
#endif // !SANITIZER_OPENBSD
+void UnmapFromTo(uptr from, uptr to) {
+ if (to == from)
+ return;
+ CHECK(to >= from);
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, to - from, to - from, (void *)from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+ const uptr map_size = shadow_size + left_padding + alignment;
+
+ const uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
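A worked example of the sizing arithmetic above (editor's sketch with assumed
values, not part of the patch):

  // Assume granularity = 4096 (4 KiB pages), shadow_scale = 3 (ASan's 1:8
  // shadow) and min_shadow_base_alignment = 0. Then:
  //   alignment    = Max(4096 << 3, 1)  = 32768
  //   left_padding = Max(4096, 1)       = 4096
  //   map_size     = shadow_size + 4096 + 32768
  // Over-reserving by alignment + left_padding guarantees that an aligned
  // shadow_start with a guard in front fits somewhere in the reservation;
  // the unused head and tail are then released with UnmapFromTo().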
+
} // namespace __sanitizer
#endif
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
-#include "sanitizer_placement_new.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_ptrauth.h"
extern char **environ;
#endif
-#if defined(__has_include) && __has_include(<os/trace.h>) && defined(__BLOCKS__)
+#if defined(__has_include) && __has_include(<os/trace.h>)
#define SANITIZER_OS_TRACE 1
#include <os/trace.h>
#else
return mprotect(addr, length, prot);
}
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return madvise((void *)addr, length, advice);
+}
+
uptr internal_close(fd_t fd) {
return close(fd);
}
// pthread_get_stacksize_np() returns an incorrect stack size for the main
// thread on Mavericks. See
// https://github.com/google/sanitizers/issues/261
- if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
+ if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&
stacksize == (1 << 19)) {
struct rlimit rl;
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
return result;
}
-MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
+// Offset example:
+// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4
+constexpr u16 GetOSMajorKernelOffset() {
+ if (TARGET_OS_OSX) return 4;
+ if (TARGET_OS_IOS || TARGET_OS_TV) return 6;
+ if (TARGET_OS_WATCH) return 13;
+}
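+// e.g. on macOS, XNU 17 corresponds to OS major 17 - 4 = 13 (macOS 10.13);
+// on watchOS, XNU 17 corresponds to 17 - 13 = 4 (watchOS 4).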
+
+using VersStr = char[64];
-MacosVersion GetMacosVersionInternal() {
- int mib[2] = { CTL_KERN, KERN_OSRELEASE };
- char version[100];
- uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
- for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
- // Get the version length.
- CHECK_NE(internal_sysctl(mib, 2, 0, &len, 0, 0), -1);
- CHECK_LT(len, maxlen);
- CHECK_NE(internal_sysctl(mib, 2, version, &len, 0, 0), -1);
+static void GetOSVersion(VersStr vers) {
+ uptr len = sizeof(VersStr);
+ if (SANITIZER_IOSSIM) {
+ const char *vers_env = GetEnv("SIMULATOR_RUNTIME_VERSION");
+ if (!vers_env) {
+ Report("ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env "
+ "var is not set.\n");
+ Die();
+ }
+ len = internal_strlcpy(vers, vers_env, len);
+ } else {
+ int res =
+ internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);
+ if (res) {
+ // Fallback for XNU 17 (macOS 10.13) and below that do not provide the
+ // `kern.osproductversion` property.
+ u16 kernel_major = GetDarwinKernelVersion().major;
+ u16 offset = GetOSMajorKernelOffset();
+ CHECK_LE(kernel_major, 17);
+ CHECK_GE(kernel_major, offset);
+ u16 os_major = kernel_major - offset;
+
+ auto format = TARGET_OS_OSX ? "10.%d" : "%d.0";
+ len = internal_snprintf(vers, len, format, os_major);
+ }
+ }
+ CHECK_LT(len, sizeof(VersStr));
+}
- // Expect <major>.<minor>(.<patch>)
- CHECK_GE(len, 3);
- const char *p = version;
- int major = internal_simple_strtoll(p, &p, /*base=*/10);
- if (*p != '.') return MACOS_VERSION_UNKNOWN;
+void ParseVersion(const char *vers, u16 *major, u16 *minor) {
+ // Format: <major>.<minor>[.<patch>]\0
+ CHECK_GE(internal_strlen(vers), 3);
+ const char *p = vers;
+ *major = internal_simple_strtoll(p, &p, /*base=*/10);
+ CHECK_EQ(*p, '.');
p += 1;
- int minor = internal_simple_strtoll(p, &p, /*base=*/10);
- if (*p != '.') return MACOS_VERSION_UNKNOWN;
-
- switch (major) {
- case 11: return MACOS_VERSION_LION;
- case 12: return MACOS_VERSION_MOUNTAIN_LION;
- case 13: return MACOS_VERSION_MAVERICKS;
- case 14: return MACOS_VERSION_YOSEMITE;
- case 15: return MACOS_VERSION_EL_CAPITAN;
- case 16: return MACOS_VERSION_SIERRA;
- case 17: return MACOS_VERSION_HIGH_SIERRA;
- case 18: return MACOS_VERSION_MOJAVE;
- case 19: return MACOS_VERSION_CATALINA;
- default:
- if (major < 9) return MACOS_VERSION_UNKNOWN;
- return MACOS_VERSION_UNKNOWN_NEWER;
+ *minor = internal_simple_strtoll(p, &p, /*base=*/10);
+}
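+// e.g. ParseVersion("10.15.7", &major, &minor) yields major = 10, minor = 15;
+// a trailing ".<patch>" component is accepted but ignored.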
+
+// Aligned versions example:
+// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6
+static void MapToMacos(u16 *major, u16 *minor) {
+ if (TARGET_OS_OSX)
+ return;
+
+ if (TARGET_OS_IOS || TARGET_OS_TV)
+ *major += 2;
+ else if (TARGET_OS_WATCH)
+ *major += 9;
+ else
+ UNREACHABLE("unsupported platform");
+
+ if (*major >= 16) { // macOS 11+
+ *major -= 5;
+ } else { // macOS 10.15 and below
+ *minor = *major;
+ *major = 10;
}
}
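+// e.g. iOS 14 maps to 14 + 2 = 16 -> macOS 11.0, while watchOS 6 maps to
+// 6 + 9 = 15 -> macOS 10.15, matching the alignment table above.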
-MacosVersion GetMacosVersion() {
- atomic_uint32_t *cache =
- reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
- MacosVersion result =
- static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
- if (result == MACOS_VERSION_UNINITIALIZED) {
- result = GetMacosVersionInternal();
- atomic_store(cache, result, memory_order_release);
+static MacosVersion GetMacosAlignedVersionInternal() {
+ VersStr vers;
+ GetOSVersion(vers);
+
+ u16 major, minor;
+ ParseVersion(vers, &major, &minor);
+ MapToMacos(&major, &minor);
+
+ return MacosVersion(major, minor);
+}
+
+static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),
+ "MacosVersion cache size");
+static atomic_uint32_t cached_macos_version;
+
+MacosVersion GetMacosAlignedVersion() {
+ atomic_uint32_t::Type result =
+ atomic_load(&cached_macos_version, memory_order_acquire);
+ if (!result) {
+ MacosVersion version = GetMacosAlignedVersionInternal();
+ result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);
+ atomic_store(&cached_macos_version, result, memory_order_release);
}
- return result;
+ return *reinterpret_cast<MacosVersion *>(&result);
}
DarwinKernelVersion GetDarwinKernelVersion() {
- char buf[100];
- size_t len = sizeof(buf);
- int res = internal_sysctlbyname("kern.osrelease", buf, &len, nullptr, 0);
+ VersStr vers;
+ uptr len = sizeof(VersStr);
+ int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
CHECK_EQ(res, 0);
+ CHECK_LT(len, sizeof(VersStr));
- // Format: <major>.<minor>.<patch>\0
- CHECK_GE(len, 6);
- const char *p = buf;
- u16 major = internal_simple_strtoll(p, &p, /*base=*/10);
- CHECK_EQ(*p, '.');
- p += 1;
- u16 minor = internal_simple_strtoll(p, &p, /*base=*/10);
+ u16 major, minor;
+ ParseVersion(vers, &major, &minor);
return DarwinKernelVersion(major, minor);
}
#if !SANITIZER_GO
// Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
- if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {
// os_trace requires the message (format parameter) to be a string literal.
if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
sizeof("AddressSanitizer") - 1) == 0)
GetPcSpBp(context, &pc, &sp, &bp);
}
+// ASan/TSan use mmap in a way that creates "deallocation gaps" which trigger
+// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).
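+// The function is resolved with dlsym() so the runtime keeps working on older
+// systems where task_set_exc_guard_behavior() is not exported.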
+static void DisableMmapExcGuardExceptions() {
+ using task_exc_guard_behavior_t = uint32_t;
+ using task_set_exc_guard_behavior_t =
+ kern_return_t(task_t task, task_exc_guard_behavior_t behavior);
+ auto *set_behavior = (task_set_exc_guard_behavior_t *)dlsym(
+ RTLD_DEFAULT, "task_set_exc_guard_behavior");
+ if (set_behavior == nullptr) return;
+ const task_exc_guard_behavior_t task_exc_guard_none = 0;
+ set_behavior(mach_task_self(), task_exc_guard_none);
+}
+
void InitializePlatformEarly() {
// Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
use_xnu_fast_mmap =
#else
false;
#endif
+ if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
+ DisableMmapExcGuardExceptions();
}
#if !SANITIZER_GO
return false;
}
-extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
-static const double kMinDyldVersionWithAutoInterposition = 360.0;
-
-bool DyldNeedsEnvVariable() {
- // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
- // still may want use them on older systems. On older Darwin platforms, dyld
- // doesn't export dyldVersionNumber symbol and we simply return true.
- if (!&dyldVersionNumber) return true;
+static bool DyldNeedsEnvVariable() {
// If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
- // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
- // GetMacosVersion() doesn't work for the simulator. Let's instead check
- // `dyldVersionNumber`, which is exported by dyld, against a known version
- // number from the first OS release where this appeared.
- return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+ // DYLD_INSERT_LIBRARIES is not set.
+ return GetMacosAlignedVersion() < MacosVersion(10, 11);
}
void MaybeReexec() {
return GetMaxUserVirtualAddress();
}
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ uptr space_size = shadow_size_bytes + left_padding;
+
+ uptr largest_gap_found = 0;
+ uptr max_occupied_addr = 0;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ uptr shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity,
+ &largest_gap_found, &max_occupied_addr);
+ // If the shadow doesn't fit, restrict the address space to make it fit.
+ if (shadow_start == 0) {
+ VReport(
+ 2,
+ "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
+ largest_gap_found, max_occupied_addr);
+ uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
+ if (new_max_vm < max_occupied_addr) {
+ Report("Unable to find a memory range for dynamic shadow.\n");
+ Report(
+ "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
+ "new_max_vm = %p\n",
+ space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ CHECK(0 && "cannot place shadow");
+ }
+ RestrictMemoryToMaxAddress(new_max_vm);
+ high_mem_end = new_max_vm - 1;
+ space_size = (high_mem_end >> shadow_scale) + left_padding;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+ nullptr, nullptr);
+ if (shadow_start == 0) {
+ Report("Unable to find a memory range after restricting VM.\n");
+ CHECK(0 && "cannot place shadow after restricting vm");
+ }
+ }
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
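To make the fallback path above concrete (editor's sketch, assumed numbers):

  // Suppose shadow_scale = 3 and FindAvailableMemoryRange() reports a largest
  // free gap of 4 GiB. Capping the VM at RoundDownTo(4 GiB << 3, alignment) =
  // 32 GiB means all addressable memory needs a 1:8 shadow of at most 4 GiB,
  // which is exactly what the gap can hold.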
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
bool current_instrumented;
};
-enum MacosVersion {
- MACOS_VERSION_UNINITIALIZED = 0,
- MACOS_VERSION_UNKNOWN,
- MACOS_VERSION_LION, // macOS 10.7; oldest currently supported
- MACOS_VERSION_MOUNTAIN_LION,
- MACOS_VERSION_MAVERICKS,
- MACOS_VERSION_YOSEMITE,
- MACOS_VERSION_EL_CAPITAN,
- MACOS_VERSION_SIERRA,
- MACOS_VERSION_HIGH_SIERRA,
- MACOS_VERSION_MOJAVE,
- MACOS_VERSION_CATALINA,
- MACOS_VERSION_UNKNOWN_NEWER
-};
-
-struct DarwinKernelVersion {
+template <typename VersionType>
+struct VersionBase {
u16 major;
u16 minor;
- DarwinKernelVersion(u16 major, u16 minor) : major(major), minor(minor) {}
+ VersionBase(u16 major, u16 minor) : major(major), minor(minor) {}
- bool operator==(const DarwinKernelVersion &other) const {
+ bool operator==(const VersionType &other) const {
return major == other.major && minor == other.minor;
}
- bool operator>=(const DarwinKernelVersion &other) const {
- return major >= other.major ||
+ bool operator>=(const VersionType &other) const {
+ return major > other.major ||
(major == other.major && minor >= other.minor);
}
+ bool operator<(const VersionType &other) const { return !(*this >= other); }
+};
+
+struct MacosVersion : VersionBase<MacosVersion> {
+ MacosVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
+};
+
+struct DarwinKernelVersion : VersionBase<DarwinKernelVersion> {
+ DarwinKernelVersion(u16 major, u16 minor) : VersionBase(major, minor) {}
};
-MacosVersion GetMacosVersion();
+MacosVersion GetMacosAlignedVersion();
DarwinKernelVersion GetDarwinKernelVersion();
char **GetEnviron();
namespace __sanitizer {
static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
-INLINE void CRAppendCrashLogMessage(const char *msg) {
+inline void CRAppendCrashLogMessage(const char *msg) {
BlockingMutexLock l(&crashreporter_info_mutex);
internal_strlcat(__crashreporter_info_buff__, msg,
sizeof(__crashreporter_info_buff__)); }
return _REAL(mprotect, addr, length, prot);
}
+int internal_madvise(uptr addr, uptr length, int advice) {
+ DEFINE__REAL(int, madvise, void *a, uptr b, int c);
+ return _REAL(madvise, (void *)addr, length, advice);
+}
+
uptr internal_close(fd_t fd) {
CHECK(&_sys_close);
return _sys_close(fd);
return mprotect(addr, length, prot);
}
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return madvise((void *)addr, length, advice);
+}
+
int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
const void *newp, uptr newlen) {
Printf("internal_sysctlbyname not implemented for OpenBSD");
# define SANITIZER_X32 0
#endif
+#if defined(__i386__) || defined(_M_IX86)
+# define SANITIZER_I386 1
+#else
+# define SANITIZER_I386 0
+#endif
+
#if defined(__mips__)
# define SANITIZER_MIPS 1
# if defined(__mips64)
# define SANITIZER_MYRIAD2 0
#endif
+#if defined(__riscv) && (__riscv_xlen == 64)
+#define SANITIZER_RISCV64 1
+#else
+#define SANITIZER_RISCV64 0
+#endif
+
// By default we allow to use SizeClassAllocator64 on 64-bit platform.
// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
// does not work well and we need to fallback to SizeClassAllocator32.
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#elif SANITIZER_RISCV64
+#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 38)
#elif defined(__aarch64__)
# if SANITIZER_MAC
// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
#include "sanitizer_glibc_version.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#if SANITIZER_POSIX
# define SI_POSIX 1
(SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
(SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CLOCK_GETCPUCLOCKID SI_LINUX
#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
#define SANITIZER_INTERCEPT_TIME SI_POSIX
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX
#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX
+#define SANITIZER_INTERCEPT_SYSMSG SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX
#define SANITIZER_INTERCEPT_IOCTL SI_POSIX
#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_POSIX
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
-#if SI_LINUX_NOT_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+#if SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__) || SANITIZER_RISCV64)
#define SANITIZER_INTERCEPT_PTRACE 1
#else
#define SANITIZER_INTERCEPT_PTRACE 0
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
#define SANITIZER_INTERCEPT_SIGSETOPS \
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGSET_LOGICOPS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
#define SANITIZER_INTERCEPT_STATFS64 \
- ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+ (((SI_MAC && !TARGET_CPU_ARM64) && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_PTSNAME SI_LINUX
+#define SANITIZER_INTERCEPT_PTSNAME_R SI_LINUX
#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
#define SANITIZER_INTERCEPT_FTIME \
(!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX)
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_XDRREC SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TSEARCH \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MMAP SI_POSIX
#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
+ SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_MEMALIGN \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT_PVALLOC \
+#define SANITIZER_INTERCEPT_PVALLOC \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
-#define SANITIZER_INTERCEPT_CFREE \
+ SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
+#define SANITIZER_INTERCEPT_CFREE \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
- SI_NOT_RTEMS)
+ SI_NOT_RTEMS && !SI_SOLARIS) // NOLINT
#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE \
#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
-#define SANITIZER_INTERCEPT_PROTOENT SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT (SI_NETBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_PROTOENT_R (SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
#define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \
SI_LINUX || SI_MAC)
#define SANITIZER_INTERCEPT_QSORT \
(SI_POSIX && !SI_IOSSIM && !SI_WATCHOS && !SI_TVOS && !SI_ANDROID)
#define SANITIZER_INTERCEPT_QSORT_R (SI_LINUX && !SI_ANDROID)
-#define SANITIZER_INTERCEPT_SIGALTSTACK SI_POSIX
+// sigaltstack on i386 macOS cannot be intercepted because setjmp() calls it
+// and assumes that it does not clobber registers.
+#define SANITIZER_INTERCEPT_SIGALTSTACK \
+ (SI_POSIX && !(SANITIZER_MAC && SANITIZER_I386))
#define SANITIZER_INTERCEPT_UNAME (SI_POSIX && !SI_FREEBSD)
#define SANITIZER_INTERCEPT___XUNAME SI_FREEBSD
#include <sys/shm.h>
#undef _KERNEL
-#undef INLINE // to avoid clashes with sanitizers' definitions
-
#undef IOC_DIRMASK
// Include these after system headers to avoid name clashes and ambiguities.
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
-// fine with newer headers, too. Beware that with <sys/stat.h>, struct stat
-// takes the form of struct stat64 on 32-bit platforms if _FILE_OFFSET_BITS=64.
-// Also, for some platforms (e.g. mips) there are additional members in the
-// <sys/stat.h> struct stat:s.
+// fine with newer headers, too.
#include <linux/posix_types.h>
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
#define ino_t __kernel_ino_t
#include <sys/chio.h>
#include <sys/clockctl.h>
#include <sys/cpuio.h>
+#include <sys/dkbad.h>
#include <sys/dkio.h>
#include <sys/drvctlio.h>
#include <sys/dvdio.h>
#include <sys/resource.h>
#include <sys/sem.h>
+#include <sys/scsiio.h>
#include <sys/sha1.h>
#include <sys/sha2.h>
#include <sys/shm.h>
#include <dev/ir/irdaio.h>
#include <dev/isa/isvio.h>
#include <dev/isa/wtreg.h>
+#if __has_include(<dev/iscsi/iscsi_ioctl.h>)
#include <dev/iscsi/iscsi_ioctl.h>
+#else
+/* Fallback for MKISCSI=no */
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+} iscsi_conn_status_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint16_t interface_version;
+ uint16_t major;
+ uint16_t minor;
+ uint8_t version_string[224];
+} iscsi_get_version_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+ struct {
+ unsigned int immediate : 1;
+ } options;
+ uint64_t lun;
+ scsireq_t req; /* from <sys/scsiio.h> */
+} iscsi_iocommand_parameters_t;
+
+typedef enum {
+ ISCSI_AUTH_None = 0,
+ ISCSI_AUTH_CHAP = 1,
+ ISCSI_AUTH_KRB5 = 2,
+ ISCSI_AUTH_SRP = 3
+} iscsi_auth_types_t;
+
+typedef enum {
+ ISCSI_LOGINTYPE_DISCOVERY = 0,
+ ISCSI_LOGINTYPE_NOMAP = 1,
+ ISCSI_LOGINTYPE_MAP = 2
+} iscsi_login_session_type_t;
+
+typedef enum { ISCSI_DIGEST_None = 0, ISCSI_DIGEST_CRC32C = 1 } iscsi_digest_t;
+
+typedef enum {
+ ISCSI_SESSION_TERMINATED = 1,
+ ISCSI_CONNECTION_TERMINATED,
+ ISCSI_RECOVER_CONNECTION,
+ ISCSI_DRIVER_TERMINATING
+} iscsi_event_t;
+
+typedef struct {
+ unsigned int mutual_auth : 1;
+ unsigned int is_secure : 1;
+ unsigned int auth_number : 4;
+ iscsi_auth_types_t auth_type[4];
+} iscsi_auth_info_t;
+
+typedef struct {
+ uint32_t status;
+ int socket;
+ struct {
+ unsigned int HeaderDigest : 1;
+ unsigned int DataDigest : 1;
+ unsigned int MaxConnections : 1;
+ unsigned int DefaultTime2Wait : 1;
+ unsigned int DefaultTime2Retain : 1;
+ unsigned int MaxRecvDataSegmentLength : 1;
+ unsigned int auth_info : 1;
+ unsigned int user_name : 1;
+ unsigned int password : 1;
+ unsigned int target_password : 1;
+ unsigned int TargetName : 1;
+ unsigned int TargetAlias : 1;
+ unsigned int ErrorRecoveryLevel : 1;
+ } is_present;
+ iscsi_auth_info_t auth_info;
+ iscsi_login_session_type_t login_type;
+ iscsi_digest_t HeaderDigest;
+ iscsi_digest_t DataDigest;
+ uint32_t session_id;
+ uint32_t connection_id;
+ uint32_t MaxRecvDataSegmentLength;
+ uint16_t MaxConnections;
+ uint16_t DefaultTime2Wait;
+ uint16_t DefaultTime2Retain;
+ uint16_t ErrorRecoveryLevel;
+ void *user_name;
+ void *password;
+ void *target_password;
+ void *TargetName;
+ void *TargetAlias;
+} iscsi_login_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+} iscsi_logout_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t event_id;
+} iscsi_register_event_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ uint32_t connection_id;
+} iscsi_remove_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t session_id;
+ void *response_buffer;
+ uint32_t response_size;
+ uint32_t response_used;
+ uint32_t response_total;
+ uint8_t key[224];
+} iscsi_send_targets_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint8_t InitiatorName[224];
+ uint8_t InitiatorAlias[224];
+ uint8_t ISID[6];
+} iscsi_set_node_name_parameters_t;
+
+typedef struct {
+ uint32_t status;
+ uint32_t event_id;
+ iscsi_event_t event_kind;
+ uint32_t session_id;
+ uint32_t connection_id;
+ uint32_t reason;
+} iscsi_wait_event_parameters_t;
+
+#define ISCSI_GET_VERSION _IOWR(0, 1, iscsi_get_version_parameters_t)
+#define ISCSI_LOGIN _IOWR(0, 2, iscsi_login_parameters_t)
+#define ISCSI_LOGOUT _IOWR(0, 3, iscsi_logout_parameters_t)
+#define ISCSI_ADD_CONNECTION _IOWR(0, 4, iscsi_login_parameters_t)
+#define ISCSI_RESTORE_CONNECTION _IOWR(0, 5, iscsi_login_parameters_t)
+#define ISCSI_REMOVE_CONNECTION _IOWR(0, 6, iscsi_remove_parameters_t)
+#define ISCSI_CONNECTION_STATUS _IOWR(0, 7, iscsi_conn_status_parameters_t)
+#define ISCSI_SEND_TARGETS _IOWR(0, 8, iscsi_send_targets_parameters_t)
+#define ISCSI_SET_NODE_NAME _IOWR(0, 9, iscsi_set_node_name_parameters_t)
+#define ISCSI_IO_COMMAND _IOWR(0, 10, iscsi_iocommand_parameters_t)
+#define ISCSI_REGISTER_EVENT _IOWR(0, 11, iscsi_register_event_parameters_t)
+#define ISCSI_DEREGISTER_EVENT _IOWR(0, 12, iscsi_register_event_parameters_t)
+#define ISCSI_WAIT_EVENT _IOWR(0, 13, iscsi_wait_event_parameters_t)
+#define ISCSI_POLL_EVENT _IOWR(0, 14, iscsi_wait_event_parameters_t)
+#endif
#include <dev/ofw/openfirmio.h>
#include <dev/pci/amrio.h>
#include <dev/pci/mlyreg.h>
#include "sanitizer_platform_limits_netbsd.h"
namespace __sanitizer {
-void *__sanitizer_get_link_map_by_dlopen_handle(void* handle) {
+void *__sanitizer_get_link_map_by_dlopen_handle(void *handle) {
void *p = nullptr;
return internal_dlinfo(handle, RTLD_DI_LINKMAP, &p) == 0 ? p : nullptr;
}
namespace __sanitizer {
void *__sanitizer_get_link_map_by_dlopen_handle(void *handle);
-# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
- (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ (link_map *)__sanitizer_get_link_map_by_dlopen_handle(handle)
extern unsigned struct_utsname_sz;
extern unsigned struct_stat_sz;
extern unsigned struct_nvlist_ref_sz;
extern unsigned struct_StringList_sz;
-
// A special value to mark ioctls that are not present on the target platform,
// when it can not be determined without including any system headers.
extern const unsigned IOCTL_NOT_PRESENT;
-
extern unsigned IOCTL_AFM_ADDFMAP;
extern unsigned IOCTL_AFM_DELFMAP;
extern unsigned IOCTL_AFM_CLEANFMAP;
#if SANITIZER_LINUX
# include <utime.h>
# include <sys/ptrace.h>
-# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
+#if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \
+ SANITIZER_RISCV64
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
namespace __sanitizer {
unsigned struct_utsname_sz = sizeof(struct utsname);
unsigned struct_stat_sz = sizeof(struct stat);
-#if !SANITIZER_IOS
+#if !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
unsigned struct_stat64_sz = sizeof(struct stat64);
-#endif // !SANITIZER_IOS
+#endif // !SANITIZER_IOS && !(SANITIZER_MAC && TARGET_CPU_ARM64)
unsigned struct_rusage_sz = sizeof(struct rusage);
unsigned struct_tm_sz = sizeof(struct tm);
unsigned struct_passwd_sz = sizeof(struct passwd);
unsigned struct_regex_sz = sizeof(regex_t);
unsigned struct_regmatch_sz = sizeof(regmatch_t);
-#if SANITIZER_MAC && !SANITIZER_IOS
+#if (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
unsigned struct_statfs64_sz = sizeof(struct statfs64);
-#endif // SANITIZER_MAC && !SANITIZER_IOS
+#endif // (SANITIZER_MAC && !TARGET_CPU_ARM64) && !SANITIZER_IOS
#if !SANITIZER_ANDROID
unsigned struct_fstab_sz = sizeof(struct fstab);
#if SANITIZER_LINUX && !SANITIZER_ANDROID
// Use pre-computed size of struct ustat to avoid <sys/ustat.h> which
// has been removed from glibc 2.28.
-#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
- || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
- || defined(__x86_64__) || (defined(__riscv) && __riscv_xlen == 64)
+#if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \
+ defined(__x86_64__) || SANITIZER_RISCV64
#define SIZEOF_STRUCT_USTAT 32
#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
|| defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
int glob_altdirfunc = GLOB_ALTDIRFUNC;
#endif
-#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
- (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__) || SANITIZER_RISCV64)
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#elif SANITIZER_RISCV64
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
- defined(__aarch64__) || defined(__arm__) || defined(__s390__)
+ defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \
+ SANITIZER_RISCV64
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#elif defined(__mips__)
const unsigned struct_kernel_stat_sz = SANITIZER_ANDROID
? FIRST_32_SECOND_64(104, 128)
- : FIRST_32_SECOND_64(144, 216);
+ : FIRST_32_SECOND_64(160, 216);
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct___old_kernel_stat_sz = 0;
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__riscv) && __riscv_xlen == 64
+#elif SANITIZER_RISCV64
const unsigned struct_kernel_stat_sz = 128;
-const unsigned struct_kernel_stat64_sz = 104;
+const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
extern unsigned struct_ElfW_Phdr_sz;
#endif
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
struct __sanitizer_addrinfo {
int ai_flags;
int ai_family;
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
- defined(__s390__))
+ defined(__s390__) || SANITIZER_RISCV64)
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
-CHECK_TYPE_SIZE(glob_t);
+// There are additional fields we are not interested in.
+COMPILER_CHECK(sizeof(__sanitizer_glob_t) <= sizeof(glob_t));
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
bool SignalContext::IsMemoryAccess() const {
auto si = static_cast<const siginfo_t *>(siginfo);
- return si->si_signo == SIGSEGV;
+ return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
}
int SignalContext::GetType() const {
int fd = ReserveStandardFds(
internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
CHECK_GE(fd, 0);
- if (!o_cloexec) {
- int res = fcntl(fd, F_SETFD, FD_CLOEXEC);
- CHECK_EQ(0, res);
- }
int res = internal_ftruncate(fd, size);
+#if !defined(O_CLOEXEC)
+ res = fcntl(fd, F_SETFD, FD_CLOEXEC);
+ CHECK_EQ(0, res);
+#endif
CHECK_EQ(0, res);
res = internal_unlink(shmname);
CHECK_EQ(0, res);
int fd, u64 offset);
uptr internal_munmap(void *addr, uptr length);
int internal_mprotect(void *addr, uptr length, int prot);
+int internal_madvise(uptr addr, uptr length, int advice);
// OS
uptr internal_filesize(fd_t fd); // -1 on error.
uptr beg_aligned = RoundUpTo(beg, page_size);
uptr end_aligned = RoundDownTo(end, page_size);
if (beg_aligned < end_aligned)
- // In the default Solaris compilation environment, madvise() is declared
- // to take a caddr_t arg; casting it to void * results in an invalid
- // conversion error, so use char * instead.
- madvise((char *)beg_aligned, end_aligned - beg_aligned,
- SANITIZER_MADVISE_DONTNEED);
+ internal_madvise(beg_aligned, end_aligned - beg_aligned,
+ SANITIZER_MADVISE_DONTNEED);
}
void SetShadowRegionHugePageMode(uptr addr, uptr size) {
#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
if (common_flags()->no_huge_pages_for_shadow)
- madvise((char *)addr, size, MADV_NOHUGEPAGE);
+ internal_madvise(addr, size, MADV_NOHUGEPAGE);
else
- madvise((char *)addr, size, MADV_HUGEPAGE);
+ internal_madvise(addr, size, MADV_HUGEPAGE);
#endif // MADV_NOHUGEPAGE
}
bool DontDumpShadowMemory(uptr addr, uptr length) {
#if defined(MADV_DONTDUMP)
- return madvise((char *)addr, length, MADV_DONTDUMP) == 0;
+ return internal_madvise(addr, length, MADV_DONTDUMP) == 0;
#elif defined(MADV_NOCORE)
- return madvise((char *)addr, length, MADV_NOCORE) == 0;
+ return internal_madvise(addr, length, MADV_NOCORE) == 0;
#else
return true;
#endif // MADV_DONTDUMP
char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
if (data_.current >= last) return false;
- prxmap_t *xmapentry = (prxmap_t*)data_.current;
+ prxmap_t *xmapentry =
+ const_cast<prxmap_t *>(reinterpret_cast<const prxmap_t *>(data_.current));
segment->start = (uptr)xmapentry->pr_vaddr;
segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);
#define ptrauth_string_discriminator(__string) ((int)0)
#endif
+#define STRIP_PC(pc) ((uptr)ptrauth_strip(pc, 0))
+
#endif // SANITIZER_PTRAUTH_H
INTERCEPTOR(int, sigaction_symname, int signum,
const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
- if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) {
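+    // The sanitizer handles this signal exclusively: discard the user's new
+    // handler (act = nullptr), but still report the installed one via oldact.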
+ if (!oldact) return 0;
+ act = nullptr;
+ }
SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
}
#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
return _REAL(mprotect)(addr, length, prot);
}
+// Illumos' declaration of madvise cannot be made visible if _XOPEN_SOURCE
+// is defined as g++ does on Solaris.
+//
+// This declaration is consistent with Solaris 11.4. Both Illumos and Solaris
+// versions older than 11.4 declared madvise with a caddr_t as the first
+// argument, but we don't currently support Solaris versions older than 11.4,
+// and as mentioned above the declaration is not visible on Illumos so we can
+// use any declaration we like on Illumos.
+extern "C" int madvise(void *, size_t, int);
+
+int internal_madvise(uptr addr, uptr length, int advice) {
+ return madvise((void *)addr, length, advice);
+}
+
DECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {
return _REAL(close)(fd);
}
return sched_yield();
}
-DECLARE__REAL_AND_INTERNAL(void, _exit, int exitcode) {
- _exit(exitcode);
-}
-
DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
char *const argv[], char *const envp[]) {
return _REAL(execve)(filename, argv, envp);
theDepot.UnlockAll();
}
+void StackDepotPrintAll() {
+#if !SANITIZER_GO
+ theDepot.PrintAll();
+#endif
+}
+
bool StackDepotReverseMap::IdDescPair::IdComparator(
const StackDepotReverseMap::IdDescPair &a,
const StackDepotReverseMap::IdDescPair &b) {
void StackDepotLockAll();
void StackDepotUnlockAll();
+void StackDepotPrintAll();
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H
+#include <stdio.h>
+
+#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
void LockAll();
void UnlockAll();
+ void PrintAll();
private:
static Node *find(Node *s, args_type args, u32 hash);
}
}
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::PrintAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ lock(p);
+ uptr v = atomic_load(p, memory_order_relaxed);
+    Node *s = (Node *)(v & ~1UL);
+    for (Node *node = s; node; node = node->link) {
+      Printf("Stack for id %u:\n", node->id);
+      node->load().Print();
+    }
+    // Pass the original list head back so unlock() restores the bucket.
+    unlock(p, s);
+ }
+}
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOTBASE_H
// run-time libraries.
//===----------------------------------------------------------------------===//
+#include "sanitizer_stacktrace.h"
+
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
-#include "sanitizer_stacktrace.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
return pc + 8;
#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
return pc + 4;
+#elif SANITIZER_RISCV64
+ // Current check order is 4 -> 2 -> 6 -> 8
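+  // e.g. a low byte of 0x13 (an addi) has (0x13 & 0x3) == 0x3 and
+  // (0x13 & 0x1c) == 0x10 != 0x1c, so it starts a standard 4-byte instruction.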
+ u8 InsnByte = *(u8 *)(pc);
+ if (((InsnByte & 0x3) == 0x3) && ((InsnByte & 0x1c) != 0x1c)) {
+ // xxxxxxxxxxxbbb11 | 32 bit | bbb != 111
+ return pc + 4;
+ }
+ if ((InsnByte & 0x3) != 0x3) {
+ // xxxxxxxxxxxxxxaa | 16 bit | aa != 11
+ return pc + 2;
+ }
+ // RISC-V encoding allows instructions to be up to 8 bytes long
+ if ((InsnByte & 0x3f) == 0x1f) {
+ // xxxxxxxxxx011111 | 48 bit |
+ return pc + 6;
+ }
+ if ((InsnByte & 0x7f) == 0x3f) {
+ // xxxxxxxxx0111111 | 64 bit |
+ return pc + 8;
+ }
+  // Bail out if we could not figure out the instruction size.
+ return 0;
#else
return pc + 1;
#endif
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
- // layouts. Assume GCC.
- return bp_prev - 1;
+ // layouts. Assume LLVM.
+ return bp_prev;
#else
return (uhwptr*)bp;
#endif
IsAligned((uptr)frame, sizeof(*frame)) &&
size < max_depth) {
#ifdef __powerpc__
- // PowerPC ABIs specify that the return address is saved on the
- // *caller's* stack frame. Thus we must dereference the back chain
- // to find the caller frame before extracting it.
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
uhwptr *caller_frame = (uhwptr*)frame[0];
if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
- // For most ABIs the offset where the return address is saved is two
- // register sizes. The exception is the SVR4 ABI, which uses an
- // offset of only one register size.
-#ifdef _CALL_SYSV
- uhwptr pc1 = caller_frame[1];
-#else
uhwptr pc1 = caller_frame[2];
-#endif
#elif defined(__s390__)
uhwptr pc1 = frame[14];
+#elif defined(__riscv)
+ // frame[-1] contains the return address
+ uhwptr pc1 = frame[-1];
#else
uhwptr pc1 = frame[1];
#endif
trace_buffer[size++] = (uptr) pc1;
}
bottom = (uptr)frame;
- frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
+#if defined(__riscv)
+  // frame[-2] contains the fp of the previous frame
+ uptr new_bp = (uptr)frame[-2];
+#else
+ uptr new_bp = (uptr)frame[0];
+#endif
+ frame = GetCanonicFrame(new_bp, stack_top, bottom);
}
}
#define SANITIZER_STACKTRACE_H
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
namespace __sanitizer {
return pc - 4;
#elif defined(__sparc__) || defined(__mips__)
return pc - 8;
+#elif SANITIZER_RISCV64
+  // RV-64 has a variable instruction length: the C extension gives 2-byte
+  // instructions, the base ISA uses 4-byte instructions, and the architecture
+  // allows encodings of up to 8 bytes. Since the exact length of the previous
+  // instruction cannot easily be recovered, pc - 2 is a safe choice for stack
+  // tracing: every call is at least 2 bytes, so pc - 2 always falls inside it.
+ return pc - 2;
#else
return pc - 1;
#endif
friend class FastUnwindTest;
};
+#if defined(__s390x__)
+static const uptr kFrameSize = 160;
+#elif defined(__s390__)
+static const uptr kFrameSize = 96;
+#else
+static const uptr kFrameSize = 2 * sizeof(uhwptr);
+#endif
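+// On s390 the ABI reserves a fixed register save area at the bottom of every
+// frame (160 bytes on 64-bit s390x, 96 bytes on 31-bit s390); other targets
+// only need space for the saved frame pointer and return address.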
+
// Check if given pointer points into allocated stack area.
static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
- return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+ return frame > stack_bottom && frame < stack_top - kFrameSize;
}
} // namespace __sanitizer
InternalScopedString frame_desc(GetPageSizeCached() * 2);
InternalScopedString dedup_token(GetPageSizeCached());
int dedup_frames = common_flags()->dedup_token_length;
+ bool symbolize = RenderNeedsSymbolization(common_flags()->stack_trace_format);
uptr frame_num = 0;
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
// addresses of the next instructions after the call.
uptr pc = GetPreviousInstructionPc(trace[i]);
- SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ SymbolizedStack *frames;
+ if (symbolize)
+ frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ else
+ frames = SymbolizedStack::New(pc);
CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
frame_desc.clear();
RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
- cur->info, common_flags()->symbolize_vs_style,
+ cur->info.address, symbolize ? &cur->info : nullptr,
+ common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
Printf("%s\n", frame_desc.data());
if (dedup_frames-- > 0) {
uptr out_buf_size) {
if (!out_buf_size) return;
pc = StackTrace::GetPreviousInstructionPc(pc);
- SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ SymbolizedStack *frame;
+ bool symbolize = RenderNeedsSymbolization(fmt);
+ if (symbolize)
+ frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ else
+ frame = SymbolizedStack::New(pc);
if (!frame) {
internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
out_buf[out_buf_size - 1] = 0;
for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
cur = cur->next) {
frame_desc.clear();
- RenderFrame(&frame_desc, fmt, frame_num++, cur->info,
+ RenderFrame(&frame_desc, fmt, frame_num++, cur->info.address,
+ symbolize ? &cur->info : nullptr,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
if (!frame_desc.length())
}
CHECK(out_buf <= out_end);
*out_buf = 0;
+ frame->ClearAll();
}
SANITIZER_INTERFACE_ATTRIBUTE
static const char kDefaultFormat[] = " #%n %p %F %L";
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
+ // info will be null in the case where symbolization is not needed for the
+ // given format. This ensures that the code below will get a hard failure
+ // rather than print incorrect information in case RenderNeedsSymbolization
+ // ever ends up out of sync with this function. If non-null, the addresses
+ // should match.
+ CHECK(!info || address == info->address);
if (0 == internal_strcmp(format, "DEFAULT"))
format = kDefaultFormat;
for (const char *p = format; *p != '\0'; p++) {
buffer->append("%zu", frame_no);
break;
case 'p':
- buffer->append("0x%zx", info.address);
+ buffer->append("0x%zx", address);
break;
case 'm':
- buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
+ buffer->append("%s", StripPathPrefix(info->module, strip_path_prefix));
break;
case 'o':
- buffer->append("0x%zx", info.module_offset);
+ buffer->append("0x%zx", info->module_offset);
break;
case 'f':
- buffer->append("%s",
- DemangleFunctionName(
- StripFunctionName(info.function, strip_func_prefix)));
+ buffer->append("%s", DemangleFunctionName(StripFunctionName(
+ info->function, strip_func_prefix)));
break;
case 'q':
- buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
- ? info.function_offset
+ buffer->append("0x%zx", info->function_offset != AddressInfo::kUnknown
+ ? info->function_offset
: 0x0);
break;
case 's':
- buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
+ buffer->append("%s", StripPathPrefix(info->file, strip_path_prefix));
break;
case 'l':
- buffer->append("%d", info.line);
+ buffer->append("%d", info->line);
break;
case 'c':
- buffer->append("%d", info.column);
+ buffer->append("%d", info->column);
break;
// Smarter special cases.
case 'F':
// Function name and offset, if file is unknown.
- if (info.function) {
- buffer->append("in %s",
- DemangleFunctionName(
- StripFunctionName(info.function, strip_func_prefix)));
- if (!info.file && info.function_offset != AddressInfo::kUnknown)
- buffer->append("+0x%zx", info.function_offset);
+ if (info->function) {
+ buffer->append("in %s", DemangleFunctionName(StripFunctionName(
+ info->function, strip_func_prefix)));
+ if (!info->file && info->function_offset != AddressInfo::kUnknown)
+ buffer->append("+0x%zx", info->function_offset);
}
break;
case 'S':
// File/line information.
- RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
- strip_path_prefix);
+ RenderSourceLocation(buffer, info->file, info->line, info->column,
+ vs_style, strip_path_prefix);
break;
case 'L':
// Source location, or module location.
- if (info.file) {
- RenderSourceLocation(buffer, info.file, info.line, info.column,
+ if (info->file) {
+ RenderSourceLocation(buffer, info->file, info->line, info->column,
vs_style, strip_path_prefix);
- } else if (info.module) {
- RenderModuleLocation(buffer, info.module, info.module_offset,
- info.module_arch, strip_path_prefix);
+ } else if (info->module) {
+ RenderModuleLocation(buffer, info->module, info->module_offset,
+ info->module_arch, strip_path_prefix);
} else {
buffer->append("(<unknown module>)");
}
break;
case 'M':
// Module basename and offset, or PC.
- if (info.address & kExternalPCBit)
- {} // There PCs are not meaningful.
- else if (info.module)
+ if (address & kExternalPCBit) {
+ // These PCs are not meaningful.
+ } else if (info->module) {
// Always strip the module name for %M.
- RenderModuleLocation(buffer, StripModuleName(info.module),
- info.module_offset, info.module_arch, "");
- else
- buffer->append("(%p)", (void *)info.address);
+ RenderModuleLocation(buffer, StripModuleName(info->module),
+ info->module_offset, info->module_arch, "");
+ } else {
+ buffer->append("(%p)", (void *)address);
+ }
break;
default:
Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
}
}
+bool RenderNeedsSymbolization(const char *format) {
+ if (0 == internal_strcmp(format, "DEFAULT"))
+ format = kDefaultFormat;
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%')
+ continue;
+ p++;
+ switch (*p) {
+ case '%':
+ break;
+ case 'n':
+ // frame_no
+ break;
+ case 'p':
+ // address
+ break;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
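+
+// Example behavior of RenderNeedsSymbolization (a sketch):
+//   RenderNeedsSymbolization("%n: %p")  -> false (frame number and raw
+//                                          address need no symbolizer)
+//   RenderNeedsSymbolization("DEFAULT") -> true  (expands to " #%n %p %F %L";
+//                                          %F and %L require symbolization)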
+
void RenderData(InternalScopedString *buffer, const char *format,
const DataInfo *DI, const char *strip_path_prefix) {
for (const char *p = format; *p != '\0'; p++) {
// module+offset if it is known, or (<unknown module>) string.
// %M - prints module basename and offset, if it is known, or PC.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix = "",
const char *strip_func_prefix = "");
+bool RenderNeedsSymbolization(const char *format);
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix);
// Can't declare pure virtual functions in sanitizer runtimes:
// __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
- virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const {
+ virtual PtraceRegistersStatus GetRegistersAndSP(
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
UNIMPLEMENTED();
}
- // The buffer in GetRegistersAndSP should be at least this big.
- virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
#include "sanitizer_platform.h"
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
- defined(__aarch64__) || defined(__powerpc64__) || \
- defined(__s390__) || defined(__i386__) || \
- defined(__arm__))
+#if SANITIZER_LINUX && \
+ (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
+ defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
+ defined(__arm__) || SANITIZER_RISCV64)
#include "sanitizer_stoptheworld.h"
#include <sys/types.h> // for pid_t
#include <sys/uio.h> // for iovec
#include <elf.h> // for NT_PRSTATUS
-#if defined(__aarch64__) && !SANITIZER_ANDROID
+#if (defined(__aarch64__) || SANITIZER_RISCV64) && !SANITIZER_ANDROID
// GLIBC 2.20+ sys/user does not include asm/ptrace.h
# include <asm/ptrace.h>
#endif
public:
SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
- tid_t GetThreadID(uptr index) const;
- uptr ThreadCount() const;
+ tid_t GetThreadID(uptr index) const override;
+ uptr ThreadCount() const override;
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const;
- uptr RegisterCount() const;
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
private:
InternalMmapVector<tid_t> thread_ids_;
#else
#define REG_SP rsp
#endif
+#define ARCH_IOVEC_FOR_GETREGSET
+// Compiler may use FP registers to store pointers.
+static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};
#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
+static constexpr uptr kExtraRegs[] = {0};
+#define ARCH_IOVEC_FOR_GETREGSET
+
+#elif SANITIZER_RISCV64
+typedef struct user_regs_struct regs_struct;
+#define REG_SP sp
+static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
+static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET
#else
}
PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
pid_t tid = GetThreadID(index);
- regs_struct regs;
+ constexpr uptr uptr_sz = sizeof(uptr);
int pterrno;
#ifdef ARCH_IOVEC_FOR_GETREGSET
- struct iovec regset_io;
- regset_io.iov_base = ®s;
- regset_io.iov_len = sizeof(regs_struct);
- bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
- (void*)NT_PRSTATUS, (void*)®set_io),
- &pterrno);
+ auto append = [&](uptr regset) {
+ uptr size = buffer->size();
+ // NT_X86_XSTATE requires 64bit alignment.
+ uptr size_up = RoundUpTo(size, 8 / uptr_sz);
+ buffer->reserve(Max<uptr>(1024, size_up));
+ struct iovec regset_io;
+ for (;; buffer->resize(buffer->capacity() * 2)) {
+ buffer->resize(buffer->capacity());
+ uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
+ regset_io.iov_base = buffer->data() + size_up;
+ regset_io.iov_len = available_bytes;
+ bool fail =
+ internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
+ (void *)regset, (void *)®set_io),
+ &pterrno);
+ if (fail) {
+ VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
+ (void *)regset, tid, pterrno);
+ buffer->resize(size);
+ return false;
+ }
+
+ // The data ends far enough from the buffer end; no need to resize and repeat.
+ if (regset_io.iov_len + 64 < available_bytes)
+ break;
+ }
+ buffer->resize(size_up + RoundUpTo(regset_io.iov_len, uptr_sz) / uptr_sz);
+ return true;
+ };
+
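+ // Order of operations (a sketch of the intent): NT_PRSTATUS (the general
+ // purpose registers) is mandatory and appended first; after that, the
+ // first extra regset the kernel accepts (e.g. NT_X86_XSTATE, whose FP and
+ // vector state may hold spilled pointers) is appended to the same buffer.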
+ buffer->clear();
+ bool fail = !append(NT_PRSTATUS);
+ if (!fail) {
+ // Accept the first available and do not report errors.
+ for (uptr regs : kExtraRegs)
+ if (regs && append(regs))
+ break;
+ }
#else
- bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
- ®s), &pterrno);
-#endif
- if (isErr) {
+ buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
+ bool fail = internal_iserror(
+ internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
+ if (fail)
VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
pterrno);
+#endif
+ if (fail) {
// ESRCH means that the given thread is not suspended or already dead.
// Therefore it's unsafe to inspect its data (e.g. walk through stack) and
// we should notify caller about this.
: REGISTERS_UNAVAILABLE;
}
- *sp = regs.REG_SP;
- internal_memcpy(buffer, ®s, sizeof(regs));
+ *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListLinux::RegisterCount() const {
- return sizeof(regs_struct) / sizeof(uptr);
-}
} // namespace __sanitizer
#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
public:
SuspendedThreadsListMac() : threads_(1024) {}
- tid_t GetThreadID(uptr index) const;
+ tid_t GetThreadID(uptr index) const override;
thread_t GetThread(uptr index) const;
- uptr ThreadCount() const;
+ uptr ThreadCount() const override;
bool ContainsThread(thread_t thread) const;
void Append(thread_t thread);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
- uptr *sp) const;
- uptr RegisterCount() const;
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
+ uptr *sp) const override;
private:
InternalMmapVector<SuspendedThreadInfo> threads_;
}
PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
thread_t thread = GetThread(index);
regs_struct regs;
int err;
: REGISTERS_UNAVAILABLE;
}
- internal_memcpy(buffer, ®s, sizeof(regs));
+ buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
+ internal_memcpy(buffer->data(), ®s, sizeof(regs));
#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
*sp = arm_thread_state64_get_sp(regs);
#else
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListMac::RegisterCount() const {
- return MACHINE_THREAD_STATE_COUNT;
-}
} // namespace __sanitizer
#endif // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__)) ||
bool ContainsTid(tid_t thread_id) const;
void Append(tid_t tid);
- PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ PtraceRegistersStatus GetRegistersAndSP(uptr index,
+ InternalMmapVector<uptr> *buffer,
uptr *sp) const;
- uptr RegisterCount() const;
private:
InternalMmapVector<tid_t> thread_ids_;
pl.pl_lwpid = 0;
int val;
- while ((val = ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
+ while ((val = internal_ptrace(op, pid_, (void *)&pl, sizeof(pl))) != -1 &&
pl.pl_lwpid != 0) {
suspended_threads_list_.Append(pl.pl_lwpid);
VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
}
PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
- uptr index, uptr *buffer, uptr *sp) const {
+ uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
lwpid_t tid = GetThreadID(index);
pid_t ppid = internal_getppid();
struct reg regs;
}
*sp = PTRACE_REG_SP(®s);
- internal_memcpy(buffer, ®s, sizeof(regs));
+ buffer->resize(RoundUpTo(sizeof(regs), sizeof(uptr)) / sizeof(uptr));
+ internal_memcpy(buffer->data(), ®s, sizeof(regs));
return REGISTERS_AVAILABLE;
}
-uptr SuspendedThreadsListNetBSD::RegisterCount() const {
- return sizeof(struct reg) / sizeof(uptr);
-}
} // namespace __sanitizer
#endif
#include "sanitizer_allocator_internal.h"
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
#include "sanitizer_symbolizer_internal.h"
namespace __sanitizer {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
+#elif SANITIZER_RISCV64
+ const char *const kSymbolizerArch = "--default-arch=riscv64";
#elif defined(__aarch64__)
const char* const kSymbolizerArch = "--default-arch=arm64";
#elif defined(__arm__)
#endif
const char *const inline_flag = common_flags()->symbolize_inline_frames
- ? "--inlining=true"
- : "--inlining=false";
+ ? "--inlines"
+ : "--no-inlines";
int i = 0;
argv[i++] = path_to_binary;
argv[i++] = inline_flag;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
- CHECK(addr >= reinterpret_cast<uptr>(info.dli_saddr));
- stack->info.function_offset = addr - reinterpret_cast<uptr>(info.dli_saddr);
+ // Compute offset if possible. `dladdr()` doesn't always ensure that `addr >=
+ // sym_addr` so only compute the offset when this holds. Failure to find the
+ // function offset is not treated as a failure because it might still be
+ // possible to get the symbol name.
+ uptr sym_addr = reinterpret_cast<uptr>(info.dli_saddr);
+ if (addr >= sym_addr) {
+ stack->info.function_offset = addr - sym_addr;
+ }
+
const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
if (!demangled) return false;
stack->info.function = internal_strdup(demangled);
argv[i++] = path_to_binary;
argv[i++] = "-p";
argv[i++] = &pid_str_[0];
- if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) {
+ if (GetMacosAlignedVersion() == MacosVersion(10, 9)) {
// On Mavericks atos prints a deprecation warning which we suppress by
// passing -d. The warning isn't present on other OSX versions, even the
// newer ones.
start_address = reinterpret_cast<uptr>(info.dli_saddr);
}
- // Only assig to `function_offset` if we were able to get the function's
- // start address.
- if (start_address != AddressInfo::kUnknown) {
- CHECK(addr >= start_address);
+ // Only assign to `function_offset` if we were able to get the function's
+ // start address and we got a sensible `start_address` (dladdr doesn't always
+ // ensure that `addr >= sym_addr`).
+ if (start_address != AddressInfo::kUnknown && addr >= start_address) {
stack->info.function_offset = addr - start_address;
}
return true;
buffer->append(kFormatData, DI->start);
}
+bool RenderNeedsSymbolization(const char *format) { return false; }
+
// We don't support the stack_trace_format flag at all.
void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
- const AddressInfo &info, bool vs_style,
+ uptr address, const AddressInfo *info, bool vs_style,
const char *strip_path_prefix, const char *strip_func_prefix) {
- buffer->append(kFormatFrame, frame_no, info.address);
+ CHECK(!RenderNeedsSymbolization(format));
+ buffer->append(kFormatFrame, frame_no, address);
}
Symbolizer *Symbolizer::PlatformInit() {
// Attempts to demangle a Swift name. The demangler will return nullptr if a
// non-Swift name is passed in.
const char *DemangleSwift(const char *name) {
- if (!name) return nullptr;
-
- // Check if we are dealing with a Swift mangled name first.
- if (name[0] != '_' || name[1] != 'T') {
- return nullptr;
- }
-
if (swift_demangle_f)
return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);
#if SANITIZER_SUPPORTS_WEAK_HOOKS
extern "C" {
-SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
-bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
- char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE bool
+__sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength,
+ bool SymbolizeInlineFrames);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
char *Buffer, int MaxLength);
bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
bool result = __sanitizer_symbolize_code(
- stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ stack->info.module, stack->info.module_offset, buffer_, kBufferSize,
+ common_flags()->symbolize_inline_frames);
if (result) ParseSymbolizePCOutput(buffer_, stack);
return result;
}
if (!common_flags()->print_summary) return;
InternalScopedString buff(kMaxSummaryLength);
buff.append("%s ", error_type);
- RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
+ RenderFrame(&buff, "%L %F", 0, info.address, &info,
+ common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
ReportErrorSummary(buff.data(), alt_tool_name);
}
return SupportsColoredOutput(fd);
}
-static INLINE bool ReportSupportsColors() {
+static inline bool ReportSupportsColors() {
return report_file.SupportsColors();
}
#else // SANITIZER_FUCHSIA
// Fuchsia's logs always go through post-processing that handles colorization.
-static INLINE bool ReportSupportsColors() { return true; }
+static inline bool ReportSupportsColors() { return true; }
#endif // !SANITIZER_FUCHSIA
--- /dev/null
+//===-- sanitizer_syscall_linux_riscv64.inc ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/riscv64.
+//
+//===----------------------------------------------------------------------===//
+
+// About local register variables:
+// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
+//
+// Kernel ABI:
+// The RISC-V Linux syscall convention is only sparsely documented. Kernel
+// sources and http://man7.org/linux/man-pages/man2/syscall.2.html show that:
+// - the syscall number is passed in a7;
+// - results are returned in a0 and a1;
+// - arguments are passed in a0-a6 (see below).
+//
+// Regarding the argument registers, the main reference found is this post
+// by Bruce Hold on the RISC-V sw-dev forum:
+// https://groups.google.com/a/groups.riscv.org/forum/#!topic/sw-dev/exbrzM3GZDQ
+// confirmed by inspecting the glibc sources.
+#define SYSCALL(name) __NR_##name
+
+#define INTERNAL_SYSCALL_CLOBBERS "memory"
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0");
+ __asm__ volatile("ecall\n\t"
+ : "=r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall0(n) (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ register u64 a5 asm("a5") = arg6;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
+ long arg5, long arg6, long arg7) {
+ register u64 a7 asm("a7") = nr;
+ register u64 a0 asm("a0") = arg1;
+ register u64 a1 asm("a1") = arg2;
+ register u64 a2 asm("a2") = arg3;
+ register u64 a3 asm("a3") = arg4;
+ register u64 a4 asm("a4") = arg5;
+ register u64 a5 asm("a5") = arg6;
+ register u64 a6 asm("a6") = arg7;
+ __asm__ volatile("ecall\n\t"
+ : "+r"(a0)
+ : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
+ "r"(a6)
+ : INTERNAL_SYSCALL_CLOBBERS);
+ return a0;
+}
+#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6), (long)(a7))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
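+
+// Illustrative expansion (a sketch): internal_syscall(SYSCALL(munmap), p, sz)
+// counts the arguments after the syscall number via __SYSCALL_NARGS (here 2),
+// concatenates to __internal_syscall2, and expands to
+//   (__internal_syscall)(nr, (u64)(p), (long)(sz));
+// selecting the matching two-argument overload above.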
+
+// Helper function used to avoid clobbering of errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
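+
+// Usage sketch (illustrative, mirroring the other per-arch syscall .inc
+// files): issue a syscall and decode the error in one step:
+//   uptr res = internal_syscall(SYSCALL(close), fd);
+//   int reserrno;
+//   if (internal_iserror(res, &reserrno))
+//     VReport(1, "close failed with errno %d\n", reserrno);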
// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
//
// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
//
//===----------------------------------------------------------------------===//
POST_SYSCALL(dup2)(long long res, long long from_, long long to_) {
/* Nothing to do */
}
-/* syscall 91 has been skipped */
+PRE_SYSCALL(getrandom)(void *buf_, long long buflen_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(getrandom)
+(long long res, void *buf_, long long buflen_, long long flags_) {
+ /* TODO */
+}
PRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) {
/* Nothing to do */
}
POST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ }
PRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ }
POST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ }
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+PRE_SYSCALL(__futex)
+(void *uaddr_, long long op_, long long val_, void *timeout_, void *uaddr2_,
+ long long val2_, long long val3_) {
+ /* TODO */
+}
+POST_SYSCALL(__futex)
+(long long res, void *uaddr_, long long op_, long long val_, void *timeout_,
+ void *uaddr2_, long long val2_, long long val3_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futex_set_robust_list)(void *head_, long long len_) { /* TODO */ }
+POST_SYSCALL(__futex_set_robust_list)
+(long long res, void *head_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futex_get_robust_list)
+(long long lwpid_, void **headp_, void *lenp_) {
+ /* TODO */
+}
+POST_SYSCALL(__futex_get_robust_list)
+(long long res, long long lwpid_, void **headp_, void *lenp_) {
+ /* TODO */
+}
#if !defined(_LP64)
PRE_SYSCALL(compat_10_osemsys)
(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) {
}
POST_SYSCALL(__fhstatvfs190)
(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
+PRE_SYSCALL(__acl_get_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_link)(void *path_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_link)(long long res, void *path_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_link)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_link)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_get_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_get_fd)(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_get_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_set_fd)(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_set_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_file)(void *path_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_file)(long long res, void *path_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_delete_fd)(long long filedes_, long long type_) { /* TODO */ }
+POST_SYSCALL(__acl_delete_fd)
+(long long res, long long filedes_, long long type_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_file)(void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_file)
+(long long res, void *path_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__acl_aclcheck_fd)
+(long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+POST_SYSCALL(__acl_aclcheck_fd)
+(long long res, long long filedes_, long long type_, void *aclp_) {
+ /* TODO */
+}
+PRE_SYSCALL(lpathconf)(void *path_, long long name_) { /* TODO */ }
+POST_SYSCALL(lpathconf)(long long res, void *path_, long long name_) {
+ /* TODO */
+}
#undef SYS_MAXSYSARGS
} // extern "C"
return true;
}
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+ uptr space_size = shadow_size_bytes + left_padding;
+ uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+ granularity, nullptr, nullptr);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
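+
+// Worked example (a sketch): with a 4 KiB mmap granularity, shadow_scale = 3
+// and min_shadow_base_alignment = 0, the shadow base must be aligned to
+// granularity << shadow_scale = 32 KiB, and left_padding is one 4 KiB page.
+// A nonzero min_shadow_base_alignment (used e.g. by HWASan on AArch64)
+// raises both values to 1 << min_shadow_base_alignment bytes.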
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
#define DISPATCH_DATA_DESTRUCTOR_MUNMAP _dispatch_data_destructor_munmap
#if __has_attribute(noescape)
- #define DISPATCH_NOESCAPE __attribute__((__noescape__))
+# define DISPATCH_NOESCAPE __attribute__((__noescape__))
#else
- #define DISPATCH_NOESCAPE
+# define DISPATCH_NOESCAPE
#endif
+#if SANITIZER_MAC
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak_import))
+#else
+# define SANITIZER_WEAK_IMPORT extern "C" __attribute((weak))
+#endif
+
// Data types used in dispatch APIs
typedef unsigned long size_t;
typedef unsigned long uintptr_t;
//===----------------------------------------------------------------------===//
#include "tsan_rtl.h"
#include "tsan_interceptors.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
namespace __tsan {
#if !SANITIZER_GO
typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
-void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+void ExternalAccess(void *addr, uptr caller_pc, void *tag, AccessFunc access) {
CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
ThreadState *thr = cur_thread();
- if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+ if (caller_pc) FuncEntry(thr, caller_pc);
InsertShadowStackFrameForTag(thr, (uptr)tag);
bool in_ignored_lib;
- if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+ if (!caller_pc || !libignore()->IsIgnored(caller_pc, &in_ignored_lib)) {
access(thr, CALLERPC, (uptr)addr, kSizeLog1);
}
FuncExit(thr);
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, caller_pc, tag, MemoryRead);
+ ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryRead);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
- ExternalAccess(addr, caller_pc, tag, MemoryWrite);
+ ExternalAccess(addr, STRIP_PC(caller_pc), tag, MemoryWrite);
}
} // extern "C"
// Let a frontend override.
parser.ParseString(__tsan_default_options());
#if TSAN_CONTAINS_UBSAN
- const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ const char *ubsan_default_options = __ubsan_default_options();
ubsan_parser.ParseString(ubsan_default_options);
#endif
// Override from command line.
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, SANITIZER_MAC ? true : false,
+ "Ignore reads and writes from all interceptors.")
TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
"Interceptors should only detect races when called from instrumented "
"modules.")
LibIgnore *libignore();
#if !SANITIZER_GO
-INLINE bool in_symbolizer() {
+inline bool in_symbolizer() {
cur_thread_init();
return UNLIKELY(cur_thread()->in_symbolizer);
}
#include "BlocksRuntime/Block.h"
#include "tsan_dispatch_defs.h"
+#if SANITIZER_MAC
+# include <Availability.h>
+#endif
+
namespace __tsan {
typedef u16 uint16_t;
DISPATCH_INTERCEPT(dispatch, false)
DISPATCH_INTERCEPT(dispatch_barrier, true)
+// dispatch_async_and_wait() and friends were introduced in macOS 10.14.
+// Linking of these interceptors fails when using an older SDK.
+#if !SANITIZER_MAC || defined(__MAC_10_14)
+// macOS 10.14 is greater than our minimal deployment target. To ensure we
+// generate a weak reference so the TSan dylib continues to work on older
+// systems, we need to forward declare the intercepted functions as "weak
+// imports". Note that this file is multi-platform, so we cannot include the
+// actual header file (#include <dispatch/dispatch.h>).
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait(
+ dispatch_queue_t queue, DISPATCH_NOESCAPE dispatch_block_t block);
+SANITIZER_WEAK_IMPORT void dispatch_barrier_async_and_wait_f(
+ dispatch_queue_t queue, void *context, dispatch_function_t work);
+
+DISPATCH_INTERCEPT_SYNC_F(dispatch_async_and_wait_f, false)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_async_and_wait, false)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_barrier_async_and_wait_f, true)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_barrier_async_and_wait, true)
+#endif
+
DECLARE_REAL(void, dispatch_after_f, dispatch_time_t when,
dispatch_queue_t queue, void *context, dispatch_function_t work)
INTERCEPT_FUNCTION(dispatch_barrier_async_f);
INTERCEPT_FUNCTION(dispatch_barrier_sync);
INTERCEPT_FUNCTION(dispatch_barrier_sync_f);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_async_and_wait_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_and_wait_f);
INTERCEPT_FUNCTION(dispatch_after);
INTERCEPT_FUNCTION(dispatch_after_f);
INTERCEPT_FUNCTION(dispatch_once);
namespace __tsan {
-static bool intersects_with_shadow(mach_vm_address_t *address,
+static bool intersects_with_shadow(mach_vm_address_t address,
mach_vm_size_t size, int flags) {
// VM_FLAGS_FIXED is 0x0, so we have to test for VM_FLAGS_ANYWHERE.
if (flags & VM_FLAGS_ANYWHERE) return false;
- uptr ptr = *address;
- return !IsAppMem(ptr) || !IsAppMem(ptr + size - 1);
+ return !IsAppMem(address) || !IsAppMem(address + size - 1);
}
TSAN_INTERCEPTOR(kern_return_t, mach_vm_allocate, vm_map_t target,
SCOPED_TSAN_INTERCEPTOR(mach_vm_allocate, target, address, size, flags);
if (target != mach_task_self())
return REAL(mach_vm_allocate)(target, address, size, flags);
- if (intersects_with_shadow(address, size, flags))
+ if (address && intersects_with_shadow(*address, size, flags))
return KERN_NO_SPACE;
- kern_return_t res = REAL(mach_vm_allocate)(target, address, size, flags);
- if (res == KERN_SUCCESS)
+ kern_return_t kr = REAL(mach_vm_allocate)(target, address, size, flags);
+ if (kr == KERN_SUCCESS)
MemoryRangeImitateWriteOrResetRange(thr, pc, *address, size);
- return res;
+ return kr;
}
TSAN_INTERCEPTOR(kern_return_t, mach_vm_deallocate, vm_map_t target,
SCOPED_TSAN_INTERCEPTOR(mach_vm_deallocate, target, address, size);
if (target != mach_task_self())
return REAL(mach_vm_deallocate)(target, address, size);
- UnmapShadow(thr, address, size);
- return REAL(mach_vm_deallocate)(target, address, size);
+ kern_return_t kr = REAL(mach_vm_deallocate)(target, address, size);
+ if (kr == KERN_SUCCESS && address)
+ UnmapShadow(thr, address, size);
+ return kr;
}
} // namespace __tsan
#include "tsan_mman.h"
#include "tsan_fd.h"
+#include <stdarg.h>
+
using namespace __tsan;
#if SANITIZER_FREEBSD || SANITIZER_MAC
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;
+typedef __sanitizer::u16 mode_t;
// From /usr/include/unistd.h
# define F_ULOCK 0 /* Unlock a previously locked region. */
if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
ignoring_ =
- !thr_->in_ignored_lib && libignore()->IsIgnored(pc, &in_ignored_lib_);
+ !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
+ libignore()->IsIgnored(pc, &in_ignored_lib_));
EnableIgnores();
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif
-TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
- SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
READ_STRING(thr, pc, name, 0);
- int fd = REAL(open)(name, flags, mode);
+ int fd = REAL(open)(name, oflag, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
}
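// Note on the variadic signature above (a sketch of the rationale): open(2)
// only passes a mode when O_CREAT (or O_TMPFILE) is set, but on the
// supported ABIs reading one extra va_arg when the caller supplied none is
// benign in practice, so the mode is extracted unconditionally. The open64
// interceptor below follows the same pattern.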
#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
- SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
+ va_list ap;
+ va_start(ap, oflag);
+ mode_t mode = va_arg(ap, int);
+ va_end(ap);
+ SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
READ_STRING(thr, pc, name, 0);
- int fd = REAL(open64)(name, flags, mode);
+ int fd = REAL(open64)(name, oflag, mode);
if (fd >= 0)
FdFileCreate(thr, pc, fd);
return fd;
MemoryAccessRange(thr, pc, p, s, write);
}
-static void syscall_acquire(uptr pc, uptr addr) {
+static USED void syscall_acquire(uptr pc, uptr addr) {
TSAN_SYSCALL();
Acquire(thr, pc, addr);
DPrintf("syscall_acquire(%p)\n", addr);
}
-static void syscall_release(uptr pc, uptr addr) {
+static USED void syscall_release(uptr pc, uptr addr) {
TSAN_SYSCALL();
DPrintf("syscall_release(%p)\n", addr);
Release(thr, pc, addr);
#include "tsan_interface_ann.h"
#include "tsan_rtl.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
using namespace __tsan;
-typedef u16 uint16_t;
-typedef u32 uint32_t;
-typedef u64 uint64_t;
-
void __tsan_init() {
cur_thread_init();
Initialize(cur_thread());
}
void __tsan_read16_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr + 8, kSizeLog8);
}
// __tsan_unaligned_read/write calls are emitted by compiler.
#include "tsan_interface.h"
#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_ptrauth.h"
#define CALLERPC ((uptr)__builtin_return_address(0))
}
void __tsan_read1_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_read2_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_read4_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_read8_pc(void *addr, void *pc) {
- MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_write1_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog1);
}
void __tsan_write2_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog2);
}
void __tsan_write4_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog4);
}
void __tsan_write8_pc(void *addr, void *pc) {
- MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), STRIP_PC(pc), (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
}
void __tsan_func_entry(void *pc) {
- FuncEntry(cur_thread(), (uptr)pc);
+ FuncEntry(cur_thread(), STRIP_PC(pc));
}
void __tsan_func_exit() {
}
void __tsan_read_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, false);
+ MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, false);
}
void __tsan_write_range_pc(void *addr, uptr size, void *pc) {
- MemoryAccessRange(cur_thread(), (uptr)pc, (uptr)addr, size, true);
+ MemoryAccessRange(cur_thread(), STRIP_PC(pc), (uptr)addr, size, true);
}
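// STRIP_PC comes from sanitizer_ptrauth.h: on targets with pointer
// authentication (arm64e) it strips the PAC bits from the incoming PC so the
// value can be used as a plain address; elsewhere it reduces to a cast.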
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
-// Linux- and FreeBSD-specific code.
+// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//
-
#include "sanitizer_common/sanitizer_platform.h"
-#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_openbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
-#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
-#include "tsan_flags.h"
#include <fcntl.h>
#include <pthread.h>
#endif
}
-#ifdef __powerpc__
+#if SANITIZER_NETBSD
+# ifdef __x86_64__
+# define LONG_JMP_SP_ENV_SLOT 6
+# else
+# error unsupported
+# endif
+#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
-#elif SANITIZER_NETBSD
-# define LONG_JMP_SP_ENV_SLOT 6
#elif SANITIZER_LINUX
# ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT 13
} // namespace __tsan
-#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_OPENBSD
pthread_introspection_hook_install(&my_pthread_introspection_hook);
#endif
- if (GetMacosVersion() >= MACOS_VERSION_MOJAVE) {
+ if (GetMacosAlignedVersion() >= MacosVersion(10, 14)) {
// Libsystem currently uses a process-global key; this might change.
const unsigned kTLSLongjmpXorKeySlot = 0x7;
longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
#ifdef __aarch64__
# define LONG_JMP_SP_ENV_SLOT \
- ((GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? 12 : 13)
+ ((GetMacosAlignedVersion() >= MacosVersion(10, 14)) ? 12 : 13)
#else
# define LONG_JMP_SP_ENV_SLOT 2
#endif
"HINT: if %s is not supported in your environment, you may set "
"TSAN_OPTIONS=%s=0\n";
-static void NoHugePagesInShadow(uptr addr, uptr size) {
- SetShadowRegionHugePageMode(addr, size);
-}
-
static void DontDumpShadow(uptr addr, uptr size) {
if (common_flags()->use_madv_dontdump)
if (!DontDumpShadowMemory(addr, size)) {
#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
- if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
+ if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
+ "shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
// Frequently a thread uses only a small part of stack and similarly
// a program uses a small part of large mmap. On some programs
// we see 20% memory usage reduction without huge pages for this range.
- // FIXME: don't use constants here.
-#if defined(__x86_64__)
- const uptr kMadviseRangeBeg = 0x7f0000000000ull;
- const uptr kMadviseRangeSize = 0x010000000000ull;
-#elif defined(__mips64)
- const uptr kMadviseRangeBeg = 0xff00000000ull;
- const uptr kMadviseRangeSize = 0x0100000000ull;
-#elif defined(__aarch64__) && defined(__APPLE__)
- uptr kMadviseRangeBeg = LoAppMemBeg();
- uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
-#elif defined(__aarch64__)
- uptr kMadviseRangeBeg = 0;
- uptr kMadviseRangeSize = 0;
- if (vmaSize == 39) {
- kMadviseRangeBeg = 0x7d00000000ull;
- kMadviseRangeSize = 0x0300000000ull;
- } else if (vmaSize == 42) {
- kMadviseRangeBeg = 0x3f000000000ull;
- kMadviseRangeSize = 0x01000000000ull;
- } else {
- DCHECK(0);
- }
-#elif defined(__powerpc64__)
- uptr kMadviseRangeBeg = 0;
- uptr kMadviseRangeSize = 0;
- if (vmaSize == 44) {
- kMadviseRangeBeg = 0x0f60000000ull;
- kMadviseRangeSize = 0x0010000000ull;
- } else if (vmaSize == 46) {
- kMadviseRangeBeg = 0x3f0000000000ull;
- kMadviseRangeSize = 0x010000000000ull;
- } else {
- DCHECK(0);
- }
-#endif
- NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
- kMadviseRangeSize * kShadowMultiplier);
DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
ShadowBeg(), ShadowEnd(),
// Map meta shadow.
const uptr meta = MetaShadowBeg();
const uptr meta_size = MetaShadowEnd() - meta;
- if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
+ if (!MmapFixedSuperNoReserve(meta, meta_size, "meta shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
- NoHugePagesInShadow(meta, meta_size);
DontDumpShadow(meta, meta_size);
DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
meta, meta + meta_size, meta_size >> 30);
SymbolizedStack *frame = ent->frames;
for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
InternalScopedString res(2 * GetPageSizeCached());
- RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ RenderFrame(&res, common_flags()->stack_trace_format, i,
+ frame->info.address, &frame->info,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
Printf("%s\n", res.data());
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+ "shadow"))
Die();
// Meta shadow is 2:1, so tread carefully.
if (!data_mapped) {
// First call maps data+bss.
data_mapped = true;
- if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
Die();
} else {
// Mapping contiguous heap.
return;
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
- if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
+ "meta shadow"))
Die();
mapped_meta_end = meta_end;
}
CHECK_GE(addr, TraceMemBeg());
CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- if (!MmapFixedNoReserve(addr, size, name)) {
+ if (!MmapFixedSuperNoReserve(addr, size, name)) {
Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
addr, size);
Die();
u64 *p1 = p;
p = RoundDown(end, kPageSize);
UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
- if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
Die();
// Set the ending.
while (p < end) {
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
-INLINE void cur_thread_init() { }
+inline void cur_thread_init() { }
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
-INLINE ThreadState *cur_thread() {
+inline ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
-INLINE void cur_thread_init() {
+inline void cur_thread_init() {
ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
if (UNLIKELY(!thr->current))
thr->current = thr;
}
-INLINE void set_cur_thread(ThreadState *thr) {
+inline void set_cur_thread(ThreadState *thr) {
reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
-INLINE void cur_thread_finalize() { }
+inline void cur_thread_finalize() { }
#endif // SANITIZER_MAC || SANITIZER_ANDROID
#endif // SANITIZER_GO
#include "tsan_ppc_regs.h"
- .machine altivec
.section .text
.hidden __tsan_setjmp
.globl _setjmp
ExtractTagFromStack(stk, tag);
}
-static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
- uptr addr_min, uptr addr_max) {
- bool equal_stack = false;
- RacyStacks hash;
- bool equal_address = false;
- RacyAddress ra0 = {addr_min, addr_max};
- {
- ReadLock lock(&ctx->racy_mtx);
- if (flags()->suppress_equal_stacks) {
- hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
- for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
- if (hash == ctx->racy_stacks[i]) {
- VPrintf(2,
- "ThreadSanitizer: suppressing report as doubled (stack)\n");
- equal_stack = true;
- break;
- }
- }
- }
- if (flags()->suppress_equal_addresses) {
- for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
- RacyAddress ra2 = ctx->racy_addresses[i];
- uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
- uptr minend = min(ra0.addr_max, ra2.addr_max);
- if (maxbeg < minend) {
- VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
- equal_address = true;
- break;
- }
- }
+static bool FindRacyStacks(const RacyStacks &hash) {
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
+ return true;
}
}
- if (!equal_stack && !equal_address)
+ return false;
+}
+
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
+ if (!flags()->suppress_equal_stacks)
return false;
- if (!equal_stack) {
- Lock lock(&ctx->racy_mtx);
- ctx->racy_stacks.PushBack(hash);
- }
- if (!equal_address) {
- Lock lock(&ctx->racy_mtx);
- ctx->racy_addresses.PushBack(ra0);
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
}
- return true;
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyStacks(hash))
+ return true;
+ ctx->racy_stacks.PushBack(hash);
+ return false;
}
-static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
- uptr addr_min, uptr addr_max) {
- Lock lock(&ctx->racy_mtx);
- if (flags()->suppress_equal_stacks) {
- RacyStacks hash;
- hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
- hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
- ctx->racy_stacks.PushBack(hash);
+static bool FindRacyAddress(const RacyAddress &ra0) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+ return true;
+ }
}
- if (flags()->suppress_equal_addresses) {
- RacyAddress ra0 = {addr_min, addr_max};
- ctx->racy_addresses.PushBack(ra0);
+ return false;
+}
+
+static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
+ if (!flags()->suppress_equal_addresses)
+ return false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
}
+ Lock lock(&ctx->racy_mtx);
+ if (FindRacyAddress(ra0))
+ return true;
+ ctx->racy_addresses.PushBack(ra0);
+ return false;
}
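// Both FindRacyStacks/HandleRacyStacks and FindRacyAddress/HandleRacyAddress
// above follow the same double-checked pattern (a sketch of the intent):
// probe the set under a cheap ReadLock first; only on a miss take the
// exclusive Lock, re-check (another thread may have inserted the entry in
// between), and push the new entry if it is still absent.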
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
if (IsExpectedReport(addr_min, addr_max - addr_min))
return;
}
+ if (HandleRacyAddress(thr, addr_min, addr_max))
+ return;
ReportType typ = ReportTypeRace;
if (thr->is_vptr_access && freed)
if (IsFiredSuppression(ctx, typ, traces[1]))
return;
- if (HandleRacyStacks(thr, traces, addr_min, addr_max))
+ if (HandleRacyStacks(thr, traces))
return;
// If any of the accesses has a tag, treat this as an "external" race.
}
#endif
- if (!OutputReport(thr, rep))
- return;
-
- AddRacyStacks(thr, traces, addr_min, addr_max);
+ OutputReport(thr, rep);
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
uptr metap = (uptr)MemToMeta(p0);
uptr metasz = sz0 / kMetaRatio;
UnmapOrDie((void*)metap, metasz);
- if (!MmapFixedNoReserve(metap, metasz))
+ if (!MmapFixedSuperNoReserve(metap, metasz))
Die();
}
"integer-divide-by-zero")
UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero")
UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use")
+UBSAN_CHECK(InvalidObjCCast, "invalid-objc-cast", "invalid-objc-cast")
UBSAN_CHECK(ImplicitUnsignedIntegerTruncation,
"implicit-unsigned-integer-truncation",
"implicit-unsigned-integer-truncation")
namespace __ubsan {
-const char *MaybeCallUbsanDefaultOptions() {
- return (&__ubsan_default_options) ? __ubsan_default_options() : "";
-}
-
static const char *GetFlag(const char *flag) {
// We cannot call getenv() from inside a preinit array initializer
if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
{
CommonFlags cf;
cf.CopyFrom(*common_flags());
- cf.print_summary = false;
cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
OverrideCommonFlags(cf);
}
RegisterUbsanFlags(&parser, f);
// Override from user-specified string.
- parser.ParseString(MaybeCallUbsanDefaultOptions());
+ parser.ParseString(__ubsan_default_options());
// Override from environment variable.
parser.ParseStringFromEnv("UBSAN_OPTIONS");
InitializeCommonFlags();
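Note (illustrative sketch, not part of this diff): the parser now calls the
user-overridable hook directly, so a program still customizes its defaults
by defining it, e.g.:

  extern "C" const char *__ubsan_default_options() {
    return "print_stacktrace=1";
  }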
void InitializeFlags();
void RegisterUbsanFlags(FlagParser *parser, Flags *f);
-const char *MaybeCallUbsanDefaultOptions();
-
} // namespace __ubsan
extern "C" {
#include "ubsan_diag.h"
#include "ubsan_flags.h"
#include "ubsan_monitor.h"
+#include "ubsan_value.h"
#include "sanitizer_common/sanitizer_common.h"
Die();
}
+static void handleInvalidObjCCast(InvalidObjCCast *Data, ValueHandle Pointer,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::InvalidObjCCast;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
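+  // The class name may be unavailable (e.g. no ObjC runtime is loaded in
+  // this process), so fall back to a placeholder string.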
+ const char *GivenClass = getObjCClassName(Pointer);
+ const char *GivenClassStr = GivenClass ? GivenClass : "<unknown type>";
+
+ Diag(Loc, DL_Error, ET,
+ "invalid ObjC cast, object is a '%0', but expected a %1")
+ << GivenClassStr << Data->ExpectedType;
+}
+
+void __ubsan::__ubsan_handle_invalid_objc_cast(InvalidObjCCast *Data,
+ ValueHandle Pointer) {
+ GET_REPORT_OPTIONS(false);
+ handleInvalidObjCCast(Data, Pointer, Opts);
+}
+void __ubsan::__ubsan_handle_invalid_objc_cast_abort(InvalidObjCCast *Data,
+ ValueHandle Pointer) {
+ GET_REPORT_OPTIONS(true);
+ handleInvalidObjCCast(Data, Pointer, Opts);
+ Die();
+}
+
static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr,
ReportOptions Opts, bool IsAttr) {
if (!LocPtr)
} // namespace __ubsan
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(false);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *CallData,
- ValueHandle Function) {
- GET_REPORT_OPTIONS(true);
- CFICheckFailData Data = {CFITCK_ICall, CallData->Loc, CallData->Type};
- handleCFIBadIcall(&Data, Function, Opts);
- Die();
-}
-
void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
ValueHandle Value,
uptr ValidVtable) {
/// Handle a builtin called in an invalid way.
RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data)
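+// Check data emitted by the compiler for -fsanitize=objc-cast (e.g. a for-in
+// loop whose elements have an unexpected class).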
+struct InvalidObjCCast {
+ SourceLocation Loc;
+ const TypeDescriptor &ExpectedType;
+};
+
+/// Handle an invalid ObjC cast.
+RECOVERABLE(invalid_objc_cast, InvalidObjCCast *Data, ValueHandle Pointer)
+
struct NonNullReturnData {
SourceLocation AttrLoc;
};
CFITCK_VMFCall,
};
-struct CFIBadIcallData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
-};
-
struct CFICheckFailData {
CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
/// \brief Handle control flow integrity failures.
RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
uptr VtableIsValid)
INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort)
INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin)
INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_objc_cast)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_objc_cast_abort)
INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value)
INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort)
INTERFACE_FUNCTION(__ubsan_handle_missing_return)
#ifndef UBSAN_PLATFORM_H
#define UBSAN_PLATFORM_H
-#ifndef CAN_SANITIZE_UB
// Other platforms should be easy to add, and probably work as-is.
#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
defined(__NetBSD__) || defined(__OpenBSD__) || \
#else
# define CAN_SANITIZE_UB 0
#endif
-#endif //CAN_SANITIZE_UB
#endif
#include "ubsan_value.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+
+// TODO(dliew): Prefer '__APPLE__' here over 'SANITIZER_MAC', as the latter is
+// unclear. rdar://58124919 tracks using a more obviously portable guard.
+#if defined(__APPLE__)
+#include <dlfcn.h>
+#endif
using namespace __ubsan;
+typedef const char *(*ObjCGetClassNameTy)(void *);
+
+const char *__ubsan::getObjCClassName(ValueHandle Pointer) {
+#if defined(__APPLE__)
+ // We need to query the ObjC runtime for some information, but do not want
+ // to introduce a static dependency from the ubsan runtime onto ObjC. Try to
+ // grab a handle to the ObjC runtime used by the process.
+ static bool AttemptedDlopen = false;
+ static void *ObjCHandle = nullptr;
+ static void *ObjCObjectGetClassName = nullptr;
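+  // Each of these is written at most once, under the spin lock below, and
+  // only read afterwards.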
+
+ // Prevent threads from racing to dlopen().
+ static __sanitizer::StaticSpinMutex Lock;
+ {
+ __sanitizer::SpinMutexLock Guard(&Lock);
+
+ if (!AttemptedDlopen) {
+ ObjCHandle = dlopen(
+ "/usr/lib/libobjc.A.dylib",
+ RTLD_LAZY // Only bind symbols when used.
+ | RTLD_LOCAL // Only make symbols available via the handle.
+ | RTLD_NOLOAD // Do not load the dylib, just grab a handle if the
+ // image is already loaded.
+ | RTLD_FIRST // Only search the image pointed-to by the handle.
+ );
+ AttemptedDlopen = true;
+ if (!ObjCHandle)
+ return nullptr;
+ ObjCObjectGetClassName = dlsym(ObjCHandle, "object_getClassName");
+ }
+ }
+
+ if (!ObjCObjectGetClassName)
+ return nullptr;
+
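+  // Invoke the dynamically resolved object_getClassName() on the object.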
+ return ObjCGetClassNameTy(ObjCObjectGetClassName)((void *)Pointer);
+#else
+ return nullptr;
+#endif
+}
+
SIntMax Value::getSIntValue() const {
CHECK(getType().isSignedIntegerTy());
if (isInlineInt()) {
/// \brief An opaque handle to a value.
typedef uptr ValueHandle;
+/// Returns the class name of the given ObjC object, or null if the name
+/// cannot be found.
+const char *getObjCClassName(ValueHandle Pointer);
/// \brief Representation of an operand value provided by the instrumented code.
///