1 //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is shared between run-time libraries of sanitizers.
10 // It declares common functions and classes that are used in both runtimes.
11 // Implementation of some functions are provided in sanitizer_common, while
12 // others must be defined by run-time library itself.
13 //===----------------------------------------------------------------------===//
14 #ifndef SANITIZER_COMMON_H
15 #define SANITIZER_COMMON_H
17 #include "sanitizer_flags.h"
18 #include "sanitizer_interface_internal.h"
19 #include "sanitizer_internal_defs.h"
20 #include "sanitizer_libc.h"
21 #include "sanitizer_list.h"
22 #include "sanitizer_mutex.h"
// MSVC (without clang-cl) needs _ReadWriteBarrier declared before it is
// used as a compiler barrier in SanitizerBreakOptimization below.
#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
29 namespace __sanitizer
{
34 const uptr kWordSize
= SANITIZER_WORDSIZE
/ 8;
35 const uptr kWordSizeInBits
= 8 * kWordSize
;
37 #if defined(__powerpc__) || defined(__powerpc64__)
38 const uptr kCacheLineSize
= 128;
40 const uptr kCacheLineSize
= 64;
43 const uptr kMaxPathLength
= 4096;
45 const uptr kMaxThreadStackSize
= 1 << 30; // 1Gb
47 static const uptr kErrorMessageBufferSize
= 1 << 16;
49 // Denotes fake PC values that come from JIT/JAVA/etc.
50 // For such PC values __tsan_symbolize_external() will be called.
51 const u64 kExternalPCBit
= 1ULL << 60;
53 extern const char *SanitizerToolName
; // Can be changed by the tool.
55 extern atomic_uint32_t current_verbosity
;
56 INLINE
void SetVerbosity(int verbosity
) {
57 atomic_store(¤t_verbosity
, verbosity
, memory_order_relaxed
);
59 INLINE
int Verbosity() {
60 return atomic_load(¤t_verbosity
, memory_order_relaxed
);
64 extern uptr PageSizeCached
;
65 INLINE uptr
GetPageSizeCached() {
67 PageSizeCached
= GetPageSize();
68 return PageSizeCached
;
70 uptr
GetMmapGranularity();
71 uptr
GetMaxVirtualAddress();
75 void GetThreadStackTopAndBottom(bool at_initialization
, uptr
*stack_top
,
77 void GetThreadStackAndTls(bool main
, uptr
*stk_addr
, uptr
*stk_size
,
78 uptr
*tls_addr
, uptr
*tls_size
);
81 void *MmapOrDie(uptr size
, const char *mem_type
, bool raw_report
= false);
82 INLINE
void *MmapOrDieQuietly(uptr size
, const char *mem_type
) {
83 return MmapOrDie(size
, mem_type
, /*raw_report*/ true);
85 void UnmapOrDie(void *addr
, uptr size
);
86 void *MmapFixedNoReserve(uptr fixed_addr
, uptr size
,
87 const char *name
= nullptr);
88 void *MmapNoReserveOrDie(uptr size
, const char *mem_type
);
89 void *MmapFixedOrDie(uptr fixed_addr
, uptr size
);
90 void *MmapFixedNoAccess(uptr fixed_addr
, uptr size
, const char *name
= nullptr);
91 void *MmapNoAccess(uptr size
);
92 // Map aligned chunk of address space; size and alignment are powers of two.
93 void *MmapAlignedOrDie(uptr size
, uptr alignment
, const char *mem_type
);
94 // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
95 // unaccessible memory.
96 bool MprotectNoAccess(uptr addr
, uptr size
);
97 bool MprotectReadOnly(uptr addr
, uptr size
);
99 // Find an available address space.
100 uptr
FindAvailableMemoryRange(uptr size
, uptr alignment
, uptr left_padding
);
102 // Used to check if we can map shadow memory to a fixed location.
103 bool MemoryRangeIsAvailable(uptr range_start
, uptr range_end
);
104 void ReleaseMemoryToOS(uptr addr
, uptr size
);
105 void IncreaseTotalMmap(uptr size
);
106 void DecreaseTotalMmap(uptr size
);
108 void NoHugePagesInRegion(uptr addr
, uptr length
);
109 void DontDumpShadowMemory(uptr addr
, uptr length
);
110 // Check if the built VMA size matches the runtime one.
112 void RunMallocHooks(const void *ptr
, uptr size
);
113 void RunFreeHooks(const void *ptr
);
115 // InternalScopedBuffer can be used instead of large stack arrays to
116 // keep frame size low.
117 // FIXME: use InternalAlloc instead of MmapOrDie once
118 // InternalAlloc is made libc-free.
119 template <typename T
>
120 class InternalScopedBuffer
{
122 explicit InternalScopedBuffer(uptr cnt
) {
124 ptr_
= (T
*)MmapOrDie(cnt
* sizeof(T
), "InternalScopedBuffer");
126 ~InternalScopedBuffer() { UnmapOrDie(ptr_
, cnt_
* sizeof(T
)); }
127 T
&operator[](uptr i
) { return ptr_
[i
]; }
128 T
*data() { return ptr_
; }
129 uptr
size() { return cnt_
* sizeof(T
); }
134 // Disallow copies and moves.
135 InternalScopedBuffer(const InternalScopedBuffer
&) = delete;
136 InternalScopedBuffer
&operator=(const InternalScopedBuffer
&) = delete;
137 InternalScopedBuffer(InternalScopedBuffer
&&) = delete;
138 InternalScopedBuffer
&operator=(InternalScopedBuffer
&&) = delete;
141 class InternalScopedString
: public InternalScopedBuffer
<char> {
143 explicit InternalScopedString(uptr max_length
)
144 : InternalScopedBuffer
<char>(max_length
), length_(0) {
147 uptr
length() { return length_
; }
152 void append(const char *format
, ...);
158 // Simple low-level (mmap-based) allocator for internal use. Doesn't have
159 // constructor, so all instances of LowLevelAllocator should be
160 // linker initialized.
161 class LowLevelAllocator
{
163 // Requires an external lock.
164 void *Allocate(uptr size
);
166 char *allocated_end_
;
167 char *allocated_current_
;
169 typedef void (*LowLevelAllocateCallback
)(uptr ptr
, uptr size
);
170 // Allows to register tool-specific callbacks for LowLevelAllocator.
171 // Passing NULL removes the callback.
172 void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback
);
175 void RawWrite(const char *buffer
);
176 bool ColorizeReports();
177 void RemoveANSIEscapeSequencesFromString(char *buffer
);
178 void Printf(const char *format
, ...);
179 void Report(const char *format
, ...);
180 void SetPrintfAndReportCallback(void (*callback
)(const char *));
// Print via Report/Printf only when the runtime verbosity is >= level.
// The do { } while (0) wrappers (lost in the corrupted text) make the macros
// safe inside unbraced if/else.
#define VReport(level, ...)                                  \
  do {                                                       \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__);   \
  } while (0)
#define VPrintf(level, ...)                                  \
  do {                                                       \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__);   \
  } while (0)
190 // Can be used to prevent mixing error reports from different sanitizers.
191 extern StaticSpinMutex CommonSanitizerReportMutex
;
194 void Write(const char *buffer
, uptr length
);
195 bool SupportsColors();
196 void SetReportPath(const char *path
);
198 // Don't use fields directly. They are only declared public to allow
199 // aggregate initialization.
201 // Protects fields below.
203 // Opened file descriptor. Defaults to stderr. It may be equal to
204 // kInvalidFd, in which case new file will be opened when necessary.
206 // Path prefix of report file, set via __sanitizer_set_report_path.
207 char path_prefix
[kMaxPathLength
];
208 // Full path to report, obtained as <path_prefix>.PID
209 char full_path
[kMaxPathLength
];
210 // PID of the process that opened fd. If a fork() occurs,
211 // the PID of child will be different from fd_pid.
215 void ReopenIfNecessary();
217 extern ReportFile report_file
;
219 extern uptr stoptheworld_tracer_pid
;
220 extern uptr stoptheworld_tracer_ppid
;
// Access mode passed to OpenFile().
enum FileAccessMode {
  RdOnly,
  WrOnly
};
228 // Returns kInvalidFd on error.
229 fd_t
OpenFile(const char *filename
, FileAccessMode mode
,
230 error_t
*errno_p
= nullptr);
231 void CloseFile(fd_t
);
233 // Return true on success, false on error.
234 bool ReadFromFile(fd_t fd
, void *buff
, uptr buff_size
,
235 uptr
*bytes_read
= nullptr, error_t
*error_p
= nullptr);
236 bool WriteToFile(fd_t fd
, const void *buff
, uptr buff_size
,
237 uptr
*bytes_written
= nullptr, error_t
*error_p
= nullptr);
239 bool RenameFile(const char *oldpath
, const char *newpath
,
240 error_t
*error_p
= nullptr);
242 // Scoped file handle closer.
244 explicit FileCloser(fd_t fd
) : fd(fd
) {}
245 ~FileCloser() { CloseFile(fd
); }
249 bool SupportsColoredOutput(fd_t fd
);
251 // Opens the file 'file_name" and reads up to 'max_len' bytes.
252 // The resulting buffer is mmaped and stored in '*buff'.
253 // The size of the mmaped region is stored in '*buff_size'.
254 // The total number of read bytes is stored in '*read_len'.
255 // Returns true if file was successfully opened and read.
256 bool ReadFileToBuffer(const char *file_name
, char **buff
, uptr
*buff_size
,
257 uptr
*read_len
, uptr max_len
= 1 << 26,
258 error_t
*errno_p
= nullptr);
259 // Maps given file to virtual memory, and returns pointer to it
260 // (or NULL if mapping fails). Stores the size of mmaped region
262 void *MapFileToMemory(const char *file_name
, uptr
*buff_size
);
263 void *MapWritableFileToMemory(void *addr
, uptr size
, fd_t fd
, OFF_T offset
);
265 bool IsAccessibleMemoryRange(uptr beg
, uptr size
);
267 // Error report formatting.
268 const char *StripPathPrefix(const char *filepath
,
269 const char *strip_file_prefix
);
270 // Strip the directories from the module name.
271 const char *StripModuleName(const char *module
);
274 uptr
ReadBinaryName(/*out*/char *buf
, uptr buf_len
);
275 uptr
ReadBinaryNameCached(/*out*/char *buf
, uptr buf_len
);
276 uptr
ReadLongProcessName(/*out*/ char *buf
, uptr buf_len
);
277 const char *GetProcessName();
278 void UpdateProcessName();
279 void CacheBinaryName();
280 void DisableCoreDumperIfNecessary();
281 void DumpProcessMap();
282 bool FileExists(const char *filename
);
283 const char *GetEnv(const char *name
);
284 bool SetEnv(const char *name
, const char *value
);
285 const char *GetPwd();
286 char *FindPathToBinary(const char *name
);
287 bool IsPathSeparator(const char c
);
288 bool IsAbsolutePath(const char *path
);
289 // Starts a subprocess and returs its pid.
290 // If *_fd parameters are not kInvalidFd their corresponding input/output
291 // streams will be redirect to the file. The files will always be closed
292 // in parent process even in case of an error.
293 // The child process will close all fds after STDERR_FILENO
294 // before passing control to a program.
295 pid_t
StartSubprocess(const char *filename
, const char *const argv
[],
296 fd_t stdin_fd
= kInvalidFd
, fd_t stdout_fd
= kInvalidFd
,
297 fd_t stderr_fd
= kInvalidFd
);
298 // Checks if specified process is still running
299 bool IsProcessRunning(pid_t pid
);
300 // Waits for the process to finish and returns its exit code.
301 // Returns -1 in case of an error.
302 int WaitForProcess(pid_t pid
);
308 bool StackSizeIsUnlimited();
309 uptr
GetStackSizeLimitInBytes();
310 void SetStackSizeLimitInBytes(uptr limit
);
311 bool AddressSpaceIsUnlimited();
312 void SetAddressSpaceUnlimited();
313 void AdjustStackSize(void *attr
);
314 void PrepareForSandboxing(__sanitizer_sandbox_arguments
*args
);
315 void CovPrepareForSandboxing(__sanitizer_sandbox_arguments
*args
);
316 void SetSandboxingCallback(void (*f
)());
318 void CoverageUpdateMapping();
319 void CovBeforeFork();
320 void CovAfterFork(int child_pid
);
322 void InitializeCoverage(bool enabled
, const char *coverage_dir
);
323 void ReInitializeCoverage(bool enabled
, const char *coverage_dir
);
329 void SleepForSeconds(int seconds
);
330 void SleepForMillis(int millis
);
332 int Atexit(void (*function
)(void));
333 void SortArray(uptr
*array
, uptr size
);
334 void SortArray(u32
*array
, uptr size
);
335 bool TemplateMatch(const char *templ
, const char *str
);
338 void NORETURN
Abort();
341 CheckFailed(const char *file
, int line
, const char *cond
, u64 v1
, u64 v2
);
342 void NORETURN
ReportMmapFailureAndDie(uptr size
, const char *mem_type
,
343 const char *mmap_type
, error_t err
,
344 bool raw_report
= false);
346 // Set the name of the current thread to 'name', return true on succees.
347 // The name may be truncated to a system-dependent limit.
348 bool SanitizerSetThreadName(const char *name
);
349 // Get the name of the current thread (no more than max_len bytes),
350 // return true on succees. name should have space for at least max_len+1 bytes.
351 bool SanitizerGetThreadName(char *name
, int max_len
);
353 // Specific tools may override behavior of "Die" and "CheckFailed" functions
354 // to do tool-specific job.
355 typedef void (*DieCallbackType
)(void);
357 // It's possible to add several callbacks that would be run when "Die" is
358 // called. The callbacks will be run in the opposite order. The tools are
359 // strongly recommended to setup all callbacks during initialization, when there
360 // is only a single thread.
361 bool AddDieCallback(DieCallbackType callback
);
362 bool RemoveDieCallback(DieCallbackType callback
);
364 void SetUserDieCallback(DieCallbackType callback
);
366 typedef void (*CheckFailedCallbackType
)(const char *, int, const char *,
368 void SetCheckFailedCallback(CheckFailedCallbackType callback
);
370 // Callback will be called if soft_rss_limit_mb is given and the limit is
371 // exceeded (exceeded==true) or if rss went down below the limit
372 // (exceeded==false).
373 // The callback should be registered once at the tool init time.
374 void SetSoftRssLimitExceededCallback(void (*Callback
)(bool exceeded
));
376 // Callback to be called when we want to try releasing unused allocator memory
378 typedef void (*AllocatorReleaseToOSCallback
)();
379 // The callback should be registered once at the tool init time.
380 void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback
);
382 // Functions related to signal handling.
383 typedef void (*SignalHandlerType
)(int, void *, void *);
384 bool IsHandledDeadlySignal(int signum
);
385 void InstallDeadlySignalHandlers(SignalHandlerType handler
);
386 // Alternative signal stack (POSIX-only).
387 void SetAlternateSignalStack();
388 void UnsetAlternateSignalStack();
390 // We don't want a summary too long.
391 const int kMaxSummaryLength
= 1024;
392 // Construct a one-line string:
393 // SUMMARY: SanitizerToolName: error_message
394 // and pass it to __sanitizer_report_error_summary.
395 void ReportErrorSummary(const char *error_message
);
396 // Same as above, but construct error_message as:
397 // error_type file:line[:column][ function]
398 void ReportErrorSummary(const char *error_type
, const AddressInfo
&info
);
399 // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
400 void ReportErrorSummary(const char *error_type
, const StackTrace
*trace
);
// MSVC bit-scan intrinsics used by the bit-index helpers below
// (the extern "C" block and _WIN64 guard were lost in the corrupted text).
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
414 INLINE uptr
MostSignificantSetBitIndex(uptr x
) {
416 unsigned long up
; // NOLINT
417 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
419 up
= SANITIZER_WORDSIZE
- 1 - __builtin_clzll(x
);
421 up
= SANITIZER_WORDSIZE
- 1 - __builtin_clzl(x
);
423 #elif defined(_WIN64)
424 _BitScanReverse64(&up
, x
);
426 _BitScanReverse(&up
, x
);
431 INLINE uptr
LeastSignificantSetBitIndex(uptr x
) {
433 unsigned long up
; // NOLINT
434 #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
436 up
= __builtin_ctzll(x
);
438 up
= __builtin_ctzl(x
);
440 #elif defined(_WIN64)
441 _BitScanForward64(&up
, x
);
443 _BitScanForward(&up
, x
);
448 INLINE
bool IsPowerOfTwo(uptr x
) {
449 return (x
& (x
- 1)) == 0;
452 INLINE uptr
RoundUpToPowerOfTwo(uptr size
) {
454 if (IsPowerOfTwo(size
)) return size
;
456 uptr up
= MostSignificantSetBitIndex(size
);
457 CHECK_LT(size
, (1ULL << (up
+ 1)));
458 CHECK_GT(size
, (1ULL << up
));
459 return 1ULL << (up
+ 1);
462 INLINE uptr
RoundUpTo(uptr size
, uptr boundary
) {
463 RAW_CHECK(IsPowerOfTwo(boundary
));
464 return (size
+ boundary
- 1) & ~(boundary
- 1);
467 INLINE uptr
RoundDownTo(uptr x
, uptr boundary
) {
468 return x
& ~(boundary
- 1);
471 INLINE
bool IsAligned(uptr a
, uptr alignment
) {
472 return (a
& (alignment
- 1)) == 0;
475 INLINE uptr
Log2(uptr x
) {
476 CHECK(IsPowerOfTwo(x
));
477 return LeastSignificantSetBitIndex(x
);
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.

// Returns the smaller of a and b (either one on ties).
template <class T>
T Min(T a, T b) {
  if (b < a)
    return b;
  return a;
}
// Returns the larger of a and b (either one on ties).
template <class T>
T Max(T a, T b) {
  if (b > a)
    return b;
  return a;
}
// Exchanges the values of a and b via a temporary copy
// (body restored — it was lost in the corrupted text).
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}
491 INLINE
bool IsSpace(int c
) {
492 return (c
== ' ') || (c
== '\n') || (c
== '\t') ||
493 (c
== '\f') || (c
== '\r') || (c
== '\v');
495 INLINE
bool IsDigit(int c
) {
496 return (c
>= '0') && (c
<= '9');
498 INLINE
int ToLower(int c
) {
499 return (c
>= 'A' && c
<= 'Z') ? (c
+ 'a' - 'A') : c
;
502 // A low-level vector based on mmap. May incur a significant memory overhead for
504 // WARNING: The current implementation supports only POD types.
506 class InternalMmapVectorNoCtor
{
508 void Initialize(uptr initial_capacity
) {
509 capacity_
= Max(initial_capacity
, (uptr
)1);
511 data_
= (T
*)MmapOrDie(capacity_
* sizeof(T
), "InternalMmapVectorNoCtor");
514 UnmapOrDie(data_
, capacity_
* sizeof(T
));
516 T
&operator[](uptr i
) {
520 const T
&operator[](uptr i
) const {
524 void push_back(const T
&element
) {
525 CHECK_LE(size_
, capacity_
);
526 if (size_
== capacity_
) {
527 uptr new_capacity
= RoundUpToPowerOfTwo(size_
+ 1);
528 Resize(new_capacity
);
530 internal_memcpy(&data_
[size_
++], &element
, sizeof(T
));
534 return data_
[size_
- 1];
543 const T
*data() const {
549 uptr
capacity() const {
553 void clear() { size_
= 0; }
554 bool empty() const { return size() == 0; }
556 const T
*begin() const {
562 const T
*end() const {
563 return data() + size();
566 return data() + size();
570 void Resize(uptr new_capacity
) {
571 CHECK_GT(new_capacity
, 0);
572 CHECK_LE(size_
, new_capacity
);
573 T
*new_data
= (T
*)MmapOrDie(new_capacity
* sizeof(T
),
574 "InternalMmapVector");
575 internal_memcpy(new_data
, data_
, size_
* sizeof(T
));
578 UnmapOrDie(old_data
, capacity_
* sizeof(T
));
579 capacity_
= new_capacity
;
588 class InternalMmapVector
: public InternalMmapVectorNoCtor
<T
> {
590 explicit InternalMmapVector(uptr initial_capacity
) {
591 InternalMmapVectorNoCtor
<T
>::Initialize(initial_capacity
);
593 ~InternalMmapVector() { InternalMmapVectorNoCtor
<T
>::Destroy(); }
594 // Disallow evil constructors.
595 InternalMmapVector(const InternalMmapVector
&);
596 void operator=(const InternalMmapVector
&);
599 // HeapSort for arrays and InternalMmapVector.
600 template<class Container
, class Compare
>
601 void InternalSort(Container
*v
, uptr size
, Compare comp
) {
604 // Stage 1: insert elements to the heap.
605 for (uptr i
= 1; i
< size
; i
++) {
607 for (j
= i
; j
> 0; j
= p
) {
609 if (comp((*v
)[p
], (*v
)[j
]))
610 Swap((*v
)[j
], (*v
)[p
]);
615 // Stage 2: swap largest element with the last one,
616 // and sink the new top.
617 for (uptr i
= size
- 1; i
> 0; i
--) {
618 Swap((*v
)[0], (*v
)[i
]);
620 for (j
= 0; j
< i
; j
= max_ind
) {
621 uptr left
= 2 * j
+ 1;
622 uptr right
= 2 * j
+ 2;
624 if (left
< i
&& comp((*v
)[max_ind
], (*v
)[left
]))
626 if (right
< i
&& comp((*v
)[max_ind
], (*v
)[right
]))
629 Swap((*v
)[j
], (*v
)[max_ind
]);
636 template<class Container
, class Value
, class Compare
>
637 uptr
InternalBinarySearch(const Container
&v
, uptr first
, uptr last
,
638 const Value
&val
, Compare comp
) {
639 uptr not_found
= last
+ 1;
640 while (last
>= first
) {
641 uptr mid
= (first
+ last
) / 2;
642 if (comp(v
[mid
], val
))
644 else if (comp(val
, v
[mid
]))
652 // Represents a binary loaded into virtual memory (e.g. this can be an
653 // executable or a shared object).
656 LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_
.clear(); }
657 void set(const char *module_name
, uptr base_address
);
659 void addAddressRange(uptr beg
, uptr end
, bool executable
);
660 bool containsAddress(uptr address
) const;
662 const char *full_name() const { return full_name_
; }
663 uptr
base_address() const { return base_address_
; }
665 struct AddressRange
{
671 AddressRange(uptr beg
, uptr end
, bool executable
)
672 : next(nullptr), beg(beg
), end(end
), executable(executable
) {}
675 const IntrusiveList
<AddressRange
> &ranges() const { return ranges_
; }
678 char *full_name_
; // Owned.
680 IntrusiveList
<AddressRange
> ranges_
;
683 // List of LoadedModules. OS-dependent implementation is responsible for
684 // filling this information.
685 class ListOfModules
{
687 ListOfModules() : modules_(kInitialCapacity
) {}
688 ~ListOfModules() { clear(); }
690 const LoadedModule
*begin() const { return modules_
.begin(); }
691 LoadedModule
*begin() { return modules_
.begin(); }
692 const LoadedModule
*end() const { return modules_
.end(); }
693 LoadedModule
*end() { return modules_
.end(); }
694 uptr
size() const { return modules_
.size(); }
695 const LoadedModule
&operator[](uptr i
) const {
696 CHECK_LT(i
, modules_
.size());
702 for (auto &module
: modules_
) module
.clear();
706 InternalMmapVector
<LoadedModule
> modules_
;
707 // We rarely have more than 16K loaded modules.
708 static const uptr kInitialCapacity
= 1 << 14;
711 // Callback type for iterating over a set of memory ranges.
712 typedef void (*RangeIteratorCallback
)(uptr begin
, uptr end
, void *arg
);
// Android API levels the runtime distinguishes. NOTE(review): the enumerator
// on the dropped middle line is reconstructed as ANDROID_KITKAT = 19 from
// upstream — confirm.
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
721 void WriteToSyslog(const char *buffer
);
724 void LogFullErrorReport(const char *buffer
);
726 INLINE
void LogFullErrorReport(const char *buffer
) {}
729 #if SANITIZER_LINUX || SANITIZER_MAC
730 void WriteOneLineToSyslog(const char *s
);
731 void LogMessageOnPrintf(const char *str
);
733 INLINE
void WriteOneLineToSyslog(const char *s
) {}
734 INLINE
void LogMessageOnPrintf(const char *str
) {}
738 // Initialize Android logging. Any writes before this are silently lost.
739 void AndroidLogInit();
741 INLINE
void AndroidLogInit() {}
744 #if SANITIZER_ANDROID
745 void SanitizerInitializeUnwinder();
746 AndroidApiLevel
AndroidGetApiLevel();
748 INLINE
void AndroidLogWrite(const char *buffer_unused
) {}
749 INLINE
void SanitizerInitializeUnwinder() {}
750 INLINE AndroidApiLevel
AndroidGetApiLevel() { return ANDROID_NOT_ANDROID
; }
753 INLINE uptr
GetPthreadDestructorIterations() {
754 #if SANITIZER_ANDROID
755 return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1
) ? 8 : 4;
756 #elif SANITIZER_POSIX
759 // Unused on Windows.
764 void *internal_start_thread(void(*func
)(void*), void *arg
);
765 void internal_join_thread(void *th
);
766 void MaybeStartBackgroudThread();
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  // Empty asm with a "memory" clobber: forces the compiler to assume *arg
  // may be read/written here, defeating pattern recognition.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
780 struct SignalContext
{
786 bool is_memory_access
;
788 enum WriteFlag
{ UNKNOWN
, READ
, WRITE
} write_flag
;
790 SignalContext(void *context
, uptr addr
, uptr pc
, uptr sp
, uptr bp
,
791 bool is_memory_access
, WriteFlag write_flag
)
797 is_memory_access(is_memory_access
),
798 write_flag(write_flag
) {}
800 // Creates signal context in a platform-specific manner.
801 static SignalContext
Create(void *siginfo
, void *context
);
803 // Returns true if the "context" indicates a memory write.
804 static WriteFlag
GetWriteFlag(void *context
);
807 void GetPcSpBp(void *context
, uptr
*pc
, uptr
*sp
, uptr
*bp
);
// RAII helper: runs fn when the object goes out of scope.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};
821 // A simple scope guard. Usage:
822 // auto cleanup = at_scope_exit([]{ do_cleanup; });
823 template <typename Fn
>
824 RunOnDestruction
<Fn
> at_scope_exit(Fn fn
) {
825 return RunOnDestruction
<Fn
>(fn
);
828 // Linux on 64-bit s390 had a nasty bug that crashes the whole machine
829 // if a process uses virtual memory over 4TB (as many sanitizers like
830 // to do). This function will abort the process if running on a kernel
831 // that looks vulnerable.
832 #if SANITIZER_LINUX && SANITIZER_S390_64
833 void AvoidCVE_2016_2143();
835 INLINE
void AvoidCVE_2016_2143() {}
838 struct StackDepotStats
{
843 } // namespace __sanitizer
845 inline void *operator new(__sanitizer::operator_new_size_type size
,
846 __sanitizer::LowLevelAllocator
&alloc
) {
847 return alloc
.Allocate(size
);
850 #endif // SANITIZER_COMMON_H