//===-- asan_allocator.cc -------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
// has a left redzone of REDZONE bytes and
// a right redzone such that the end of the chunk is aligned by REDZONE
// (i.e. the right redzone is between 0 and REDZONE-1 bytes).
// The left redzone is always poisoned.
// The right redzone is poisoned on malloc, the body is poisoned on free.
// Once freed, a chunk is moved to the quarantine (a FIFO list).
// After quarantine, a chunk is returned to the free lists.
//
// The left redzone contains ASan's internal data and the stack trace of
// the malloc call.
// Once freed, the body of the chunk contains the stack trace of the free call.
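//
// Illustrative chunk layout (a sketch; exact sizes depend on flags()->redzone):
//
//   [ ChunkBase + compressed alloc stack | user memory  | right redzone    ]
//   |<----- left redzone: REDZONE ------>|<- used_size ->|<- 0..REDZONE-1 ->|
//   ^-- AsanChunk*                       ^-- pointer returned to the user
//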
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_lock.h"
#include "asan_mapping.h"
#include "asan_stats.h"
#include "asan_report.h"
#include "asan_thread.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
#include "sanitizer_common/sanitizer_atomic.h"

#if defined(_WIN32) && !defined(__clang__)
#include <intrin.h>
#endif

namespace __asan {

#define REDZONE ((uptr)(flags()->redzone))
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30;  // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20;  // 1M

static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
static const uptr kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

static const uptr kMaxAllowedMallocSize =
    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;

static inline bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}

static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}

static inline uptr SizeClassToSize(u8 size_class) {
  CHECK(size_class < kNumberOfSizeClasses);
  if (size_class <= kMallocSizeClassStepLog) {
    return 1UL << size_class;
  } else {
    return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
  }
}

static inline u8 SizeToSizeClass(uptr size) {
  u8 res = 0;
  if (size <= kMallocSizeClassStep) {
    uptr rounded = RoundUpToPowerOfTwo(size);
    res = Log2(rounded);
  } else {
    res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
          + kMallocSizeClassStepLog;
  }
  CHECK(res < kNumberOfSizeClasses);
  CHECK(size <= SizeClassToSize(res));
  return res;
}

// Given REDZONE bytes, we need to mark the first 'size' bytes
// as addressable and the remaining REDZONE-size bytes as unaddressable.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}

static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
  CHECK(IsAligned(size, GetPageSizeCached()));
  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
  if (flags()->debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}

// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of an AsanChunk (in which case the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values were chosen randomly.
enum {
  CHUNK_AVAILABLE  = 0x57,
  CHUNK_ALLOCATED  = 0x32,
  CHUNK_QUARANTINE = 0x19,
  CHUNK_MEMALIGN   = 0xDC
};

struct AsanChunk;

struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state : 8;
  uptr alloc_tid   : 24;
  uptr size_class  : 8;
  uptr free_tid    : 24;

  // Second 8 bytes.
  uptr alignment_log : 8;
  uptr used_size : FIRST_32_SECOND_64(32, 56);  // Size requested by the user.

  // This field may overlap with the user area and thus should not
  // be used while the chunk is in CHUNK_ALLOCATED state.
  AsanChunk *next;

  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
  // and is also aligned by REDZONE. However, if the memory is allocated
  // by memalign, the alignment might be higher and the user-accessible memory
  // starts at the first properly aligned address after 'this'.
  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
  uptr Size() { return SizeClassToSize(size_class); }
  u8 SizeClass() { return size_class; }
};

struct AsanChunk: public ChunkBase {
  u32 *compressed_alloc_stack() {
    return (u32*)((uptr)this + sizeof(ChunkBase));
  }
  u32 *compressed_free_stack() {
    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  uptr compressed_alloc_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
  }
  uptr compressed_free_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE) / sizeof(u32);
  }
};

uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }

void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
                              chunk_->compressed_alloc_stack_size());
}

void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
                              chunk_->compressed_free_stack_size());
}

bool AsanChunkView::AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
  if (addr >= Beg() && (addr + access_size) <= End()) {
    *offset = addr - Beg();
    return true;
  }
  return false;
}

bool AsanChunkView::AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
  if (addr < Beg()) {
    *offset = Beg() - addr;
    return true;
  }
  return false;
}

bool AsanChunkView::AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
  if (addr + access_size >= End()) {
    if (addr <= End())
      *offset = 0;
    else
      *offset = addr - End();
    return true;
  }
  return false;
}

static AsanChunk *PtrToChunk(uptr ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)((uptr)m - m->used_size);
  }
  return m;
}

void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    CHECK(!first_);
    first_ = q->first_;
    last_ = q->last_;
  }
  size_ += q->size();
  q->clear();
}

void AsanChunkFifoList::Push(AsanChunk *n) {
  CHECK(n->next == 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = n;
    last_ = n;
  } else {
    CHECK(!first_);
    last_ = first_ = n;
  }
  size_ += n->Size();
}

// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// long ago. Not sure if we can or want to do anything about this.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == 0)
    last_ = 0;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  return res;
}

// All pages we ever allocated.
struct PageGroup {
  uptr beg;
  uptr end;
  uptr size_of_chunk;
  uptr last_chunk;
  bool InRange(uptr addr) {
    return addr >= beg && addr < end;
  }
};

class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    AsanChunk *m = 0;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(flags()->quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindChunkByAddr(uptr addr) {
    ScopedLock lock(&mu_);
    return FindChunkByAddrUnlocked(addr);
  }

  uptr AllocationSize(uptr ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // Make sure this is our chunk and |ptr| actually points to the beginning
    // of the allocated memory.
    AsanChunk *m = FindChunkByAddrUnlocked(ptr);
    if (!m || m->Beg() != ptr) return 0;

    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  void PrintStatus() {
    ScopedLock lock(&mu_);
    uptr malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      uptr t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uptr addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  PageGroup *FindPageGroupUnlocked(uptr addr) {
    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uptr*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uptr g = (uptr)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // cannot belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return 0;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return 0;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on the offset: pick the chunk whose edge is closer.
    uptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uptr offset_from_beg = addr - g->beg;
    uptr this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    uptr offset = 0;
    AsanChunkView m_view(m);
    if (m_view.AddrIsInside(addr, 1, &offset))
      return m;

    if (m_view.AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    }

    CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
    if (this_chunk_addr == g->beg)  // leftmost chunk
      return m;
    uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
    CHECK(g->InRange(left_chunk_addr));
    return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
  }

  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    uptr size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }
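
  // The quarantine delays reuse of freed chunks (whose bodies stay poisoned),
  // so that late use-after-free accesses still hit poisoned shadow memory and
  // can be reported together with the recorded free stack trace.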

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(u8 size_class) {
    uptr size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    uptr mmap_size = Max(size, kMinMmapSize);
    uptr n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    uptr PageSize = GetPageSizeCached();
    if (size < PageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += PageSize;
    }
    CHECK(n_chunks > 0);
    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    AsanChunk *res = 0;
    for (uptr i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uptr)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    CHECK(idx < (int)ARRAY_SIZE(page_groups_));
    page_groups_[idx] = pg;
    return res;
  }
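
  // Illustrative numbers (a sketch, assuming the non-ASAN_LOW_MEMORY value
  // kMinMmapSize == 4MB): for size class 7 (128-byte chunks) one mmap yields
  // 4MB / 128 == 32768 chunks, minus the final chunk that is sacrificed to
  // hold the PageGroup record inside already-poisoned memory.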

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;  // Protects everything above.

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  atomic_uint32_t n_page_groups_;
  int n_sorted_page_groups_;
};

static MallocInfo malloc_info(LINKER_INITIALIZED);

void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}

AsanChunkView FindHeapChunkByAddress(uptr address) {
  return AsanChunkView(malloc_info.FindChunkByAddr(address));
}

static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, REDZONE);
  uptr needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  u8 size_class = SizeToSizeClass(needed_size);
  uptr size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (flags()->verbosity >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics.
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = 0;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // Get directly from the global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // Get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = 0;
  CHECK(m->Size() == size_to_allocate);
  uptr addr = (uptr)m + REDZONE;
  CHECK(addr <= (uptr)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->used_size = (uptr)p - (uptr)m;
    m->alignment_log = Log2(alignment);
    CHECK(m->Beg() == addr);
  } else {
    m->alignment_log = Log2(REDZONE);
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  CHECK(m->Beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = kInvalidTid;
  StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                            m->compressed_alloc_stack_size());
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (u8*)addr;
}
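
// Worked example (illustrative, assuming a 128-byte redzone): malloc(1)
// rounds the body up to 128 bytes, needs 128 + 128 == 256 bytes in total,
// and thus draws a 256-byte chunk from size class 8; the user pointer is
// (uptr)m + 128, leaving 127 poisoned bytes at the right end of the body.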

static void Deallocate(u8 *ptr, StackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (flags()->debug) {
    CHECK(malloc_info.FindPageGroup((uptr)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uptr)ptr);

  // Flip the chunk_state atomically to avoid race on double-free.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    ReportDoubleFree((uptr)ptr, stack);
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
  CHECK(REDZONE <= 16 || !m->next);
  CHECK(m->free_tid == kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  StackTrace::CompressStack(stack, m->compressed_free_stack(),
                            m->compressed_free_stack_size());
  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);

  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
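
// Note on the atomic state flip above: when two threads race to free the same
// pointer, exactly one atomic_exchange observes CHUNK_ALLOCATED; the loser
// observes CHUNK_QUARANTINE and reports the double-free.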

static u8 *Reallocate(u8 *old_ptr, uptr new_size,
                      StackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uptr)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->used_size;
  uptr memcpy_size = Min(new_size, old_size);
  u8 *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}

}  // namespace __asan

// Default (no-op) implementation of malloc hooks.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"

namespace __asan {

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(alignment, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  __asan_free_hook(ptr);
  Deallocate((u8*)ptr, stack);
}

SANITIZER_INTERFACE_ATTRIBUTE
void *asan_malloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(0, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(0, nmemb * size, stack);
  if (ptr)
    REAL(memset)(ptr, 0, nmemb * size);
  __asan_malloc_hook(ptr, nmemb * size);
  return ptr;
}

void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0) {
    void *ptr = (void*)Allocate(0, size, stack);
    __asan_malloc_hook(ptr, size);
    return ptr;
  } else if (size == 0) {
    __asan_free_hook(p);
    Deallocate((u8*)p, stack);
    return 0;
  }
  return Reallocate((u8*)p, size, stack);
}

void *asan_valloc(uptr size, StackTrace *stack) {
  void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

void *asan_pvalloc(uptr size, StackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  size = RoundUpTo(size, PageSize);
  if (size == 0) {
    // pvalloc(0) should allocate one page.
    size = PageSize;
  }
  void *ptr = (void*)Allocate(PageSize, size, stack);
  __asan_malloc_hook(ptr, size);
  return ptr;
}

int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  __asan_malloc_hook(ptr, size);
  *memptr = ptr;
  return 0;
}

uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
  CHECK(stack);
  if (ptr == 0) return 0;
  uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
  if (flags()->check_malloc_usable_size && (usable_size == 0)) {
    ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
  }
  return usable_size;
}

uptr asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uptr)ptr);
}

void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}

// ---------------------- Fake stack -------------------- {{{1
FakeStack::FakeStack() {
  CHECK(REAL(memset) != 0);
  REAL(memset)(this, 0, sizeof(*this));
}

bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
  uptr mem = allocated_size_classes_[size_class];
  uptr size = ClassMmapSize(size_class);
  bool res = mem && addr >= mem && addr < mem + size;
  return res;
}

uptr FakeStack::AddrIsInFakeStack(uptr addr) {
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
  }
  return 0;
}

// We may want to compute this during compilation.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
  uptr log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  if (!(alloc_size > (1UL << (log - 1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log - 1)));
  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}

void FakeFrameFifo::FifoPush(FakeFrame *node) {
  CHECK(node);
  node->next = 0;
  if (first_ == 0 && last_ == 0) {
    first_ = last_ = node;
  } else {
    CHECK(first_);
    CHECK(last_);
    last_->next = node;
    last_ = node;
  }
}

FakeFrame *FakeFrameFifo::FifoPop() {
  CHECK(first_ && last_ && "Exhausted fake stack");
  FakeFrame *res = 0;
  if (first_ == last_) {
    res = first_;
    first_ = last_ = 0;
  } else {
    res = first_;
    first_ = first_->next;
  }
  return res;
}

void FakeStack::Init(uptr stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}

void FakeStack::Cleanup() {
  alive_ = false;
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    uptr mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      UnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}

uptr FakeStack::ClassMmapSize(uptr size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}

void FakeStack::AllocateOneSizeClass(uptr size_class) {
  CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
  uptr new_mem = (uptr)MmapOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  uptr i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}

uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  uptr size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uptr ptr = (uptr)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}
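
// The while-loop above is lazy garbage collection: any frame whose real_stack
// is at or below the new frame's real_stack belongs to a function that has
// already returned, so it is popped from the LIFO call stack and recycled.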

void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  uptr size = fake_frame->size_minus_one + 1;
  uptr size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uptr ptr = (uptr)fake_frame;
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}

void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
  FakeFrame *fake_frame = (FakeFrame*)ptr;
  CHECK(fake_frame->magic == kRetiredStackFrameMagic);
  CHECK(fake_frame->descr != 0);
  CHECK(fake_frame->size_minus_one == size - 1);
  PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

uptr __asan_stack_malloc(uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}

void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return;
  if (ptr != real_stack) {
    FakeStack::OnFree(ptr, size, real_stack);
  }
}

// ASan allocator doesn't reserve extra bytes, so normally we would
// just return "size".
uptr __asan_get_estimated_allocated_size(uptr size) {
  if (size == 0) return 1;
  return Min(size, kMaxAllowedMallocSize);
}

bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uptr)p) > 0;
}

uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_HERE(kStackTraceMax);
    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
  }
  return allocated_size;
}