Add libstdc++-raw-cxx.m4 and use it in libsanitizer
[gcc.git] / libsanitizer / asan / asan_allocator.cc
1 //===-- asan_allocator.cc -------------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of AddressSanitizer, an address sanity checker.
9 //
10 // Implementation of ASan's memory allocator.
// Every piece of memory (AsanChunk) allocated by the allocator
12 // has a left redzone of REDZONE bytes and
13 // a right redzone such that the end of the chunk is aligned by REDZONE
14 // (i.e. the right redzone is between 0 and REDZONE-1).
15 // The left redzone is always poisoned.
16 // The right redzone is poisoned on malloc, the body is poisoned on free.
17 // Once freed, a chunk is moved to a quarantine (fifo list).
18 // After quarantine, a chunk is returned to freelists.
19 //
20 // The left redzone contains ASan's internal data and the stack trace of
21 // the malloc call.
22 // Once freed, the body of the chunk contains the stack trace of the free call.
23 //
24 //===----------------------------------------------------------------------===//
25
26 #include "asan_allocator.h"
27 #include "asan_interceptors.h"
28 #include "asan_internal.h"
29 #include "asan_lock.h"
30 #include "asan_mapping.h"
31 #include "asan_stats.h"
32 #include "asan_report.h"
33 #include "asan_thread.h"
34 #include "asan_thread_registry.h"
35 #include "sanitizer/asan_interface.h"
36 #include "sanitizer_common/sanitizer_atomic.h"
37
38 #if defined(_WIN32) && !defined(__clang__)
39 #include <intrin.h>
40 #endif
41
42 namespace __asan {
43
// REDZONE is read from the runtime flags, so it is NOT a compile-time
// constant; every use re-reads flags()->redzone.
#define REDZONE ((uptr)(flags()->redzone))
// NOTE(review): kMinAllocSize is dynamically initialized from flags();
// this assumes flags() is already usable during static initialization
// of this translation unit -- verify the init order.
static const uptr kMinAllocSize = REDZONE * 2;
static const u64 kMaxAvailableRam = 128ULL << 30; // 128G
static const uptr kMaxThreadLocalQuarantine = 1 << 20; // 1M

// Smallest mmap unit for refilling free lists (smaller in low-memory mode).
static const uptr kMinMmapSize = (ASAN_LOW_MEMORY) ? 4UL << 17 : 4UL << 20;
// Chunks at or above this size bypass the thread-local free lists.
static const uptr kMaxSizeForThreadLocalFreeList =
    (ASAN_LOW_MEMORY) ? 1 << 15 : 1 << 17;

// Size classes less than kMallocSizeClassStep are powers of two.
// All other size classes are multiples of kMallocSizeClassStep.
static const uptr kMallocSizeClassStepLog = 26;
static const uptr kMallocSizeClassStep = 1UL << kMallocSizeClassStepLog;

// Hard cap on a single allocation request.
static const uptr kMaxAllowedMallocSize =
    (SANITIZER_WORDSIZE == 32) ? 3UL << 30 : 8UL << 30;
60
61 static inline bool IsAligned(uptr a, uptr alignment) {
62 return (a & (alignment - 1)) == 0;
63 }
64
// Returns log2(x). |x| must be a power of two. On MSVC (non-clang) the
// BitScanForward family reports the index of the lowest set bit, which
// equals log2 for powers of two.
static inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !defined(_WIN32) || defined(__clang__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}
79
// Returns the smallest power of two that is >= |size| (|size| itself
// when it is already a power of two). |size| must be non-zero.
static inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  // Find the index of the most significant set bit ...
  unsigned long up;  // NOLINT
#if !defined(_WIN32) || defined(__clang__)
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
#elif defined(_WIN64)
  _BitScanReverse64(&up, size);
#else
  _BitScanReverse(&up, size);
#endif
  // ... and round up to the next power (size is strictly between 2^up
  // and 2^(up+1) because the power-of-two case returned above).
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}
96
97 static inline uptr SizeClassToSize(u8 size_class) {
98 CHECK(size_class < kNumberOfSizeClasses);
99 if (size_class <= kMallocSizeClassStepLog) {
100 return 1UL << size_class;
101 } else {
102 return (size_class - kMallocSizeClassStepLog) * kMallocSizeClassStep;
103 }
104 }
105
106 static inline u8 SizeToSizeClass(uptr size) {
107 u8 res = 0;
108 if (size <= kMallocSizeClassStep) {
109 uptr rounded = RoundUpToPowerOfTwo(size);
110 res = Log2(rounded);
111 } else {
112 res = ((size + kMallocSizeClassStep - 1) / kMallocSizeClassStep)
113 + kMallocSizeClassStepLog;
114 }
115 CHECK(res < kNumberOfSizeClasses);
116 CHECK(size <= SizeClassToSize(res));
117 return res;
118 }
119
// Given REDZONE bytes, we need to mark first size bytes
// as addressable and the rest REDZONE-size bytes as unaddressable.
// |mem| must be REDZONE-aligned; |size| must not exceed REDZONE.
static void PoisonHeapPartialRightRedzone(uptr mem, uptr size) {
  CHECK(size <= REDZONE);
  CHECK(IsAligned(mem, REDZONE));
  CHECK(IsPowerOfTwo(SHADOW_GRANULARITY));
  CHECK(IsPowerOfTwo(REDZONE));
  CHECK(REDZONE >= SHADOW_GRANULARITY);
  PoisonShadowPartialRightRedzone(mem, size, REDZONE,
                                  kAsanHeapRightRedzoneMagic);
}
131
// Maps |size| bytes (page-aligned) of fresh memory and poisons its whole
// shadow as heap-left-redzone; chunks carved from it are unpoisoned on
// allocation. Dies on mmap failure.
static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
  CHECK(IsAligned(size, GetPageSizeCached()));
  u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
  PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
  if (flags()->debug) {
    Printf("ASAN_MMAP: [%p, %p)\n", res, res + size);
  }
  return res;
}
141
// Every chunk of memory allocated by this allocator can be in one of 3 states:
// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
//
// The pseudo state CHUNK_MEMALIGN is used to mark that the address is not
// the beginning of a AsanChunk (in which the actual chunk resides at
// this - this->used_size).
//
// The magic numbers for the enum values are taken randomly.
enum {
  CHUNK_AVAILABLE  = 0x57,
  CHUNK_ALLOCATED  = 0x32,
  CHUNK_QUARANTINE = 0x19,
  CHUNK_MEMALIGN   = 0xDC
};
158
// Per-chunk header stored at the start of the left redzone.
// The bitfield layout packs state + both thread ids into 8 bytes;
// tids are therefore limited to 24 bits.
struct ChunkBase {
  // First 8 bytes.
  uptr chunk_state : 8;   // One of the CHUNK_* enum values.
  uptr alloc_tid   : 24;  // Tid of the allocating thread.
  uptr size_class  : 8;   // Index into the size-class table.
  uptr free_tid    : 24;  // Tid of the freeing thread (kInvalidTid if live).

  // Second 8 bytes.
  uptr alignment_log : 8;  // log2 of the chunk's alignment; see Beg().
  uptr used_size : FIRST_32_SECOND_64(32, 56);  // Size requested by the user.

  // This field may overlap with the user area and thus should not
  // be used while the chunk is in CHUNK_ALLOCATED state.
  AsanChunk *next;

  // Typically the beginning of the user-accessible memory is 'this'+REDZONE
  // and is also aligned by REDZONE. However, if the memory is allocated
  // by memalign, the alignment might be higher and the user-accessible memory
  // starts at the first properly aligned address after 'this'.
  uptr Beg() { return RoundUpTo((uptr)this + 1, 1 << alignment_log); }
  uptr Size() { return SizeClassToSize(size_class); }
  u8 SizeClass() { return size_class; }
};
182
// Full chunk: the header plus accessors for the two compressed stack
// traces stored around it.
struct AsanChunk: public ChunkBase {
  // Alloc stack lives in the left redzone, right after the header.
  u32 *compressed_alloc_stack() {
    return (u32*)((uptr)this + sizeof(ChunkBase));
  }
  // Free stack starts right after the redzone (or after the header if
  // the redzone is smaller), i.e. inside the now-dead user area.
  u32 *compressed_free_stack() {
    return (u32*)((uptr)this + Max((uptr)REDZONE, (uptr)sizeof(ChunkBase)));
  }

  // The left redzone after the ChunkBase is given to the alloc stack trace.
  uptr compressed_alloc_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE - sizeof(ChunkBase)) / sizeof(u32);
  }
  // NOTE(review): the free stack is allotted a full REDZONE of u32s in
  // the user area -- assumes the chunk body is at least that large; verify.
  uptr compressed_free_stack_size() {
    if (REDZONE < sizeof(ChunkBase)) return 0;
    return (REDZONE) / sizeof(u32);
  }
};
201
// Thin read-only accessors over the underlying chunk.
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }  // One past the user area.
uptr AsanChunkView::UsedSize() { return chunk_->used_size; }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
207
// Decompresses the recorded malloc stack trace into |stack|.
void AsanChunkView::GetAllocStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_alloc_stack(),
                              chunk_->compressed_alloc_stack_size());
}

// Decompresses the recorded free stack trace into |stack|.
void AsanChunkView::GetFreeStack(StackTrace *stack) {
  StackTrace::UncompressStack(stack, chunk_->compressed_free_stack(),
                              chunk_->compressed_free_stack_size());
}
217
218 bool AsanChunkView::AddrIsInside(uptr addr, uptr access_size, uptr *offset) {
219 if (addr >= Beg() && (addr + access_size) <= End()) {
220 *offset = addr - Beg();
221 return true;
222 }
223 return false;
224 }
225
226 bool AsanChunkView::AddrIsAtLeft(uptr addr, uptr access_size, uptr *offset) {
227 if (addr < Beg()) {
228 *offset = Beg() - addr;
229 return true;
230 }
231 return false;
232 }
233
// True iff the access reaches or passes the end of the user area; on
// success *offset is the distance past End() (0 if addr itself is still
// at or before End()).
bool AsanChunkView::AddrIsAtRight(uptr addr, uptr access_size, uptr *offset) {
  if (addr + access_size >= End()) {
    if (addr <= End())
      *offset = 0;
    else
      *offset = addr - End();
    return true;
  }
  return false;
}
244
// Maps a user pointer back to its chunk header, one redzone before it.
// For memalign'ed memory the header found there is a CHUNK_MEMALIGN
// stub whose used_size is the back-offset to the real chunk.
static AsanChunk *PtrToChunk(uptr ptr) {
  AsanChunk *m = (AsanChunk*)(ptr - REDZONE);
  if (m->chunk_state == CHUNK_MEMALIGN) {
    m = (AsanChunk*)((uptr)m - m->used_size);
  }
  return m;
}
252
// Splices the whole (non-empty) list |q| onto the tail of this list and
// empties |q|. O(1): only the end pointers are touched.
void AsanChunkFifoList::PushList(AsanChunkFifoList *q) {
  CHECK(q->size() > 0);
  if (last_) {
    CHECK(first_);
    CHECK(!last_->next);
    last_->next = q->first_;
    last_ = q->last_;
  } else {
    // This list was empty; adopt q's endpoints wholesale.
    CHECK(!first_);
    last_ = q->last_;
    first_ = q->first_;
    CHECK(first_);
  }
  CHECK(last_);
  CHECK(!last_->next);
  size_ += q->size();
  q->clear();
}
271
272 void AsanChunkFifoList::Push(AsanChunk *n) {
273 CHECK(n->next == 0);
274 if (last_) {
275 CHECK(first_);
276 CHECK(!last_->next);
277 last_->next = n;
278 last_ = n;
279 } else {
280 CHECK(!first_);
281 last_ = first_ = n;
282 }
283 size_ += n->Size();
284 }
285
// Interesting performance observation: this function takes up to 15% of
// overall allocator time. That's because *first_ has been evicted from cache
// long time ago. Not sure if we can or want to do anything with this.
// Removes and returns the oldest chunk; the list must be non-empty.
AsanChunk *AsanChunkFifoList::Pop() {
  CHECK(first_);
  AsanChunk *res = first_;
  first_ = first_->next;
  if (first_ == 0)
    last_ = 0;
  CHECK(size_ >= res->Size());
  size_ -= res->Size();
  if (last_) {
    CHECK(!last_->next);
  }
  return res;
}
302
// All pages we ever allocated.
// Describes one mmapped region of equally-sized chunks. The PageGroup
// object itself is stored inside the poisoned tail of its own region
// (see GetNewChunks), which FindPageGroupUnlocked relies on.
struct PageGroup {
  uptr beg;            // First byte of the region.
  uptr end;            // One past the last byte.
  uptr size_of_chunk;  // Size of every chunk in this region.
  uptr last_chunk;     // Address of the last usable chunk.
  bool InRange(uptr addr) {
    return addr >= beg && addr < end;
  }
};
313
// Central (global) allocator state: per-size-class free lists, the
// global quarantine, and the registry of all mmapped page groups.
// All mutating operations take mu_.
class MallocInfo {
 public:
  explicit MallocInfo(LinkerInitialized x) : mu_(x) { }

  // Detaches up to |n_chunks| chunks of the given size class from the
  // global free list (refilling it via mmap when empty) and returns
  // them as a singly-linked list.
  AsanChunk *AllocateChunks(u8 size_class, uptr n_chunks) {
    AsanChunk *m = 0;
    AsanChunk **fl = &free_lists_[size_class];
    {
      ScopedLock lock(&mu_);
      for (uptr i = 0; i < n_chunks; i++) {
        if (!(*fl)) {
          *fl = GetNewChunks(size_class);
        }
        AsanChunk *t = *fl;
        *fl = t->next;
        t->next = m;
        CHECK(t->chunk_state == CHUNK_AVAILABLE);
        m = t;
      }
    }
    return m;
  }

  // Moves the thread-local quarantine of |x| into the global one,
  // trimming the global quarantine down to flags()->quarantine_size,
  // and (when |eat_free_lists|) splices the thread-local free lists
  // into the global ones. Called on thread exit and on overflow.
  void SwallowThreadLocalMallocStorage(AsanThreadLocalMallocStorage *x,
                                       bool eat_free_lists) {
    CHECK(flags()->quarantine_size > 0);
    ScopedLock lock(&mu_);
    AsanChunkFifoList *q = &x->quarantine_;
    if (q->size() > 0) {
      quarantine_.PushList(q);
      while (quarantine_.size() > (uptr)flags()->quarantine_size) {
        QuarantinePop();
      }
    }
    if (eat_free_lists) {
      for (uptr size_class = 0; size_class < kNumberOfSizeClasses;
           size_class++) {
        AsanChunk *m = x->free_lists_[size_class];
        while (m) {
          AsanChunk *t = m->next;
          m->next = free_lists_[size_class];
          free_lists_[size_class] = m;
          m = t;
        }
        x->free_lists_[size_class] = 0;
      }
    }
  }

  // Puts a chunk directly into the global quarantine; used when there
  // is no current AsanThread to own a thread-local quarantine.
  void BypassThreadLocalQuarantine(AsanChunk *chunk) {
    ScopedLock lock(&mu_);
    quarantine_.Push(chunk);
  }

  AsanChunk *FindChunkByAddr(uptr addr) {
    ScopedLock lock(&mu_);
    return FindChunkByAddrUnlocked(addr);
  }

  // Returns the user-requested size of the allocated chunk that starts
  // exactly at |ptr|, or 0 if |ptr| is not such a chunk.
  uptr AllocationSize(uptr ptr) {
    if (!ptr) return 0;
    ScopedLock lock(&mu_);

    // Make sure this is our chunk and |ptr| actually points to the beginning
    // of the allocated memory.
    AsanChunk *m = FindChunkByAddrUnlocked(ptr);
    if (!m || m->Beg() != ptr) return 0;

    if (m->chunk_state == CHUNK_ALLOCATED) {
      return m->used_size;
    } else {
      return 0;
    }
  }

  // Exposed for the malloc-zone integration (see asan_mz_force_lock).
  void ForceLock() {
    mu_.Lock();
  }

  void ForceUnlock() {
    mu_.Unlock();
  }

  // Prints quarantine and per-class free-list totals (in MB) for debugging.
  void PrintStatus() {
    ScopedLock lock(&mu_);
    uptr malloced = 0;

    Printf(" MallocInfo: in quarantine: %zu malloced: %zu; ",
           quarantine_.size() >> 20, malloced >> 20);
    for (uptr j = 1; j < kNumberOfSizeClasses; j++) {
      AsanChunk *i = free_lists_[j];
      if (!i) continue;
      uptr t = 0;
      for (; i; i = i->next) {
        t += i->Size();
      }
      Printf("%zu:%zu ", j, t >> 20);
    }
    Printf("\n");
  }

  PageGroup *FindPageGroup(uptr addr) {
    ScopedLock lock(&mu_);
    return FindPageGroupUnlocked(addr);
  }

 private:
  // Finds the PageGroup containing |addr|, or 0. PageGroup objects live
  // near the end of their own mapped regions, so the pointer values
  // themselves can be binary-searched against |addr|.
  PageGroup *FindPageGroupUnlocked(uptr addr) {
    int n = atomic_load(&n_page_groups_, memory_order_relaxed);
    // If the page groups are not sorted yet, sort them.
    if (n_sorted_page_groups_ < n) {
      SortArray((uptr*)page_groups_, n);
      n_sorted_page_groups_ = n;
    }
    // Binary search over the page groups.
    int beg = 0, end = n;
    while (beg < end) {
      int med = (beg + end) / 2;
      uptr g = (uptr)page_groups_[med];
      if (addr > g) {
        // 'g' points to the end of the group, so 'addr'
        // may not belong to page_groups_[med] or any previous group.
        beg = med + 1;
      } else {
        // 'addr' may belong to page_groups_[med] or a previous group.
        end = med;
      }
    }
    if (beg >= n)
      return 0;
    PageGroup *g = page_groups_[beg];
    CHECK(g);
    if (g->InRange(addr))
      return g;
    return 0;
  }

  // We have an address between two chunks, and we want to report just one.
  AsanChunk *ChooseChunk(uptr addr,
                         AsanChunk *left_chunk, AsanChunk *right_chunk) {
    // Prefer an allocated chunk or a chunk from quarantine.
    if (left_chunk->chunk_state == CHUNK_AVAILABLE &&
        right_chunk->chunk_state != CHUNK_AVAILABLE)
      return right_chunk;
    if (right_chunk->chunk_state == CHUNK_AVAILABLE &&
        left_chunk->chunk_state != CHUNK_AVAILABLE)
      return left_chunk;
    // Choose based on offset.
    uptr l_offset = 0, r_offset = 0;
    CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
    CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
    if (l_offset < r_offset)
      return left_chunk;
    return right_chunk;
  }

  // Maps |addr| to the chunk it falls into. If |addr| is in a redzone
  // shared by two neighbouring chunks, ChooseChunk picks the better one
  // to report.
  AsanChunk *FindChunkByAddrUnlocked(uptr addr) {
    PageGroup *g = FindPageGroupUnlocked(addr);
    if (!g) return 0;
    CHECK(g->size_of_chunk);
    uptr offset_from_beg = addr - g->beg;
    // All chunks in a group are the same size, so the owner is found by
    // simple division.
    uptr this_chunk_addr = g->beg +
        (offset_from_beg / g->size_of_chunk) * g->size_of_chunk;
    CHECK(g->InRange(this_chunk_addr));
    AsanChunk *m = (AsanChunk*)this_chunk_addr;
    CHECK(m->chunk_state == CHUNK_ALLOCATED ||
          m->chunk_state == CHUNK_AVAILABLE ||
          m->chunk_state == CHUNK_QUARANTINE);
    uptr offset = 0;
    AsanChunkView m_view(m);
    if (m_view.AddrIsInside(addr, 1, &offset))
      return m;

    if (m_view.AddrIsAtRight(addr, 1, &offset)) {
      if (this_chunk_addr == g->last_chunk)  // rightmost chunk
        return m;
      uptr right_chunk_addr = this_chunk_addr + g->size_of_chunk;
      CHECK(g->InRange(right_chunk_addr));
      return ChooseChunk(addr, m, (AsanChunk*)right_chunk_addr);
    } else {
      CHECK(m_view.AddrIsAtLeft(addr, 1, &offset));
      if (this_chunk_addr == g->beg)  // leftmost chunk
        return m;
      uptr left_chunk_addr = this_chunk_addr - g->size_of_chunk;
      CHECK(g->InRange(left_chunk_addr));
      return ChooseChunk(addr, (AsanChunk*)left_chunk_addr, m);
    }
  }

  // Removes the oldest chunk from the quarantine and returns it to its
  // free list, re-poisoning the whole chunk.
  void QuarantinePop() {
    CHECK(quarantine_.size() > 0);
    AsanChunk *m = quarantine_.Pop();
    CHECK(m);
    // if (F_v >= 2) Printf("MallocInfo::pop %p\n", m);

    CHECK(m->chunk_state == CHUNK_QUARANTINE);
    m->chunk_state = CHUNK_AVAILABLE;
    PoisonShadow((uptr)m, m->Size(), kAsanHeapLeftRedzoneMagic);
    CHECK(m->alloc_tid >= 0);
    CHECK(m->free_tid >= 0);

    uptr size_class = m->SizeClass();
    m->next = free_lists_[size_class];
    free_lists_[size_class] = m;

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.real_frees++;
    thread_stats.really_freed += m->used_size;
    thread_stats.really_freed_redzones += m->Size() - m->used_size;
    thread_stats.really_freed_by_size[m->SizeClass()]++;
  }

  // Get a list of newly allocated chunks.
  AsanChunk *GetNewChunks(u8 size_class) {
    uptr size = SizeClassToSize(size_class);
    CHECK(IsPowerOfTwo(kMinMmapSize));
    CHECK(size < kMinMmapSize || (size % kMinMmapSize) == 0);
    uptr mmap_size = Max(size, kMinMmapSize);
    uptr n_chunks = mmap_size / size;
    CHECK(n_chunks * size == mmap_size);
    uptr PageSize = GetPageSizeCached();
    if (size < PageSize) {
      // Size is small, just poison the last chunk.
      n_chunks--;
    } else {
      // Size is large, allocate an extra page at right and poison it.
      mmap_size += PageSize;
    }
    CHECK(n_chunks > 0);
    u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);

    // Statistics.
    AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
    thread_stats.mmaps++;
    thread_stats.mmaped += mmap_size;
    thread_stats.mmaped_by_size[size_class] += n_chunks;

    // Thread the fresh chunks into a list.
    AsanChunk *res = 0;
    for (uptr i = 0; i < n_chunks; i++) {
      AsanChunk *m = (AsanChunk*)(mem + i * size);
      m->chunk_state = CHUNK_AVAILABLE;
      m->size_class = size_class;
      m->next = res;
      res = m;
    }
    // The PageGroup descriptor is stored in the poisoned tail of the
    // mapping itself, right after the last usable chunk.
    PageGroup *pg = (PageGroup*)(mem + n_chunks * size);
    // This memory is already poisoned, no need to poison it again.
    pg->beg = (uptr)mem;
    pg->end = pg->beg + mmap_size;
    pg->size_of_chunk = size;
    pg->last_chunk = (uptr)(mem + size * (n_chunks - 1));
    int idx = atomic_fetch_add(&n_page_groups_, 1, memory_order_relaxed);
    CHECK(idx < (int)ARRAY_SIZE(page_groups_));
    page_groups_[idx] = pg;
    return res;
  }

  AsanChunk *free_lists_[kNumberOfSizeClasses];
  AsanChunkFifoList quarantine_;
  AsanLock mu_;

  PageGroup *page_groups_[kMaxAvailableRam / kMinMmapSize];
  atomic_uint32_t n_page_groups_;
  int n_sorted_page_groups_;  // Prefix of page_groups_ known to be sorted.
};
580
// The single global allocator state; linker-initialized so it is usable
// before static constructors run.
static MallocInfo malloc_info(LINKER_INITIALIZED);
582
// Returns all thread-local state (quarantine and free lists) to the
// global allocator; called when a thread dies.
void AsanThreadLocalMallocStorage::CommitBack() {
  malloc_info.SwallowThreadLocalMallocStorage(this, true);
}
586
// Wraps the chunk owning |address| (or 0) in a read-only view for
// error reporting.
AsanChunkView FindHeapChunkByAddress(uptr address) {
  return AsanChunkView(malloc_info.FindChunkByAddr(address));
}
590
// Allocates |size| bytes aligned to at least |alignment| (a power of
// two; 0 falls through the power-of-two check and yields the default
// REDZONE alignment). Returns 0 if the request is too large.
// Result layout: [left redzone][user memory][right redzone padding].
static u8 *Allocate(uptr alignment, uptr size, StackTrace *stack) {
  __asan_init();
  CHECK(stack);
  if (size == 0) {
    size = 1;  // TODO(kcc): do something smarter
  }
  CHECK(IsPowerOfTwo(alignment));
  uptr rounded_size = RoundUpTo(size, REDZONE);
  uptr needed_size = rounded_size + REDZONE;
  if (alignment > REDZONE) {
    // Reserve extra space so an aligned address can be found inside.
    needed_size += alignment;
  }
  CHECK(IsAligned(needed_size, REDZONE));
  if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
    Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
           (void*)size);
    return 0;
  }

  u8 size_class = SizeToSizeClass(needed_size);
  uptr size_to_allocate = SizeClassToSize(size_class);
  CHECK(size_to_allocate >= kMinAllocSize);
  CHECK(size_to_allocate >= needed_size);
  CHECK(IsAligned(size_to_allocate, REDZONE));

  if (flags()->verbosity >= 3) {
    Printf("Allocate align: %zu size: %zu class: %u real: %zu\n",
           alignment, size, size_class, size_to_allocate);
  }

  AsanThread *t = asanThreadRegistry().GetCurrent();
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  // Statistics
  thread_stats.mallocs++;
  thread_stats.malloced += size;
  thread_stats.malloced_redzones += size_to_allocate - size;
  thread_stats.malloced_by_size[size_class]++;

  AsanChunk *m = 0;
  if (!t || size_to_allocate >= kMaxSizeForThreadLocalFreeList) {
    // get directly from global storage.
    m = malloc_info.AllocateChunks(size_class, 1);
    thread_stats.malloc_large++;
  } else {
    // get from the thread-local storage.
    AsanChunk **fl = &t->malloc_storage().free_lists_[size_class];
    if (!*fl) {
      // Refill the thread-local free list in bulk.
      uptr n_new_chunks = kMaxSizeForThreadLocalFreeList / size_to_allocate;
      *fl = malloc_info.AllocateChunks(size_class, n_new_chunks);
      thread_stats.malloc_small_slow++;
    }
    m = *fl;
    *fl = (*fl)->next;
  }
  CHECK(m);
  CHECK(m->chunk_state == CHUNK_AVAILABLE);
  m->chunk_state = CHUNK_ALLOCATED;
  m->next = 0;
  CHECK(m->Size() == size_to_allocate);
  uptr addr = (uptr)m + REDZONE;
  CHECK(addr <= (uptr)m->compressed_free_stack());

  if (alignment > REDZONE && (addr & (alignment - 1))) {
    // Place a CHUNK_MEMALIGN stub header just before the aligned
    // address; its used_size is the back-offset to the real chunk
    // (see PtrToChunk).
    addr = RoundUpTo(addr, alignment);
    CHECK((addr & (alignment - 1)) == 0);
    AsanChunk *p = (AsanChunk*)(addr - REDZONE);
    p->chunk_state = CHUNK_MEMALIGN;
    p->used_size = (uptr)p - (uptr)m;
    m->alignment_log = Log2(alignment);
    CHECK(m->Beg() == addr);
  } else {
    m->alignment_log = Log2(REDZONE);
  }
  CHECK(m == PtrToChunk(addr));
  m->used_size = size;
  CHECK(m->Beg() == addr);
  m->alloc_tid = t ? t->tid() : 0;
  m->free_tid = kInvalidTid;
  StackTrace::CompressStack(stack, m->compressed_alloc_stack(),
                            m->compressed_alloc_stack_size());
  // Unpoison the user area; if the request is not a multiple of REDZONE,
  // poison the tail of the last granule as a partial right redzone.
  PoisonShadow(addr, rounded_size, 0);
  if (size < rounded_size) {
    PoisonHeapPartialRightRedzone(addr + rounded_size - REDZONE,
                                  size & (REDZONE - 1));
  }
  if (size <= (uptr)(flags()->max_malloc_fill_size)) {
    REAL(memset)((void*)addr, 0, rounded_size);
  }
  return (u8*)addr;
}
681
// Quarantines the chunk owning |ptr| (which must have come from
// Allocate). Reports double-free or free-of-non-malloced on bad input.
static void Deallocate(u8 *ptr, StackTrace *stack) {
  if (!ptr) return;
  CHECK(stack);

  if (flags()->debug) {
    CHECK(malloc_info.FindPageGroup((uptr)ptr));
  }

  // Printf("Deallocate %p\n", ptr);
  AsanChunk *m = PtrToChunk((uptr)ptr);

  // Flip the chunk_state atomically to avoid race on double-free.
  // NOTE(review): this casts the chunk itself to atomic_uint8_t, i.e.
  // it assumes chunk_state occupies the first byte of the chunk --
  // verify against the ChunkBase bitfield layout on each target.
  u8 old_chunk_state = atomic_exchange((atomic_uint8_t*)m, CHUNK_QUARANTINE,
                                       memory_order_acq_rel);

  if (old_chunk_state == CHUNK_QUARANTINE) {
    ReportDoubleFree((uptr)ptr, stack);
  } else if (old_chunk_state != CHUNK_ALLOCATED) {
    ReportFreeNotMalloced((uptr)ptr, stack);
  }
  CHECK(old_chunk_state == CHUNK_ALLOCATED);
  // With REDZONE==16 m->next is in the user area, otherwise it should be 0.
  CHECK(REDZONE <= 16 || !m->next);
  CHECK(m->free_tid == kInvalidTid);
  CHECK(m->alloc_tid >= 0);
  AsanThread *t = asanThreadRegistry().GetCurrent();
  m->free_tid = t ? t->tid() : 0;
  // Record the free stack into the (now dead) user area.
  StackTrace::CompressStack(stack, m->compressed_free_stack(),
                            m->compressed_free_stack_size());
  uptr rounded_size = RoundUpTo(m->used_size, REDZONE);
  PoisonShadow((uptr)ptr, rounded_size, kAsanHeapFreeMagic);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.frees++;
  thread_stats.freed += m->used_size;
  thread_stats.freed_by_size[m->SizeClass()]++;

  CHECK(m->chunk_state == CHUNK_QUARANTINE);

  if (t) {
    AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
    ms->quarantine_.Push(m);

    if (ms->quarantine_.size() > kMaxThreadLocalQuarantine) {
      // Flush the thread-local quarantine into the global one.
      malloc_info.SwallowThreadLocalMallocStorage(ms, false);
    }
  } else {
    // No current thread (e.g. during TSD destruction): go global.
    malloc_info.BypassThreadLocalQuarantine(m);
  }
}
733
// realloc core: allocates a new block, copies min(old, new) bytes and
// frees the old block. Both arguments must be non-zero (callers handle
// the realloc(0, n) / realloc(p, 0) special cases). On allocation
// failure the old block is left untouched and 0 is returned.
static u8 *Reallocate(u8 *old_ptr, uptr new_size,
                      StackTrace *stack) {
  CHECK(old_ptr && new_size);

  // Statistics.
  AsanStats &thread_stats = asanThreadRegistry().GetCurrentThreadStats();
  thread_stats.reallocs++;
  thread_stats.realloced += new_size;

  AsanChunk *m = PtrToChunk((uptr)old_ptr);
  CHECK(m->chunk_state == CHUNK_ALLOCATED);
  uptr old_size = m->used_size;
  uptr memcpy_size = Min(new_size, old_size);
  u8 *new_ptr = Allocate(0, new_size, stack);
  if (new_ptr) {
    CHECK(REAL(memcpy) != 0);
    REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
    Deallocate(old_ptr, stack);
  }
  return new_ptr;
}
755
756 } // namespace __asan
757
// Default (no-op) implementation of malloc hooks.
// Declared weak so the user can provide their own definitions to
// observe every allocation/deallocation.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
void __asan_free_hook(void *ptr) {
  (void)ptr;
}
}  // extern "C"
770
771 namespace __asan {
772
773 SANITIZER_INTERFACE_ATTRIBUTE
774 void *asan_memalign(uptr alignment, uptr size, StackTrace *stack) {
775 void *ptr = (void*)Allocate(alignment, size, stack);
776 __asan_malloc_hook(ptr, size);
777 return ptr;
778 }
779
// free() entry point: the hook runs before the memory is quarantined.
SANITIZER_INTERFACE_ATTRIBUTE
void asan_free(void *ptr, StackTrace *stack) {
  __asan_free_hook(ptr);
  Deallocate((u8*)ptr, stack);
}
785
786 SANITIZER_INTERFACE_ATTRIBUTE
787 void *asan_malloc(uptr size, StackTrace *stack) {
788 void *ptr = (void*)Allocate(0, size, stack);
789 __asan_malloc_hook(ptr, size);
790 return ptr;
791 }
792
793 void *asan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
794 void *ptr = (void*)Allocate(0, nmemb * size, stack);
795 if (ptr)
796 REAL(memset)(ptr, 0, nmemb * size);
797 __asan_malloc_hook(ptr, nmemb * size);
798 return ptr;
799 }
800
// realloc() entry point, handling the two special cases the C standard
// defines: realloc(0, n) behaves like malloc(n), realloc(p, 0) frees p.
void *asan_realloc(void *p, uptr size, StackTrace *stack) {
  if (p == 0) {
    void *ptr = (void*)Allocate(0, size, stack);
    __asan_malloc_hook(ptr, size);
    return ptr;
  } else if (size == 0) {
    __asan_free_hook(p);
    Deallocate((u8*)p, stack);
    return 0;
  }
  return Reallocate((u8*)p, size, stack);
}
813
814 void *asan_valloc(uptr size, StackTrace *stack) {
815 void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
816 __asan_malloc_hook(ptr, size);
817 return ptr;
818 }
819
820 void *asan_pvalloc(uptr size, StackTrace *stack) {
821 uptr PageSize = GetPageSizeCached();
822 size = RoundUpTo(size, PageSize);
823 if (size == 0) {
824 // pvalloc(0) should allocate one page.
825 size = PageSize;
826 }
827 void *ptr = (void*)Allocate(PageSize, size, stack);
828 __asan_malloc_hook(ptr, size);
829 return ptr;
830 }
831
// posix_memalign() entry point.
// NOTE(review): allocation failure is not handled -- if Allocate
// returns 0, *memptr is set to 0 and 0 (success) is still returned,
// whereas POSIX says ENOMEM should be returned. Consider fixing.
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        StackTrace *stack) {
  void *ptr = Allocate(alignment, size, stack);
  CHECK(IsAligned((uptr)ptr, alignment));
  __asan_malloc_hook(ptr, size);
  *memptr = ptr;
  return 0;
}
840
841 uptr asan_malloc_usable_size(void *ptr, StackTrace *stack) {
842 CHECK(stack);
843 if (ptr == 0) return 0;
844 uptr usable_size = malloc_info.AllocationSize((uptr)ptr);
845 if (flags()->check_malloc_usable_size && (usable_size == 0)) {
846 ReportMallocUsableSizeNotOwned((uptr)ptr, stack);
847 }
848 return usable_size;
849 }
850
// malloc-zone size query (OS X integration).
uptr asan_mz_size(const void *ptr) {
  return malloc_info.AllocationSize((uptr)ptr);
}
854
// Lock/unlock the allocator around fork() or zone enumeration so no
// thread holds the allocator lock across the critical section.
void asan_mz_force_lock() {
  malloc_info.ForceLock();
}

void asan_mz_force_unlock() {
  malloc_info.ForceUnlock();
}
862
// ---------------------- Fake stack-------------------- {{{1
// Zero-initializes every member; REAL(memset) must be resolved by now.
FakeStack::FakeStack() {
  CHECK(REAL(memset) != 0);
  REAL(memset)(this, 0, sizeof(*this));
}
868
869 bool FakeStack::AddrIsInSizeClass(uptr addr, uptr size_class) {
870 uptr mem = allocated_size_classes_[size_class];
871 uptr size = ClassMmapSize(size_class);
872 bool res = mem && addr >= mem && addr < mem + size;
873 return res;
874 }
875
876 uptr FakeStack::AddrIsInFakeStack(uptr addr) {
877 for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
878 if (AddrIsInSizeClass(addr, i)) return allocated_size_classes_[i];
879 }
880 return 0;
881 }
882
// We may want to compute this during compilation.
// Maps a frame size to its fake-stack size class (0-based above
// kMinStackFrameSizeLog); checks |alloc_size| is within the class.
inline uptr FakeStack::ComputeSizeClass(uptr alloc_size) {
  uptr rounded_size = RoundUpToPowerOfTwo(alloc_size);
  uptr log = Log2(rounded_size);
  CHECK(alloc_size <= (1UL << log));
  // Diagnostic print before the CHECK below fires.
  if (!(alloc_size > (1UL << (log-1)))) {
    Printf("alloc_size %zu log %zu\n", alloc_size, log);
  }
  CHECK(alloc_size > (1UL << (log-1)));
  uptr res = log < kMinStackFrameSizeLog ? 0 : log - kMinStackFrameSizeLog;
  CHECK(res < kNumberOfSizeClasses);
  CHECK(ClassSize(res) >= rounded_size);
  return res;
}
897
898 void FakeFrameFifo::FifoPush(FakeFrame *node) {
899 CHECK(node);
900 node->next = 0;
901 if (first_ == 0 && last_ == 0) {
902 first_ = last_ = node;
903 } else {
904 CHECK(first_);
905 CHECK(last_);
906 last_->next = node;
907 last_ = node;
908 }
909 }
910
911 FakeFrame *FakeFrameFifo::FifoPop() {
912 CHECK(first_ && last_ && "Exhausted fake stack");
913 FakeFrame *res = 0;
914 if (first_ == last_) {
915 res = first_;
916 first_ = last_ = 0;
917 } else {
918 res = first_;
919 first_ = first_->next;
920 }
921 return res;
922 }
923
// Arms the fake stack for the owning thread; regions are mmapped
// lazily, per size class, on first use.
void FakeStack::Init(uptr stack_size) {
  stack_size_ = stack_size;
  alive_ = true;
}
928
// Tears down the fake stack: unpoisons and unmaps every size-class
// region that was ever allocated.
void FakeStack::Cleanup() {
  alive_ = false;
  for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
    uptr mem = allocated_size_classes_[i];
    if (mem) {
      PoisonShadow(mem, ClassMmapSize(i), 0);
      allocated_size_classes_[i] = 0;
      UnmapOrDie((void*)mem, ClassMmapSize(i));
    }
  }
}
940
// Bytes mmapped per size class.
// NOTE(review): the size_class parameter is ignored -- every class maps
// the same amount (stack size rounded up to a power of two); confirm
// this is intentional.
uptr FakeStack::ClassMmapSize(uptr size_class) {
  return RoundUpToPowerOfTwo(stack_size_);
}
944
// Lazily mmaps the region for |size_class| and carves it into frames,
// pushing each frame onto the class's FIFO of free frames.
void FakeStack::AllocateOneSizeClass(uptr size_class) {
  CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
  uptr new_mem = (uptr)MmapOrDie(
      ClassMmapSize(size_class), __FUNCTION__);
  // Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
  //        asanThreadRegistry().GetCurrent()->tid(),
  //        size_class, new_mem, new_mem + ClassMmapSize(size_class),
  //        ClassMmapSize(size_class));
  uptr i;
  for (i = 0; i < ClassMmapSize(size_class);
       i += ClassSize(size_class)) {
    size_classes_[size_class].FifoPush((FakeFrame*)(new_mem + i));
  }
  CHECK(i == ClassMmapSize(size_class));
  allocated_size_classes_[size_class] = new_mem;
}
961
// Hands out a fake frame of at least |size| bytes for a function whose
// real frame starts at |real_stack|. Falls back to the real stack when
// the fake stack is not alive. Also garbage-collects frames whose real
// stack addresses show their functions have already returned.
uptr FakeStack::AllocateStack(uptr size, uptr real_stack) {
  if (!alive_) return real_stack;
  CHECK(size <= kMaxStackMallocSize && size > 1);
  uptr size_class = ComputeSizeClass(size);
  if (!allocated_size_classes_[size_class]) {
    AllocateOneSizeClass(size_class);
  }
  FakeFrame *fake_frame = size_classes_[size_class].FifoPop();
  CHECK(fake_frame);
  fake_frame->size_minus_one = size - 1;
  fake_frame->real_stack = real_stack;
  // Retire frames belonging to callers that returned (their real stack
  // is at or below ours, since the stack grows down).
  while (FakeFrame *top = call_stack_.top()) {
    if (top->real_stack > real_stack) break;
    call_stack_.LifoPop();
    DeallocateFrame(top);
  }
  call_stack_.LifoPush(fake_frame);
  uptr ptr = (uptr)fake_frame;
  PoisonShadow(ptr, size, 0);
  return ptr;
}
983
// Returns a retired fake frame to its size class's free FIFO.
void FakeStack::DeallocateFrame(FakeFrame *fake_frame) {
  CHECK(alive_);
  uptr size = fake_frame->size_minus_one + 1;
  uptr size_class = ComputeSizeClass(size);
  CHECK(allocated_size_classes_[size_class]);
  uptr ptr = (uptr)fake_frame;
  // The frame must lie entirely inside its class's region.
  CHECK(AddrIsInSizeClass(ptr, size_class));
  CHECK(AddrIsInSizeClass(ptr + size - 1, size_class));
  size_classes_[size_class].FifoPush(fake_frame);
}
994
995 void FakeStack::OnFree(uptr ptr, uptr size, uptr real_stack) {
996 FakeFrame *fake_frame = (FakeFrame*)ptr;
997 CHECK(fake_frame->magic = kRetiredStackFrameMagic);
998 CHECK(fake_frame->descr != 0);
999 CHECK(fake_frame->size_minus_one == size - 1);
1000 PoisonShadow(ptr, size, kAsanStackAfterReturnMagic);
1001 }
1002
1003 } // namespace __asan
1004
1005 // ---------------------- Interface ---------------- {{{1
1006 using namespace __asan; // NOLINT
1007
// Instrumentation entry point: returns a fake frame for a function's
// locals, or |real_stack| when the fake stack is disabled/unavailable.
uptr __asan_stack_malloc(uptr size, uptr real_stack) {
  if (!flags()->use_fake_stack) return real_stack;
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // TSD is gone, use the real stack.
    return real_stack;
  }
  uptr ptr = t->fake_stack().AllocateStack(size, real_stack);
  // Printf("__asan_stack_malloc %p %zu %p\n", ptr, size, real_stack);
  return ptr;
}
1019
1020 void __asan_stack_free(uptr ptr, uptr size, uptr real_stack) {
1021 if (!flags()->use_fake_stack) return;
1022 if (ptr != real_stack) {
1023 FakeStack::OnFree(ptr, size, real_stack);
1024 }
1025 }
1026
1027 // ASan allocator doesn't reserve extra bytes, so normally we would
1028 // just return "size".
1029 uptr __asan_get_estimated_allocated_size(uptr size) {
1030 if (size == 0) return 1;
1031 return Min(size, kMaxAllowedMallocSize);
1032 }
1033
// True iff |p| is the start of a live allocation owned by this allocator.
bool __asan_get_ownership(const void *p) {
  return malloc_info.AllocationSize((uptr)p) > 0;
}
1037
// Returns the user-requested size of the live allocation at |p|;
// reports an error (which may abort) if |p| is not owned or was freed.
uptr __asan_get_allocated_size(const void *p) {
  if (p == 0) return 0;
  uptr allocated_size = malloc_info.AllocationSize((uptr)p);
  // Die if p is not malloced or if it is already freed.
  if (allocated_size == 0) {
    GET_STACK_TRACE_HERE(kStackTraceMax);
    ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
  }
  return allocated_size;
}