[libsanitizer] merge from upstream r169371
libsanitizer/sanitizer_common/sanitizer_posix.cc
//===-- sanitizer_posix.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//
#if defined(__linux__) || defined(__APPLE__)

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetPageSize() {
  return sysconf(_SC_PAGESIZE);
}

uptr GetMmapGranularity() {
  return GetPageSize();
}

int GetPid() {
  return getpid();
}

uptr GetThreadSelf() {
  return (uptr)pthread_self();
}

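// Maps |size| bytes of anonymous, readable and writable memory, rounded up to
// the page size; terminates the process if the mapping fails.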
void *MmapOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  void *res = internal_mmap(0, size,
                            PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANON, -1, 0);
  if (res == (void*)-1) {
    static int recursion_count;
    if (recursion_count) {
      // The Report() and CHECK calls below may call mmap recursively and fail.
      // If we went into recursion, just die.
      RawWrite("AddressSanitizer is unable to mmap\n");
      Die();
    }
    recursion_count++;
    Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s: %s\n",
           size, size, mem_type, strerror(errno));
    DumpProcessMap();
    CHECK("unable to mmap" && 0);
  }
  return res;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  int res = internal_munmap(addr, size);
  if (res != 0) {
    Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           size, size, addr);
    CHECK("unable to unmap" && 0);
  }
}

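// Maps |size| bytes at |fixed_addr| (rounded down to the page size) without
// reserving swap (MAP_NORESERVE); on failure it reports the error and returns
// MAP_FAILED instead of dying.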
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                          RoundUpTo(size, PageSize),
                          PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                          -1, 0);
  if (p == (void*)-1)
    Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
           size, size, fixed_addr, errno);
  return p;
}

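// Despite the name, this does not call mprotect(2): it reserves an
// inaccessible (PROT_NONE) region at |fixed_addr| via mmap.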
void *Mprotect(uptr fixed_addr, uptr size) {
  return internal_mmap((void*)fixed_addr, size,
                       PROT_NONE,
                       MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                       -1, 0);
}

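// Maps the whole file read-only. *buff_size receives the file size rounded up
// to the page size; returns 0 if the mapping fails.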
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  fd_t fd = internal_open(file_name, false);
  CHECK_NE(fd, kInvalidFd);
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return (map == MAP_FAILED) ? 0 : map;
}


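// Returns true if the closed intervals [start1, end1] and [start2, end2] do
// not overlap.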
static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout procmaps;
  uptr start, end;
  while (procmaps.Next(&start, &end,
                       /*offset*/0, /*filename*/0, /*filename_size*/0)) {
    if (!IntervalsAreSeparate(start, end, range_start, range_end))
      return false;
  }
  return true;
}

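// Prints every mapping of the current process as a "start-end filename" line.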
void DumpProcessMap() {
  MemoryMappingLayout proc_maps;
  uptr start, end;
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&start, &end, /* file_offset */0,
                        filename, kBufSize)) {
    Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}

const char *GetPwd() {
  return GetEnv("PWD");
}

void DisableCoreDumper() {
  struct rlimit nocore;
  nocore.rlim_cur = 0;
  nocore.rlim_max = 0;
  setrlimit(RLIMIT_CORE, &nocore);
}

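// Returns true if the soft stack limit is unlimited (reported by getrlimit as
// (uptr)-1).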
bool StackSizeIsUnlimited() {
  struct rlimit rlim;
  CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim));
  return (rlim.rlim_cur == (uptr)-1);
}

void SetStackSizeLimitInBytes(uptr limit) {
  struct rlimit rlim;
  rlim.rlim_cur = limit;
  rlim.rlim_max = limit;
  if (setrlimit(RLIMIT_STACK, &rlim)) {
    Report("setrlimit() failed %d\n", errno);
    Die();
  }
  CHECK(!StackSizeIsUnlimited());
}

void SleepForSeconds(int seconds) {
  sleep(seconds);
}

void SleepForMillis(int millis) {
  usleep(millis * 1000);
}

void Exit(int exitcode) {
  _exit(exitcode);
}

void Abort() {
  abort();
}

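// When built for Go (SANITIZER_GO), no atexit callback is registered and 0 is
// returned.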
int Atexit(void (*function)(void)) {
#ifndef SANITIZER_GO
  return atexit(function);
#else
  return 0;
#endif
}

int internal_isatty(fd_t fd) {
  return isatty(fd);
}

}  // namespace __sanitizer

#endif  // __linux__ || __APPLE__