// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|

// ---
// Author: Sanjay Ghemawat

#include "config.h"
#include "TCSystemAlloc.h"

#include <algorithm>
#include "Assertions.h"
#include "TCSpinLock.h"
#include "UnusedParam.h"
#include "VMTags.h"
|

#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif

#if OS(WINDOWS)
#include "windows.h"
#else
#include <errno.h>
#include <fcntl.h>     // for open() in TryDevMem below
#include <unistd.h>
#include <sys/mman.h>
#endif

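// Some systems only provide MAP_ANON, an older spelling of MAP_ANONYMOUS.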
|
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

|
using namespace std;

// Structure for discovering alignment
union MemoryAligner {
  void* p;
  double d;
  size_t s;
};
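// sizeof(MemoryAligner) is a conservative bound on the platform's strictest
// fundamental alignment; TCMalloc_SystemAlloc uses it as the minimum
// alignment for every request.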
|

static SpinLock spinlock = SPINLOCK_INITIALIZER;

// Page size is initialized on demand
static size_t pagesize = 0;

|
// Configuration parameters.
//
// if use_devmem is true, either use_sbrk or use_mmap must also be true.
// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
// the mmap address space (1300MBish) are disjoint, so we need both allocators
// to get as much virtual memory as possible.
#ifndef WTF_CHANGES
static bool use_devmem = false;
#endif

#if HAVE(SBRK)
static bool use_sbrk = false;
#endif

#if HAVE(MMAP)
static bool use_mmap = true;
#endif

#if HAVE(VIRTUALALLOC)
static bool use_VirtualAlloc = true;
#endif

// Flags to keep us from retrying allocators that failed.
static bool devmem_failure = false;
static bool sbrk_failure = false;
static bool mmap_failure = false;
static bool VirtualAlloc_failure = false;

#ifndef WTF_CHANGES
DEFINE_int32(malloc_devmem_start, 0,
             "Physical memory starting location in MB for /dev/mem allocation."
             " Setting this to 0 disables /dev/mem allocation");
DEFINE_int32(malloc_devmem_limit, 0,
             "Physical memory limit location in MB for /dev/mem allocation."
             " Setting this to 0 means no limit.");
#else
static const int32_t FLAGS_malloc_devmem_start = 0;
static const int32_t FLAGS_malloc_devmem_limit = 0;
#endif
|

#if HAVE(SBRK)

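// Grow the heap with sbrk. The request is first rounded up to a multiple of
// alignment. If the new break happens to be aligned we are done; otherwise we
// try to extend the region by just the bytes needed for alignment, and as a
// last resort over-allocate by (alignment - 1) bytes and return an aligned
// pointer inside the block. sbrk cannot return memory to the system, so any
// skipped prefix bytes are simply leaked.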
|
static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  void* result = sbrk(size);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }

  // Is it aligned?
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) == 0) return result;

  // Try to get more memory for alignment
  size_t extra = alignment - (ptr & (alignment-1));
  void* r2 = sbrk(extra);
  if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
    // Contiguous with previous result
    return reinterpret_cast<void*>(ptr + extra);
  }

  // Give up and ask for "size + alignment - 1" bytes so
  // that we can find an aligned region within it.
  result = sbrk(size + alignment - 1);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }
  ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) != 0) {
    ptr += alignment - (ptr & (alignment-1));
  }
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(SBRK) */
|

#if HAVE(MMAP)

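// Map anonymous memory with mmap. For alignments larger than a page we
// over-allocate by (alignment - pagesize) bytes and then munmap the unaligned
// head and the unused tail, leaving exactly `size` aligned bytes mapped.
// VM_TAG_FOR_TCMALLOC_MEMORY is passed in mmap's fd slot: per VMTags.h it is
// a Darwin VM tag that labels the region for memory-analysis tools, and -1 on
// other platforms.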
|
static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  void* result = mmap(NULL, size + extra,
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(MMAP) */
|

#if HAVE(VIRTUALALLOC)

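// Reserve and commit memory with VirtualAlloc. As in TryMmap we over-allocate
// when alignment exceeds the page size, but Windows can only release an
// allocation as a whole, so the unaligned head and unused tail are
// decommitted instead of released.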
|
static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    pagesize = system_info.dwPageSize;
  }

  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  void* result = VirtualAlloc(NULL, size + extra,
                              MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                              PAGE_READWRITE);

  if (result == NULL) {
    VirtualAlloc_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system - we'd like to release but the best we can do
  // is decommit, since Windows only lets you free the whole allocation.
  if (adjust > 0) {
    VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
  }
  if (adjust < extra) {
    VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust, MEM_DECOMMIT);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}
|

#endif /* HAVE(VIRTUALALLOC) */
|

#ifndef WTF_CHANGES
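// Allocate backing store directly from physical memory via /dev/mem.
// physmem_base tracks the next physical address to hand out; each successful
// mapping advances it past the aligned block just returned.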
|
static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
  static bool initialized = false;
  static off_t physmem_base;   // next physical memory address to allocate
  static off_t physmem_limit;  // maximum physical address allowed
  static int physmem_fd;       // file descriptor for /dev/mem

  // Check if we should use /dev/mem allocation. Note that it may take
  // a while to get this flag initialized, so meanwhile we fall back to
  // the next allocator. (It looks like 7MB gets allocated before
  // this flag gets initialized -khr.)
  if (FLAGS_malloc_devmem_start == 0) {
    // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
    // try us again next time.
    return NULL;
  }

  if (!initialized) {
    physmem_fd = open("/dev/mem", O_RDWR);
    if (physmem_fd < 0) {
      devmem_failure = true;
      return NULL;
    }
    physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
    physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
    initialized = true;
  }

  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }

  // check to see if we have any memory left
  if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
    devmem_failure = true;
    return NULL;
  }
  void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
                      MAP_SHARED, physmem_fd, physmem_base);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    devmem_failure = true;
    return NULL;
  }
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);

  // Adjust the return memory so it is aligned
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused virtual memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  physmem_base += adjust + size;

  return reinterpret_cast<void*>(ptr);
}
#endif

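// Top-level entry point: try each enabled backend in turn, skipping any that
// has failed since the failure flags were last cleared. The unsigned-wrap
// check below rejects size/alignment combinations that would overflow. If
// every backend fails, the flags are reset and the whole sequence is retried
// once more before giving up.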
|
void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Discard requests that overflow
  if (size + alignment < size) return NULL;

  SpinLockHolder lock_holder(&spinlock);

  // Enforce minimum alignment
  if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);

  // Try twice, once avoiding allocators that failed before, and once
  // more trying all allocators even if they failed before.
  for (int i = 0; i < 2; i++) {

#ifndef WTF_CHANGES
    if (use_devmem && !devmem_failure) {
      void* result = TryDevMem(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(SBRK)
    if (use_sbrk && !sbrk_failure) {
      void* result = TrySbrk(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(MMAP)
    if (use_mmap && !mmap_failure) {
      void* result = TryMmap(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(VIRTUALALLOC)
    if (use_VirtualAlloc && !VirtualAlloc_failure) {
      void* result = TryVirtualAlloc(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

    // nothing worked - reset failure flags and try again
    devmem_failure = false;
    sbrk_failure = false;
    mmap_failure = false;
    VirtualAlloc_failure = false;
  }
  return NULL;
}
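
// Illustrative (hypothetical) call site: request one 64KB-aligned chunk and
// learn how much was actually reserved. `actualSize` and `block` are
// names invented for this sketch:
//
//   size_t actualSize;
//   void* block = TCMalloc_SystemAlloc(64 * 1024, &actualSize, 64 * 1024);
//   if (block) {
//       // at least actualSize >= 64KB of read/write memory is usable at block
//   }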
|

#if HAVE(MADV_FREE_REUSE)

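// MADV_FREE_REUSABLE (a Darwin-specific advice flag) tells the kernel these
// pages may be reclaimed; the call is retried while it fails with EAGAIN.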
|
void TCMalloc_SystemRelease(void* start, size_t length)
{
    while (madvise(start, length, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
}

#elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)

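// madvise works on whole pages, so the range is shrunk inward: the start is
// rounded up and the end rounded down to page boundaries before advising.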
|
void TCMalloc_SystemRelease(void* start, size_t length)
{
    // MADV_FREE clears the modified bit on pages, which allows
    // them to be discarded immediately.
#if HAVE(MADV_FREE)
    const int advice = MADV_FREE;
#else
    const int advice = MADV_DONTNEED;
#endif
    if (FLAGS_malloc_devmem_start) {
        // It's not safe to use MADV_DONTNEED if we've been mapping
        // /dev/mem for heap memory
        return;
    }
    if (pagesize == 0) pagesize = getpagesize();
    const size_t pagemask = pagesize - 1;

    size_t new_start = reinterpret_cast<size_t>(start);
    size_t end = new_start + length;
    size_t new_end = end;

    // Round up the starting address and round down the ending address
    // to be page aligned:
    new_start = (new_start + pagesize - 1) & ~pagemask;
    new_end = new_end & ~pagemask;

    ASSERT((new_start & pagemask) == 0);
    ASSERT((new_end & pagemask) == 0);
    ASSERT(new_start >= reinterpret_cast<size_t>(start));
    ASSERT(new_end <= end);

    if (new_end > new_start) {
        // Note -- ignoring most return codes, because if this fails it
        // doesn't matter...
        while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
                       advice) == -1 &&
               errno == EAGAIN) {
            // NOP
        }
    }
}
|

#elif HAVE(MMAP)

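// Without madvise support, remap the range with fresh anonymous pages
// (MAP_FIXED over the same addresses); this discards the old contents and
// lets the kernel reclaim the backing physical pages.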
|
void TCMalloc_SystemRelease(void* start, size_t length)
{
    void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    // If the mmap failed then that's ok, we just won't return the memory to the system.
    ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
}

#elif HAVE(VIRTUALALLOC)

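// MEM_DECOMMIT frees the physical storage but keeps the address range
// reserved, so TCMalloc_SystemCommit can recommit it later.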
|
void TCMalloc_SystemRelease(void* start, size_t length)
{
    if (VirtualFree(start, length, MEM_DECOMMIT))
        return;

    // The decommit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and decommit
    // them each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        size_t decommitSize = min<size_t>(info.RegionSize, end - ptr);
        BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT);
        ASSERT_UNUSED(success, success);
        ptr += decommitSize;
    }
}

#else

// Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease
// declared in TCSystemAlloc.h

#endif
|

#if HAVE(MADV_FREE_REUSE)

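// Pairs with the MADV_FREE_REUSABLE call in TCMalloc_SystemRelease:
// MADV_FREE_REUSE marks the pages as in use again before they are touched.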
|
void TCMalloc_SystemCommit(void* start, size_t length)
{
    while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
}

#elif HAVE(VIRTUALALLOC)

void TCMalloc_SystemCommit(void* start, size_t length)
{
    if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start)
        return;

    // The commit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and commit them
    // each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        size_t commitSize = min<size_t>(info.RegionSize, end - ptr);
        void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE);
        ASSERT_UNUSED(newAddress, newAddress == ptr);
        ptr += commitSize;
    }
}

#else

// Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit
// declared in TCSystemAlloc.h

#endif