/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include <errno.h>

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>

#if CPU(X86_64)
// These limits are suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2Gb max range).
#define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2Gb
#define COALESCE_LIMIT (16u * 1024u * 1024u) // 16Mb
#else
// These limits are hopefully sensible on embedded platforms.
#define VM_POOL_SIZE (32u * 1024u * 1024u) // 32Mb
#define COALESCE_LIMIT (4u * 1024u * 1024u) // 4Mb
#endif

// ASLR currently only works on darwin (due to arc4random) & 64-bit (due to address space size).
#define VM_POOL_ASLR (OS(DARWIN) && CPU(X86_64))

using namespace WTF;

namespace JSC {

// FreeListEntry describes a free chunk of memory, stored in the freeList.
struct FreeListEntry {
    FreeListEntry(void* pointer, size_t size)
        : pointer(pointer)
        , size(size)
        , nextEntry(0)
        , less(0)
        , greater(0)
        , balanceFactor(0)
    {
    }

    // All entries of the same size share a single entry
    // in the AVLTree, and are linked together in a linked
    // list, using nextEntry.
    void* pointer;
    size_t size;
    FreeListEntry* nextEntry;

    // These fields are used by AVLTree.
    FreeListEntry* less;
    FreeListEntry* greater;
    int balanceFactor;
};
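// As an illustration (addresses and sizes invented): two free chunks of the
// same, non-common size, say 24KB each, end up represented by a single tree
// node keyed on that size, with the second chunk hanging off the first via
// nextEntry:
//
//   [pointer = 0xA0000, size = 0x6000] --nextEntry--> [pointer = 0xC0000, size = 0x6000]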

// Abstractor class for use in AVLTree.
// Nodes in the AVLTree are of type FreeListEntry, keyed on
// (and thus sorted by) their size.
struct AVLTreeAbstractorForFreeList {
    typedef FreeListEntry* handle;
    typedef int32_t size;
    typedef size_t key;

    handle get_less(handle h) { return h->less; }
    void set_less(handle h, handle lh) { h->less = lh; }
    handle get_greater(handle h) { return h->greater; }
    void set_greater(handle h, handle gh) { h->greater = gh; }
    int get_balance_factor(handle h) { return h->balanceFactor; }
    void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }

    static handle null() { return 0; }

    int compare_key_key(key va, key vb) { return va - vb; }
    int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
    int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
};
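
// A rough sketch of how the tree is driven through this abstractor (mirroring
// the calls FixedVMPoolAllocator makes below; the pointer and sizes here are
// invented purely for illustration):
//
//   AVLTree<AVLTreeAbstractorForFreeList, 40> tree;
//   tree.insert(new FreeListEntry(base, 16384));                 // keyed on its size
//   FreeListEntry* fit = tree.search(4096, tree.GREATER_EQUAL);  // an entry with size >= 4096
//   tree.remove(fit->size);                                      // removed by key, i.e. by size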

// Used to reverse sort an array of FreeListEntry pointers.
static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
{
    FreeListEntry* left = *(FreeListEntry**)leftPtr;
    FreeListEntry* right = *(FreeListEntry**)rightPtr;

    return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
}

// Used to reverse sort an array of pointers.
static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
{
    void* left = *(void**)leftPtr;
    void* right = *(void**)rightPtr;

    return (intptr_t)right - (intptr_t)left;
}

class FixedVMPoolAllocator
{
    // The free list is stored in a sorted tree.
    typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;

    // Use madvise as appropriate to prevent freed pages from being spilled,
    // and to attempt to ensure that used memory is reported correctly.
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) {}
#else
    void release(void*, size_t) {}
    void reuse(void*, size_t) {}
#endif

    // All additions to the free list should go through this method, rather than
    // calling insert directly, to avoid multiple entries being added with the
    // same key. All nodes being added should be singletons; they should not
    // already be part of a chain.
    void addToFreeList(FreeListEntry* entry)
    {
        ASSERT(!entry->nextEntry);

        if (entry->size == m_commonSize) {
            m_commonSizedAllocations.append(entry->pointer);
            delete entry;
        } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
            // m_freeList already contains an entry for this size - insert this node into the chain.
            entry->nextEntry = entryInFreeList->nextEntry;
            entryInFreeList->nextEntry = entry;
        } else
            m_freeList.insert(entry);
    }

    // We do not attempt to coalesce additions, which may lead to fragmentation;
    // instead we periodically perform a sweep to try to coalesce neighboring
    // entries in m_freeList. Presently this is triggered once COALESCE_LIMIT
    // (16Mb on x86-64) of memory has been released.
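    //
    // As a purely illustrative example (addresses invented): a free chunk at
    // 0x10000 of size 0x4000 and a free chunk at 0x14000 of size 0x8000 abut
    // one another, so the sweep merges them into a single FreeListEntry at
    // 0x10000 of size 0xc000 before re-adding it to m_freeList.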
    void coalesceFreeSpace()
    {
        Vector<FreeListEntry*> freeListEntries;
        SizeSortedFreeTree::Iterator iter;
        iter.start_iter_least(m_freeList);

        // Empty m_freeList into a Vector.
        for (FreeListEntry* entry; (entry = *iter); ++iter) {
            // Each entry in m_freeList might correspond to multiple
            // free chunks of memory (of the same size). Walk the chain
            // (which will, of course, most likely be only one entry long!)
            // adding each entry to the Vector (resetting the next-in-chain
            // pointer to separate each node out).
            FreeListEntry* next;
            do {
                next = entry->nextEntry;
                entry->nextEntry = 0;
                freeListEntries.append(entry);
            } while ((entry = next));
        }
        // All entries are now in the Vector; purge the tree.
        m_freeList.purge();

        // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
        // We reverse-sort so that we can logically work forwards through memory,
        // whilst popping items off the end of the Vectors using last() and removeLast().
        qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
        qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);

        // The entries from m_commonSizedAllocations that cannot be
        // coalesced into larger chunks will be temporarily stored here.
        Vector<void*> newCommonSizedAllocations;

        // Keep processing so long as entries remain in either of the vectors.
        while (freeListEntries.size() || m_commonSizedAllocations.size()) {
            // We're going to try to find a FreeListEntry node that we can coalesce onto.
            FreeListEntry* coalescionEntry = 0;

            // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
            if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
                // Pop an item from the m_commonSizedAllocations vector - this is the lowest
                // addressed free chunk. Find out the begin and end addresses of the memory chunk.
                void* begin = m_commonSizedAllocations.last();
                void* end = (void*)((intptr_t)begin + m_commonSize);
                m_commonSizedAllocations.removeLast();

                // Try to find another free chunk abutting onto the end of the one we have already found.
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // There is an existing FreeListEntry for the next chunk of memory!
                    // We can reuse this. Pop it off the end of m_freeList.
                    coalescionEntry = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Update the existing node to include the common-sized chunk that we also found.
                    coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
                    coalescionEntry->size += m_commonSize;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // There is a second common-sized chunk that can be coalesced.
                    // Allocate a new node.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
                } else {
                    // Nope - this poor little guy is all on his own. :-(
                    // Add him into the newCommonSizedAllocations vector for now, we're
                    // going to end up adding him back into the m_commonSizedAllocations
                    // list when we're done.
                    newCommonSizedAllocations.append(begin);
                    continue;
                }
            } else {
                ASSERT(freeListEntries.size());
                ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
                // The lowest addressed item is from m_freeList; pop it from the Vector.
                coalescionEntry = freeListEntries.last();
                freeListEntries.removeLast();
            }

            // Right, we have a FreeListEntry, we just need to check if there is anything else
            // to coalesce onto the end.
            ASSERT(coalescionEntry);
            while (true) {
                // Calculate the end address of the chunk we have found so far.
                void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);

                // Is there another chunk adjacent to the one we already have?
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // Yes - another FreeListEntry - pop it from the list.
                    FreeListEntry* coalescee = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Add its size onto our existing node.
                    coalescionEntry->size += coalescee->size;
                    delete coalescee;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // We can coalesce the next common-sized chunk.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry->size += m_commonSize;
                } else
                    break; // Nope, nothing to be added - stop here.
            }

            // We've coalesced everything we can onto the current chunk.
            // Add it back into m_freeList.
            addToFreeList(coalescionEntry);
        }

        // All chunks of free memory larger than m_commonSize should be
        // back in m_freeList by now. All that remains to be done is to
        // copy the contents of newCommonSizedAllocations back into
        // the m_commonSizedAllocations Vector.
        ASSERT(m_commonSizedAllocations.size() == 0);
        m_commonSizedAllocations.append(newCommonSizedAllocations);
    }

public:

    FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
        : m_commonSize(commonSize)
        , m_countFreedSinceLastCoalesce(0)
        , m_totalHeapSize(totalHeapSize)
    {
        // Cook up an address to allocate at, using the following recipe:
        //   17 bits of zero, stay in userspace kids.
        //   26 bits of randomness for ASLR.
        //   21 bits of zero, at least stay aligned within one level of the pagetables.
        //
        // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
        // for now instead of 2^26 bits of ASLR let's stick with 25 bits of randomization plus
        // 2^24, which should put us somewhere in the middle of userspace (in the address range
        // 0x200000000000 .. 0x5fffffffffff).
        intptr_t randomLocation = 0;
#if VM_POOL_ASLR
        randomLocation = arc4random() & ((1 << 25) - 1);
        randomLocation += (1 << 24);
        randomLocation <<= 21;
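        // Working through the arithmetic above: the random component lies in the
        // range 2^24 .. (2^24 + 2^25 - 1), so after the << 21 the mapping hint
        // lies in 0x200000000000 (2^45) .. 0x5fffffe00000 - the range described
        // in the comment above - and is always 2Mb (2^21) aligned.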
#endif
        m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);

        // mmap signals failure by returning MAP_FAILED ((void*)-1) rather than 0,
        // so normalize that to 0 before testing m_base below.
        if (m_base == MAP_FAILED)
            m_base = 0;

        if (m_base) {
            // For simplicity, we keep all memory in m_freeList in a 'released' state.
            // This means that we can simply reuse all memory when allocating, without
            // worrying about its previous state, and also makes coalescing m_freeList
            // simpler since we need not worry about the possibility of coalescing released
            // chunks with non-released ones.
            release(m_base, m_totalHeapSize);
            m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
        }
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }
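
    // In practice (see the bottom of this file) the allocator is constructed as
    // FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE), so on
    // x86-64 m_freeList starts out holding a single 2Gb FreeListEntry covering
    // the whole reservation, and all allocations are carved out of that chunk.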

    void* alloc(size_t size)
    {
#if ENABLE(INTERPRETER)
        if (!m_base)
            return 0;
#else
        ASSERT(m_base);
#endif
        void* result;

        // Freed allocations of the common size are not stored back into the main
        // m_freeList, but are instead stored in a separate vector. If the request
        // is for a common sized allocation, check this list.
        if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
            result = m_commonSizedAllocations.last();
            m_commonSizedAllocations.removeLast();
        } else {
            // Search m_freeList for a suitably sized chunk to allocate memory from.
            FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);

            // This would be bad news.
            if (!entry) {
                // Errk! Let's take a last-ditch desperation attempt at defragmentation...
                coalesceFreeSpace();
                // Did that free up a large enough chunk?
                entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
                // No?... *BOOM!*
                if (!entry)
                    CRASH();
            }
            ASSERT(entry->size != m_commonSize);

            // Remove the entry from m_freeList. But! -
            // Each entry in the tree may represent a chain of multiple chunks of the
            // same size, and we only want to remove one of them. So, if this entry
            // does have a chain, just remove the second item from the chain.
            if (FreeListEntry* next = entry->nextEntry) {
                // We're going to leave 'entry' in the tree; remove 'next' from its chain.
                entry->nextEntry = next->nextEntry;
                next->nextEntry = 0;
                entry = next;
            } else
                m_freeList.remove(entry->size);

            // Whoo! We have a result!
            ASSERT(entry->size >= size);
            result = entry->pointer;

            // If the allocation exactly fits the chunk we found in the m_freeList,
            // then the FreeListEntry node is no longer needed.
            if (entry->size == size)
                delete entry;
            else {
                // There is memory left over, and it is not of the common size.
                // We can reuse the existing FreeListEntry node to add this back
                // into m_freeList.
                entry->pointer = (void*)((intptr_t)entry->pointer + size);
                entry->size -= size;
                addToFreeList(entry);
            }
        }

        // Call reuse to report to the operating system that this memory is in use.
        ASSERT(isWithinVMPool(result, size));
        reuse(result, size);
        return result;
    }

    void free(void* pointer, size_t size)
    {
        ASSERT(m_base);
        // Call release to report to the operating system that this
        // memory is no longer in use, and need not be paged out.
        ASSERT(isWithinVMPool(pointer, size));
        release(pointer, size);

        // Common-sized allocations are stored in the m_commonSizedAllocations
        // vector; all other freed chunks are added to m_freeList.
        if (size == m_commonSize)
            m_commonSizedAllocations.append(pointer);
        else
            addToFreeList(new FreeListEntry(pointer, size));

        // Do some housekeeping. Every time we reach a point where
        // COALESCE_LIMIT bytes of allocations have been freed, sweep
        // m_freeList coalescing any neighboring fragments.
        m_countFreedSinceLastCoalesce += size;
        if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
            m_countFreedSinceLastCoalesce = 0;
            coalesceFreeSpace();
        }
    }

    bool isValid() const { return !!m_base; }

private:

#ifndef NDEBUG
    bool isWithinVMPool(void* pointer, size_t size)
    {
        return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
    }
#endif

    // Freed space from the most common sized allocations will be held in this list, ...
    const size_t m_commonSize;
    Vector<void*> m_commonSizedAllocations;

    // ... and all other freed allocations are held in m_freeList.
    SizeSortedFreeTree m_freeList;

    // This is used for housekeeping, to trigger defragmentation of the freed lists.
    size_t m_countFreedSinceLastCoalesce;

    void* m_base;
    size_t m_totalHeapSize;
};

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

static FixedVMPoolAllocator* allocator = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;

bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lock_holder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
    return allocator->isValid();
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    return alloc;
}

void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
}

}

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)