/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h

#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>

#if OS(IPHONE_OS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif

#if OS(SYMBIAN)
#include <e32std.h>
#endif

#if CPU(MIPS) && OS(LINUX)
#include <sys/cachectl.h>
#endif

#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif

#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)

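// When ASSEMBLER_WX_EXCLUSIVE is enabled, JIT memory is mapped either writable
// or executable but never both at once (a W^X policy); otherwise a single
// read/write/execute mapping is used and the protection toggles below are no-ops.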
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
#else
#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
#endif

namespace JSC {

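// Rounds 'request' up to the next multiple of 'granularity', which must be a
// power of two for the mask arithmetic below to be valid; for example,
// roundUpAllocationSize(10, 8) == 16. Crashes rather than wrapping around if
// the rounded size would overflow size_t.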
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
{
    if ((std::numeric_limits<size_t>::max() - granularity) <= request)
        CRASH(); // Allocation is too large

    // Round up to the next multiple of the granularity
    size_t size = request + (granularity - 1);
    size = size & ~(granularity - 1);
    ASSERT(size >= request);
    return size;
}

}

#if ENABLE(JIT) && ENABLE(ASSEMBLER)

namespace JSC {

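// A reference-counted pool of executable memory. Allocation is a simple bump
// of m_freePtr within the most recent system allocation; if a request does not
// fit, a further chunk is obtained from the system and tracked in m_pools.
// All chunks are released back to the system when the pool is destroyed.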
class ExecutablePool : public RefCounted<ExecutablePool> {
private:
    struct Allocation {
        char* pages;
        size_t size;
#if OS(SYMBIAN)
        RChunk* chunk;
#endif
    };
    typedef Vector<Allocation, 2> AllocationList;

public:
    static PassRefPtr<ExecutablePool> create(size_t n)
    {
        return adoptRef(new ExecutablePool(n));
    }

    void* alloc(size_t n)
    {
        ASSERT(m_freePtr <= m_end);

        // Round 'n' up to a multiple of word size; if all allocations are of
        // word sized quantities, then all subsequent allocations will be aligned.
        n = roundUpAllocationSize(n, sizeof(void*));

        if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
            void* result = m_freePtr;
            m_freePtr += n;
            return result;
        }

        // Insufficient space to allocate in the existing pool,
        // so we need to allocate into a new pool.
        return poolAllocate(n);
    }

    ~ExecutablePool()
    {
        AllocationList::const_iterator end = m_pools.end();
        for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
            ExecutablePool::systemRelease(*ptr);
    }

    size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }

private:
    static Allocation systemAlloc(size_t n);
    static void systemRelease(const Allocation& alloc);

    ExecutablePool(size_t n);

    void* poolAllocate(size_t n);

    char* m_freePtr;
    char* m_end;
    AllocationList m_pools;
};

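// Hands out ExecutablePools to the JIT. Small requests share a single
// JIT_ALLOCATOR_LARGE_ALLOC_SIZE pool; requests larger than that get a pool of
// their own. Also provides the static helpers for toggling page protection
// (under ASSEMBLER_WX_EXCLUSIVE) and for flushing the instruction cache after
// code has been written.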
class ExecutableAllocator {
    enum ProtectionSeting { Writable, Executable };

public:
    static size_t pageSize;
    ExecutableAllocator()
    {
        if (!pageSize)
            intializePageSize();
        if (isValid())
            m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
#if !ENABLE(INTERPRETER)
        else
            CRASH();
#endif
    }

    bool isValid() const;

    PassRefPtr<ExecutablePool> poolForSize(size_t n)
    {
        // Try to fit in the existing small allocator
        ASSERT(m_smallAllocationPool);
        if (n < m_smallAllocationPool->available())
            return m_smallAllocationPool;

        // If the request is large, we just provide an unshared allocator
        if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
            return ExecutablePool::create(n);

        // Create a new allocator
        RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);

        // If the new allocator will result in more free space than in
        // the current small allocator, then we will use it instead
        if ((pool->available() - n) > m_smallAllocationPool->available())
            m_smallAllocationPool = pool;
        return pool.release();
    }

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void makeWritable(void* start, size_t size)
    {
        reprotectRegion(start, size, Writable);
    }

    static void makeExecutable(void* start, size_t size)
    {
        reprotectRegion(start, size, Executable);
    }
#else
    static void makeWritable(void*, size_t) {}
    static void makeExecutable(void*, size_t) {}
#endif

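    // cacheFlush() must be called after new code has been written to executable
    // memory. On x86/x86-64 the instruction cache is kept coherent with data
    // writes, so nothing needs to be done; on the other supported architectures
    // the data cache is flushed and the instruction cache invalidated for the
    // written range, using whatever mechanism the platform provides.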
#if CPU(X86) || CPU(X86_64)
    static void cacheFlush(void*, size_t)
    {
    }
#elif CPU(MIPS)
    static void cacheFlush(void* code, size_t size)
    {
#if COMPILER(GCC) && (GCC_VERSION >= 40300)
#if WTF_MIPS_ISA_REV(2) && (GCC_VERSION < 40403)
        int lineSize;
        asm("rdhwr %0, $1" : "=r" (lineSize));
        //
        // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in
        // mips_expand_synci_loop that may execute synci one more time.
        // "start" points to the first byte of the cache line.
        // "end" points to the last byte of the line before the last cache line.
        // Because size is always a multiple of 4, it is safe to set
        // "end" to the last byte.
        //
        intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
        intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
        __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
#else
        intptr_t end = reinterpret_cast<intptr_t>(code) + size;
        __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
#endif
#else
        _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
    }
#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
    static void cacheFlush(void* code, size_t size)
    {
        sys_dcache_flush(code, size);
        sys_icache_invalidate(code, size);
    }
#elif CPU(ARM_THUMB2) && OS(LINUX)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "movw r7, #0x2\n"
            "movt r7, #0xf\n"
            "movs r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(SYMBIAN)
    static void cacheFlush(void* code, size_t size)
    {
        User::IMB_Range(code, static_cast<char*>(code) + size);
    }
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
    static __asm void cacheFlush(void* code, size_t size);
#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
    static void cacheFlush(void* code, size_t size)
    {
        asm volatile (
            "push {r7}\n"
            "mov r0, %0\n"
            "mov r1, %1\n"
            "mov r7, #0xf0000\n"
            "add r7, r7, #0x2\n"
            "mov r2, #0x0\n"
            "svc 0x0\n"
            "pop {r7}\n"
            :
            : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
            : "r0", "r1", "r2");
    }
#elif OS(WINCE)
    static void cacheFlush(void* code, size_t size)
    {
        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
    }
#else
#error "cacheFlush support is missing on this platform."
#endif

private:

#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
    static void reprotectRegion(void*, size_t, ProtectionSeting);
#endif

    RefPtr<ExecutablePool> m_smallAllocationPool;
    static void intializePageSize();
};
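
// A minimal usage sketch (illustrative only; 'codeSize' and 'buffer' are
// hypothetical names, not part of this header): a client obtains a pool,
// bump-allocates space for generated code, makes it writable, copies the code
// in, restores execute permission and flushes the instruction cache.
//
//     ExecutableAllocator allocator;
//     RefPtr<ExecutablePool> pool = allocator.poolForSize(codeSize);
//     void* target = pool->alloc(codeSize);
//     ExecutableAllocator::makeWritable(target, codeSize);
//     memcpy(target, buffer, codeSize);
//     ExecutableAllocator::makeExecutable(target, codeSize);
//     ExecutableAllocator::cacheFlush(target, codeSize);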

inline ExecutablePool::ExecutablePool(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
    Allocation mem = systemAlloc(allocSize);
    m_pools.append(mem);
    m_freePtr = mem.pages;
    if (!m_freePtr)
        CRASH(); // Failed to allocate
    m_end = m_freePtr + allocSize;
}

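// Called by alloc() when the current chunk cannot satisfy the request: obtains
// a fresh chunk from the system, and adopts it as the new bump-allocation
// region if doing so leaves more free space than the old chunk had.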
inline void* ExecutablePool::poolAllocate(size_t n)
{
    size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);

    Allocation result = systemAlloc(allocSize);
    if (!result.pages)
        CRASH(); // Failed to allocate

    ASSERT(m_end >= m_freePtr);
    if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
        // Replace allocation pool
        m_freePtr = result.pages + n;
        m_end = result.pages + allocSize;
    }

    m_pools.append(result);
    return result.pages;
}

}

#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)

#endif // ExecutableAllocator_h