/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#if ENABLE(JIT)
#include "JIT.h"

#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "LinkBuffer.h"

namespace JSC {

#if !USE(JSVALUE32_64)

#define RECORD_JUMP_TARGET(targetOffset) \
    do { m_labels[m_bytecodeOffset + (targetOffset)].used(); } while (false)
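// The macro above records that a bytecode offset is a jump target (targetOffset
// is relative to the current bytecode offset), so that code generation keeps a
// label for it.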

void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure* trampolines)
{
#if ENABLE(JIT_OPTIMIZE_MOD)
    Label softModBegin = align();
    softModulo();
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    // Fast property access for string length.
    Label stringLengthBegin = align();

    // Check that the value in regT0 is a string.
    Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
    Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));

    // Checks out okay! - get the length from the UString.
    load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);

    Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
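    // A length above JSImmediate::maxImmediateInt cannot be represented as an
    // immediate integer, so it falls back to the generic getter via the
    // failure case linked below.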

    // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
    emitFastArithIntToImmNoCheck(regT0, regT0);

    ret();
#endif

    // Trampolines for the slow cases of op_call / op_call_eval / op_construct.
    COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);

    // VirtualCallLink Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    JumpList callLinkFailures;
    Label virtualCallLinkBegin = align();
    compileOpCallInitializeCallFrame();
    preserveReturnAddressAfterCall(regT3);
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
    restoreArgumentReference();
    Call callLazyLinkCall = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    restoreReturnAddressBeforeReturn(regT3);
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    jump(regT0);

    // VirtualConstructLink Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualConstructLinkBegin = align();
    compileOpCallInitializeCallFrame();
    preserveReturnAddressAfterCall(regT3);
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
    restoreArgumentReference();
    Call callLazyLinkConstruct = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    restoreReturnAddressBeforeReturn(regT3);
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    jump(regT0);

    // VirtualCall Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualCallBegin = align();
    compileOpCallInitializeCallFrame();

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
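    // A negative m_numParametersForCall appears to mark an executable that has
    // not yet been JIT-compiled for calling; in that case we fall through into
    // the compile stub and reload the executable afterwards.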
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callCompileCall = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock3.link(this);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
    jump(regT0);

    // VirtualConstruct Trampoline
    // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
    Label virtualConstructBegin = align();
    compileOpCallInitializeCallFrame();

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

    Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
    preserveReturnAddressAfterCall(regT3);
    restoreArgumentReference();
    Call callCompileConstruct = call();
    callLinkFailures.append(branchTestPtr(Zero, regT0));
    emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
    restoreReturnAddressBeforeReturn(regT3);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    hasCodeBlock4.link(this);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
    jump(regT0);

    // If the parser fails we want to be able to keep going,
    // so we handle this as a parse failure.
    callLinkFailures.link(this);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
    restoreReturnAddressBeforeReturn(regT1);
    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
    ret();
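    // The block above unwinds to the caller's frame, records where the failure
    // happened in globalData->exceptionLocation, and rewrites the stub return
    // address so that the ret() lands in ctiVMThrowTrampoline, which raises
    // the exception.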

    // NativeCall Trampoline
    Label nativeCallThunk = privateCompileCTINativeCall(globalData);
    Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
    Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
    Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
#endif

    // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
    LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
    patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
    patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
    patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
#endif
    patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
    patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));

    CodeRef finalCode = patchBuffer.finalizeCode();
    *executablePool = finalCode.m_executablePool;

    trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
    trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
    trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
    trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
    trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
    trampolines->ctiNativeConstruct = trampolineAt(finalCode, nativeConstructThunk);
#if ENABLE(JIT_OPTIMIZE_MOD)
    trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
    trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#endif
}

JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
{
    int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);

    Label nativeCallThunk = align();

#if CPU(X86_64)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    peek(regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);

    // Calling convention: f(edi, esi, edx, ecx, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, X86Registers::edi);

    subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
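    // The call into this thunk pushed an 8-byte return address, so subtracting
    // 16 - sizeof(void*) (== 8 on x86-64) keeps %rsp 16-byte aligned at the
    // host-function call, as the System V ABI requires; the matching addPtr
    // below undoes it.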

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
    loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    call(Address(X86Registers::r9, executableOffsetToFunction));

    addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);

#elif CPU(ARM)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    preserveReturnAddressAfterCall(regT3); // Callee preserved
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);

    // Calling convention: f(r0 == regT0, r1 == regT1, ...);
    // Host function signature: f(ExecState*);
    move(callFrameRegister, ARMRegisters::r0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    call(Address(regT2, executableOffsetToFunction));

    restoreReturnAddressBeforeReturn(regT3);

#elif CPU(MIPS)
    // Load caller frame's scope chain into this callframe so that whatever we call can
    // get to its global data.
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);

    preserveReturnAddressAfterCall(regT3); // Callee preserved
    emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);

    // Calling convention: f(a0, a1, a2, a3);
    // Host function signature: f(ExecState*);

    // Allocate stack space for 16 bytes (8-byte aligned)
    // 16 bytes (unused) for 4 arguments
    subPtr(Imm32(16), stackPointerRegister);

    // Setup arg0
    move(callFrameRegister, MIPSRegisters::a0);

    // Call
    emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
    loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
    call(Address(regT2, executableOffsetToFunction));

    // Restore stack space
    addPtr(Imm32(16), stackPointerRegister);

    restoreReturnAddressBeforeReturn(regT3);

#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
    UNUSED_PARAM(executableOffsetToFunction);
    breakpoint();
#endif

    // Check for an exception
    loadPtr(&(globalData->exception), regT2);
    Jump exceptionHandler = branchTestPtr(NonZero, regT2);

    // Return.
    ret();

    // Handle an exception
    exceptionHandler.link(this);

    // Grab the return address.
    preserveReturnAddressAfterCall(regT1);

    move(ImmPtr(&globalData->exceptionLocation), regT2);
    storePtr(regT1, regT2);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    // Set the return address.
    move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
    restoreReturnAddressBeforeReturn(regT1);

    ret();

    return nativeCallThunk;
}

JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
{
    return globalData->jitStubs->ctiNativeCall();
}

void JIT::emit_op_mov(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    if (m_codeBlock->isConstantRegisterIndex(src)) {
        storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
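        // The constant store above bypasses the result-register cache, so if
        // dst happens to be the cached register, the cache is now stale.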
        if (dst == m_lastResultBytecodeRegister)
            killLastResultRegister();
    } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
        // If either the src or dst is the cached register, go through
        // the get/put register helpers to make sure we track this correctly.
        emitGetVirtualRegister(src, regT0);
        emitPutVirtualRegister(dst);
    } else {
        // Perform the copy via regT1; do not disturb any mapping in regT0.
        loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
        storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
    }
}

void JIT::emit_op_end(Instruction* currentInstruction)
{
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_end).call();
    ASSERT(returnValueRegister != callFrameRegister);
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
    restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
    ret();
}

void JIT::emit_op_jmp(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[1].u.operand;
    addJump(jump(), target);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
    emitTimeoutCheck();
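    // Loop back-edges emit a timeout check (above) so that long-running
    // scripts can be interrupted.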

    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(LessThanOrEqual, regT0, regT1), target);
    }
}

void JIT::emit_op_new_object(Instruction* currentInstruction)
{
    JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    // Load the operands (baseVal, proto, and value respectively) into registers.
    // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
    emitGetVirtualRegister(value, regT2);
    emitGetVirtualRegister(baseVal, regT0);
    emitGetVirtualRegister(proto, regT1);

    // Check that baseVal & proto are cells.
    emitJumpSlowCaseIfNotJSCell(regT2, value);
    emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
    emitJumpSlowCaseIfNotJSCell(regT1, proto);

    // Check that baseVal 'ImplementsDefaultHasInstance'.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
    addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));

    // Optimistically load the result true, and start looping.
    // Initially, regT1 still contains proto and regT2 still contains value.
    // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
    move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
    Label loop(this);

    // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
    // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    Jump isInstance = branchPtr(Equal, regT2, regT1);
    emitJumpIfJSCell(regT2).linkTo(loop, this);

    // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
    move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);

    // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
    isInstance.link(this);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_new_func(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_call(Instruction* currentInstruction)
{
    compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
    compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    int argCountDst = currentInstruction[1].u.operand;
    int argsOffset = currentInstruction[2].u.operand;

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(Imm32(argsOffset));
    stubCall.call();
    // Stores a naked int32 in the register file.
    store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}

void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    compileOpCallVarargs(currentInstruction);
}

void JIT::emit_op_construct(Instruction* currentInstruction)
{
    compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
    JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
    move(ImmPtr(globalObject), regT0);
    emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
}

void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    while (skip--)
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
    emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
    while (skip--)
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);

    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
    emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
}

void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_tear_off_activation);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand), regT2);
    stubCall.call();
}

void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
    JITStubCall stubCall(this, cti_op_tear_off_arguments);
    stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
    stubCall.call();
    argsNotCreated.link(this);
}

void JIT::emit_op_ret(Instruction* currentInstruction)
{
    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueRegister);
    ASSERT(returnValueRegister != callFrameRegister);

    // Return the result in %eax.
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();
}

void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    ASSERT(callFrameRegister != regT1);
    ASSERT(regT1 != returnValueRegister);
    ASSERT(returnValueRegister != callFrameRegister);

    // Return the result in %eax.
    emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
    Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
    loadPtr(Address(returnValueRegister, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();

    // Return 'this' in %eax.
    notJSCell.link(this);
    notObject.link(this);
    emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);

    // Grab the return address.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);

    // Restore our caller's "r".
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    // Return.
    restoreReturnAddressBeforeReturn(regT1);
    ret();
}

void JIT::emit_op_new_array(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_array);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump isImm = emitJumpIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
    isImm.link(this);

    if (dst != src)
        emitPutVirtualRegister(dst);
}

void JIT::emit_op_strcat(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_strcat);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_skip);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
{
    // Fast case
    void* globalObject = currentInstruction[2].u.jsCell;
    unsigned currentIndex = m_globalResolveInfoIndex++;
    void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
    void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);

    // Check Structure of global object
    move(ImmPtr(globalObject), regT0);
    loadPtr(structureAddress, regT1);
    addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)))); // Structures don't match

    // Load cached property
    // Assume that the global object always uses external storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
    load32(offsetAddr, regT1);
    loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}

void JIT::emit_op_not(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
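    // A sketch of the bit trick above: the first xor strips the boolean tag,
    // leaving only the payload bit if the value really was a boolean; the
    // branchTest sends anything with other bits set to the slow case; the
    // second xor restores the tag and flips the payload, producing !value.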
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
    Jump isNonZero = emitJumpIfImmediateInteger(regT0);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
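    // Only integer zero / non-zero and the two boolean immediates are handled
    // inline; any other value (doubles, strings, objects) falls through to the
    // slow case, which evaluates toBoolean via cti_op_jtrue and inverts.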

    isNonZero.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    unsigned target = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
    Jump wasNotImmediate = jump();

    // Now handle the immediate cases - undefined & null
    isImmediate.link(this);
    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target);

    wasNotImmediate.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
    unsigned src = currentInstruction[1].u.operand;
    JSCell* ptr = currentInstruction[2].u.jsCell;
    unsigned target = currentInstruction[3].u.operand;

    emitGetVirtualRegister(src, regT0);
    addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target);

    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_jsr(Instruction* currentInstruction)
{
    int retAddrDst = currentInstruction[1].u.operand;
    int target = currentInstruction[2].u.operand;
    DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
    addJump(jump(), target);
    m_jsrSites.append(JSRInfo(storeLocation, label()));
    killLastResultRegister();
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_sret(Instruction* currentInstruction)
{
    jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
    killLastResultRegister();
}

void JIT::emit_op_eq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(Equal, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    not32(regT0);
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_resolve_with_base);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call(currentInstruction[2].u.operand);
}

void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_func_exp);
    stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
    unsigned target = currentInstruction[2].u.operand;
    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);

    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
    addJump(emitJumpIfImmediateInteger(regT0), target);

    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));

    isZero.link(this);
    RECORD_JUMP_TARGET(target);
}

void JIT::emit_op_neq(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    xorPtr(regT1, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
    emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
    orPtr(regT1, regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_throw(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_throw);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call();
    ASSERT(regT0 == returnValueRegister);
#ifndef NDEBUG
    // cti_op_throw always changes its return address,
    // so this point in the code should never be reached.
    breakpoint();
#endif
}

void JIT::emit_op_get_pnames(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int breakTarget = currentInstruction[5].u.operand;

    JumpList isNotObject;

    emitGetVirtualRegister(base, regT0);
    if (!m_codeBlock->isKnownNotImmediate(base))
        isNotObject.append(emitJumpIfNotJSCell(regT0));
    if (base != m_codeBlock->thisRegister()) {
        loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
        isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
    }

    // We could inline the case where you have a valid cache, but
    // this call doesn't seem to be hot.
    Label isObject(this);
    JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
    getPnamesStubCall.addArgument(regT0);
    getPnamesStubCall.call(dst);
    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
    store32(Imm32(0), addressFor(i));
    store32(regT3, addressFor(size));
    Jump end = jump();

    isNotObject.link(this);
    move(regT0, regT1);
    and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
    addJump(branch32(Equal, regT1, Imm32(JSImmediate::FullTagTypeNull)), breakTarget);

    JITStubCall toObjectStubCall(this, cti_to_object);
    toObjectStubCall.addArgument(regT0);
    toObjectStubCall.call(base);
    jump().linkTo(isObject, this);

    end.link(this);
}

void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int i = currentInstruction[3].u.operand;
    int size = currentInstruction[4].u.operand;
    int it = currentInstruction[5].u.operand;
    int target = currentInstruction[6].u.operand;

    JumpList callHasProperty;

    Label begin(this);
    load32(addressFor(i), regT0);
    Jump end = branch32(Equal, regT0, addressFor(size));

    // Grab key @ i
    loadPtr(addressFor(it), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);

#if USE(JSVALUE64)
    loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
#else
    loadPtr(BaseIndex(regT2, regT0, TimesFour), regT2);
#endif

    emitPutVirtualRegister(dst, regT2);

    // Increment i
    add32(Imm32(1), regT0);
    store32(regT0, addressFor(i));

    // Verify that i is valid:
    emitGetVirtualRegister(base, regT0);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));

    // Test base's prototype chain
    loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
    addJump(branchTestPtr(Zero, Address(regT3)), target);

    Label checkPrototype(this);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
    callHasProperty.append(emitJumpIfNotJSCell(regT2));
    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
    addPtr(Imm32(sizeof(Structure*)), regT3);
    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
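    // The loop above walks the structures of the base's prototype chain in
    // lock-step with the cached StructureChain (a null-terminated vector of
    // Structure*); any mismatch means the cache is no longer valid for this
    // object, so the key must be re-validated through cti_has_property below.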

    // Continue loop.
    addJump(jump(), target);

    // Slow case: Ask the object if i is valid.
    callHasProperty.link(this);
    emitGetVirtualRegister(dst, regT1);
    JITStubCall stubCall(this, cti_has_property);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();

    // Test for valid key.
    addJump(branchTest32(NonZero, regT0), target);
    jump().linkTo(begin, this);

    // End of loop.
    end.link(this);
}

void JIT::emit_op_push_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_scope);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_pop_scope(Instruction*)
{
    JITStubCall(this, cti_op_pop_scope).call();
}

void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;
    unsigned src2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    // Jump to a slow case if either operand is a number, or if both are JSCell*s.
    move(regT0, regT2);
    orPtr(regT1, regT2);
    addSlowCase(emitJumpIfJSCell(regT2));
    addSlowCase(emitJumpIfImmediateNumber(regT2));
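    // Rationale (as I read it): cells need the slow path because strings
    // compare by value, and immediate numbers do too because an integer and a
    // double immediate can be strict-equal without being bit-equal; only
    // non-number immediates are safe to compare as raw bits below.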

    if (type == OpStrictEq)
        set32(Equal, regT1, regT0, regT0);
    else
        set32(NotEqual, regT1, regT0, regT0);
    emitTagAsBoolImmediate(regT0);

    emitPutVirtualRegister(dst);
}

void JIT::emit_op_stricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpStrictEq);
}

void JIT::emit_op_nstricteq(Instruction* currentInstruction)
{
    compileOpStrictEq(currentInstruction, OpNStrictEq);
}

void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
{
    int srcVReg = currentInstruction[2].u.operand;
    emitGetVirtualRegister(srcVReg, regT0);

    Jump wasImmediate = emitJumpIfImmediateInteger(regT0);

    emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));

    wasImmediate.link(this);

    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_push_new_scope);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_catch(Instruction* currentInstruction)
{
    killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
    peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_jmp_scopes);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.call();
    addJump(jump(), currentInstruction[2].u.operand);
    RECORD_JUMP_TARGET(currentInstruction[2].u.operand);
}

void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
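    // The ctiOffsets vector is grown to one machine-code offset per bytecode
    // branch offset; the entries are filled in at link time, and the stub call
    // below returns the machine-code address to jump to for the scrutinee.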

    JITStubCall stubCall(this, cti_op_switch_imm);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

    JITStubCall stubCall(this, cti_op_switch_char);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
    unsigned tableIndex = currentInstruction[1].u.operand;
    unsigned defaultOffset = currentInstruction[2].u.operand;
    unsigned scrutinee = currentInstruction[3].u.operand;

    // create jump table for switch destinations, track this switch statement.
    StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
    m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));

    JITStubCall stubCall(this, cti_op_switch_string);
    stubCall.addArgument(scrutinee, regT2);
    stubCall.addArgument(Imm32(tableIndex));
    stubCall.call();
    jump(regT0);
}

void JIT::emit_op_new_error(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_error);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
    stubCall.addArgument(Imm32(m_bytecodeOffset));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_debug(Instruction* currentInstruction)
{
#if ENABLE(DEBUG_WITH_BREAKPOINT)
    UNUSED_PARAM(currentInstruction);
    breakpoint();
#else
    JITStubCall stubCall(this, cti_op_debug);
    stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
    stubCall.call();
#endif
}

void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
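    // MasqueradesAsUndefined marks host objects (e.g. document.all) that
    // compare equal to null and undefined, so for cells the answer comes
    // straight from that type-info flag.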

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src1, regT0);
    Jump isImmediate = emitJumpIfNotJSCell(regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    setTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);

    Jump wasNotImmediate = jump();

    isImmediate.link(this);

    andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
    setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);

    wasNotImmediate.link(this);

    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(dst);
}

void JIT::emit_op_enter(Instruction*)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);
}

void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
{
    // Even though CTI doesn't use them, we initialize our constant
    // registers to zap stale pointers, to avoid unnecessarily prolonging
    // object lifetime and increasing GC pressure.
    size_t count = m_codeBlock->m_numVars;
    for (size_t j = 0; j < count; ++j)
        emitInitRegister(j);

    JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
}

void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;

    Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
    if (m_codeBlock->m_numParameters == 1)
        JITStubCall(this, cti_op_create_arguments_no_params).call();
    else
        JITStubCall(this, cti_op_create_arguments).call();
    emitPutVirtualRegister(dst);
    emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
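    // The result is stored both at dst and at the unmodified-arguments slot;
    // as I understand it, the latter lets tear-off find the arguments object
    // even if the script later reassigns the 'arguments' variable.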
|
1226 argsCreated.link(this); |
|
1227 } |
|
1228 |
|
1229 void JIT::emit_op_init_arguments(Instruction* currentInstruction) |
|
1230 { |
|
1231 unsigned dst = currentInstruction[1].u.operand; |
|
1232 |
|
1233 storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst)); |
|
1234 storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst)))); |
|
1235 } |
|
1236 |
|
1237 void JIT::emit_op_convert_this(Instruction* currentInstruction) |
|
1238 { |
|
1239 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); |
|
1240 |
|
1241 emitJumpSlowCaseIfNotJSCell(regT0); |
|
1242 loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1); |
|
1243 addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); |
|
1244 } |
|
1245 |
|
1246 void JIT::emit_op_get_callee(Instruction* currentInstruction) |
|
1247 { |
|
1248 unsigned result = currentInstruction[1].u.operand; |
|
1249 emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0); |
|
1250 emitPutVirtualRegister(result); |
|
1251 } |
|
1252 |
|
1253 void JIT::emit_op_create_this(Instruction* currentInstruction) |
|
1254 { |
|
1255 JITStubCall stubCall(this, cti_op_create_this); |
|
1256 stubCall.addArgument(currentInstruction[2].u.operand, regT1); |
|
1257 stubCall.call(currentInstruction[1].u.operand); |
|
1258 } |
|
1259 |
|
1260 void JIT::emit_op_profile_will_call(Instruction* currentInstruction) |
|
1261 { |
|
1262 peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); |
|
1263 Jump noProfiler = branchTestPtr(Zero, Address(regT1)); |
|
1264 |
|
1265 JITStubCall stubCall(this, cti_op_profile_will_call); |
|
1266 stubCall.addArgument(currentInstruction[1].u.operand, regT1); |
|
1267 stubCall.call(); |
|
1268 noProfiler.link(this); |
|
1269 |
|
1270 } |
|
1271 |
|
1272 void JIT::emit_op_profile_did_call(Instruction* currentInstruction) |
|
1273 { |
|
1274 peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*)); |
|
1275 Jump noProfiler = branchTestPtr(Zero, Address(regT1)); |
|
1276 |
|
1277 JITStubCall stubCall(this, cti_op_profile_did_call); |
|
1278 stubCall.addArgument(currentInstruction[1].u.operand, regT1); |
|
1279 stubCall.call(); |
|
1280 noProfiler.link(this); |
|
1281 } |
|
1282 |
|
1283 |
|
1284 // Slow cases |
|
1285 |
|
void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_convert_this);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_primitive);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

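// The number of slow cases to link matches the fast path: with a constant
// immediate-int operand only one type check was emitted, otherwise two.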
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(currentInstruction[2].u.operand, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    } else {
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_loop_if_lesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    }
}

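// Each linkSlowCase below must mirror, in order, an addSlowCase emitted on
// the fast path; the trailing comments name the corresponding check.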
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    stubPutByValCall.call();
}

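// The fast path xors the bool tag out of regT0 before its type check, so
// xor it back in here to recover the original operand for the stub call.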
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
    JITStubCall stubCall(this, cti_op_not);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

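// There is no separate jfalse stub: reuse cti_op_jtrue and invert the test
// on its boolean result.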
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_jtrue);
    stubCall.addArgument(regT0);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

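// op_eq and op_neq share the cti_op_eq stub; op_neq just flips the low bit
// of the returned boolean before tagging it.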
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_eq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call();
    xor32(Imm32(0x1), regT0);
    emitTagAsBoolImmediate(regT0);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

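// The strict-equality fast path registers two slow cases; both link here
// and the stub writes the boolean result straight to dst.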
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_stricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_nstricteq);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(currentInstruction[1].u.operand);
}

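// Link one slow case per operand that was not proven to be a cell, then
// the final type-info check, in the same order the fast path added them.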
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned value = currentInstruction[2].u.operand;
    unsigned baseVal = currentInstruction[3].u.operand;
    unsigned proto = currentInstruction[4].u.operand;

    linkSlowCaseIfNotJSCell(iter, value);
    linkSlowCaseIfNotJSCell(iter, baseVal);
    linkSlowCaseIfNotJSCell(iter, proto);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_instanceof);
    stubCall.addArgument(value, regT2);
    stubCall.addArgument(baseVal, regT2);
    stubCall.addArgument(proto, regT2);
    stubCall.call(dst);
}

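// The call-family slow cases all delegate to the shared helpers, passing
// the opcode (and bumping the call link info index) so the helper emits
// the right stub and linking record.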
void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
}

void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
}

void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}

void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
}

void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_to_jsnumber);
    stubCall.addArgument(regT0);
    stubCall.call(currentInstruction[1].u.operand);
}

#endif // !USE(JSVALUE32_64)

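// op_resolve_global_dynamic skips 'skip' nodes of the scope chain; every
// skipped node must still be a plain activation (checked by structure),
// since anything else could dynamically shadow the global. If all checks
// pass, this falls through to the ordinary cached global resolve.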
void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
{
    int skip = currentInstruction[6].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    while (skip--) {
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
        addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
    }
    emit_op_resolve_global(currentInstruction, true);
}

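// One slow case was registered per skipped scope-chain node, plus a final
// one for a global cache miss. A failed structure check means a full
// dynamic cti_op_resolve; a plain cache miss only needs
// cti_op_resolve_global.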
void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    void* globalObject = currentInstruction[2].u.jsCell;
    Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
    int skip = currentInstruction[6].u.operand;
    while (skip--)
        linkSlowCase(iter);
    JITStubCall resolveStubCall(this, cti_op_resolve);
    resolveStubCall.addArgument(ImmPtr(ident));
    resolveStubCall.call(dst);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));

    unsigned currentIndex = m_globalResolveInfoIndex++;

    linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
    JITStubCall stubCall(this, cti_op_resolve_global);
    stubCall.addArgument(ImmPtr(globalObject));
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(Imm32(currentIndex));
    stubCall.call(dst);
}

void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_new_regexp);
    stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

// For both JSValue32_64 and JSValue32
#if ENABLE(JIT_OPTIMIZE_MOD)
#if CPU(ARM_TRADITIONAL)
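// Software integer modulo for ARM cores without a hardware divider. The
// shape of the routine below: record the operand signs in regT1 and take
// absolute values; exit early when dividend < divisor; handle power-of-two
// divisors with a single mask; otherwise jump (via a computed pc-relative
// branch) into an unrolled shift-and-subtract loop; finally restore the
// recorded sign and re-tag the result.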
void JIT::softModulo()
{
    push(regS0);
    push(regS1);
    push(regT1);
    push(regT3);
#if USE(JSVALUE32_64)
    m_assembler.mov_r(regT3, regT2);
    m_assembler.mov_r(regT2, regT0);
#else
    // JSVALUE64: strip the integer tag bit to get the raw operands.
    m_assembler.mov_r(regT3, m_assembler.asr(regT2, 1));
    m_assembler.mov_r(regT2, m_assembler.asr(regT0, 1));
#endif
    m_assembler.mov_r(regT1, ARMAssembler::getOp2(0));

    // Record the operand signs in regT1 and take absolute values.
    m_assembler.teq_r(regT3, ARMAssembler::getOp2(0));
    m_assembler.rsb_r(regT3, regT3, ARMAssembler::getOp2(0), ARMAssembler::MI);
    m_assembler.eor_r(regT1, regT1, ARMAssembler::getOp2(1), ARMAssembler::MI);

    m_assembler.teq_r(regT2, ARMAssembler::getOp2(0));
    m_assembler.rsb_r(regT2, regT2, ARMAssembler::getOp2(0), ARMAssembler::MI);
    m_assembler.eor_r(regT1, regT1, ARMAssembler::getOp2(2), ARMAssembler::MI);

    // If the dividend is already smaller than the divisor, it is the remainder.
    Jump exitBranch = branch32(LessThan, regT2, regT3);

    // Power-of-two divisor: (divisor - 1) & divisor == 0, so the remainder
    // is just dividend & (divisor - 1).
    m_assembler.sub_r(regS1, regT3, ARMAssembler::getOp2(1));
    m_assembler.tst_r(regS1, regT3);
    m_assembler.and_r(regT2, regT2, regS1, ARMAssembler::EQ);
    m_assembler.and_r(regT0, regS1, regT3);
    Jump exitBranch2 = branchTest32(Zero, regT0);

    // Compute how far the divisor must be shifted to align with the
    // dividend's most significant bit.
    m_assembler.clz_r(regS1, regT2);
    m_assembler.clz_r(regS0, regT3);
    m_assembler.sub_r(regS0, regS0, regS1);

    m_assembler.rsbs_r(regS0, regS0, ARMAssembler::getOp2(31));

    m_assembler.mov_r(regS0, m_assembler.lsl(regS0, 1), ARMAssembler::NE);

    // Computed branch into the unrolled shift-and-subtract loop below; the
    // following mov is a nop that pads the pc-relative offset.
    m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(regS0, 2), ARMAssembler::NE);
    m_assembler.mov_r(regT0, regT0);

    for (int i = 31; i > 0; --i) {
        m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
        m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
    }

    m_assembler.cmp_r(regT2, regT3);
    m_assembler.sub_r(regT2, regT2, regT3, ARMAssembler::CS);

    exitBranch.link(this);
    exitBranch2.link(this);

    // Reapply the sign recorded in regT1.
    m_assembler.teq_r(regT1, ARMAssembler::getOp2(0));
    m_assembler.rsb_r(regT2, regT2, ARMAssembler::getOp2(0), ARMAssembler::GT);

#if USE(JSVALUE32_64)
    m_assembler.mov_r(regT0, regT2);
#else
    // Re-tag the result as an immediate integer.
    m_assembler.mov_r(regT0, m_assembler.lsl(regT2, 1));
    m_assembler.eor_r(regT0, regT0, ARMAssembler::getOp2(1));
#endif
    pop(regT3);
    pop(regT1);
    pop(regS1);
    pop(regS0);
    ret();
}
#else
#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
#endif // CPU(ARM_TRADITIONAL)
#endif

} // namespace JSC

#endif // ENABLE(JIT)