|
1 /* |
|
2 * Copyright (C) 2008 Apple Inc. All rights reserved. |
|
3 * |
|
4 * Redistribution and use in source and binary forms, with or without |
|
5 * modification, are permitted provided that the following conditions |
|
6 * are met: |
|
7 * 1. Redistributions of source code must retain the above copyright |
|
8 * notice, this list of conditions and the following disclaimer. |
|
9 * 2. Redistributions in binary form must reproduce the above copyright |
|
10 * notice, this list of conditions and the following disclaimer in the |
|
11 * documentation and/or other materials provided with the distribution. |
|
12 * |
|
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
|
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
|
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
|
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
|
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
|
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
|
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
|
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
|
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
|
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
|
24 */ |
|
25 |
|
26 #include "config.h" |
|
27 |
|
28 #if ENABLE(JIT) |
|
29 #if USE(JSVALUE32_64) |
|
30 #include "JIT.h" |
|
31 |
|
32 #include "CodeBlock.h" |
|
33 #include "Interpreter.h" |
|
34 #include "JITInlineMethods.h" |
|
35 #include "JITStubCall.h" |
|
36 #include "JSArray.h" |
|
37 #include "JSFunction.h" |
|
38 #include "ResultType.h" |
|
39 #include "SamplingTool.h" |
|
40 |
|
41 #ifndef NDEBUG |
|
42 #include <stdio.h> |
|
43 #endif |
|
44 |
|
45 using namespace std; |
|
46 |
|
47 namespace JSC { |
|
48 |
|
void JIT::compileOpCallInitializeCallFrame()
{
    // regT0 holds callee, regT1 holds argCount.
    // Fills in the ArgumentCount, Callee and ScopeChain header slots of the
    // current (already rolled) call frame. The scope chain is read out of the
    // callee JSFunction.
    store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // scopeChain
    storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
    storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
}
|
57 |
|
58 void JIT::emit_op_call_put_result(Instruction* instruction) |
|
59 { |
|
60 int dst = instruction[1].u.operand; |
|
61 emitStore(dst, regT1, regT0); |
|
62 } |
|
63 |
|
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    // Fast path for op_call_varargs. Operands: [callee, argCountRegister,
    // registerOffset]. Unlike plain op_call, the argument count is dynamic and
    // read from a virtual register at runtime.
    int callee = instruction[1].u.operand;
    int argCountRegister = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    emitLoad(callee, regT1, regT0); // callee: tag in regT1, payload in regT0
    emitLoadPayload(argCountRegister, regT2); // argCount
    addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset

    // Take the slow case unless the callee is a cell whose vptr identifies it
    // as a JSFunction.
    emitJumpSlowCaseIfNotJSCell(callee, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    // regT3 = callFrameRegister + (argCount + registerOffset) * sizeof(Register);
    // link the new frame back to the caller, then make it current.
    mul32(Imm32(sizeof(Register)), regT3, regT3);
    addPtr(callFrameRegister, regT3);
    storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
    move(regT3, callFrameRegister);

    move(regT2, regT1); // argCount

    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
|
89 |
|
void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_call_varargs: the callee was not a JSFunction cell.
    // Registers still hold the fast path's values (regT1:regT0 = callee,
    // regT2 = argCount, regT3 = argCount + registerOffset), since both slow
    // cases branch out before the frame is rolled.
    int callee = instruction[1].u.operand;

    linkSlowCaseIfNotJSCell(iter, callee); // not-a-cell check
    linkSlowCase(iter); // vptr (not-a-JSFunction) check

    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.addArgument(regT1, regT0); // callee value
    stubCall.addArgument(regT3); // registerOffset (argCount + static offset)
    stubCall.addArgument(regT2); // argCount
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
|
105 |
|
void JIT::emit_op_ret(Instruction* currentInstruction)
{
    // Return from a JS function: load the return value, restore the caller's
    // frame and return address, and return to the caller's machine code.
    unsigned dst = currentInstruction[1].u.operand;

    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    emitLoad(dst, regT1, regT0); // return value: tag in regT1, payload in regT0
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}
|
121 |
|
void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
    // Constructor return: if the explicit return value is an object, return
    // it; otherwise return the 'this' object instead.
    unsigned result = currentInstruction[1].u.operand;
    unsigned thisReg = currentInstruction[2].u.operand;

    // We could JIT generate the deref, only calling out to C when the refcount hits zero.
    if (m_codeBlock->needsFullScopeChain())
        JITStubCall(this, cti_op_ret_scopeChain).call();

    emitLoad(result, regT1, regT0);
    // Branch away if the result is not a cell, or is a cell whose structure's
    // type is not ObjectType.
    Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));

    // Result is an object: return it.
    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();

    // Result is not an object: return 'this' instead (same epilogue).
    notJSCell.link(this);
    notObject.link(this);
    emitLoad(thisReg, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
    emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);

    restoreReturnAddressBeforeReturn(regT2);
    ret();
}
|
152 |
|
153 void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
|
154 { |
|
155 compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call); |
|
156 } |
|
157 |
|
158 void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
|
159 { |
|
160 compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval); |
|
161 } |
|
162 |
|
void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Slow path for op_call_varargs. Note: varargs calls are never linked, so
    // no call-link-info slot is consumed here.
    compileOpCallVarargsSlowCase(currentInstruction, iter);
}
|
167 |
|
168 void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) |
|
169 { |
|
170 compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct); |
|
171 } |
|
172 |
|
173 void JIT::emit_op_call(Instruction* currentInstruction) |
|
174 { |
|
175 compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); |
|
176 } |
|
177 |
|
178 void JIT::emit_op_call_eval(Instruction* currentInstruction) |
|
179 { |
|
180 compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++); |
|
181 } |
|
182 |
|
void JIT::emit_op_load_varargs(Instruction* currentInstruction)
{
    // Spread the arguments object at argsOffset onto the register file via the
    // cti_op_load_varargs stub, then record the resulting argument count.
    int argCountDst = currentInstruction[1].u.operand;
    int argsOffset = currentInstruction[2].u.operand;

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(Imm32(argsOffset));
    stubCall.call();
    // Stores a naked int32 in the register file (no JSValue tag) — consumed
    // later by emitLoadPayload in compileOpCallVarargs.
    store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
}
|
194 |
|
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
    // Fast path for op_call_varargs. Varargs calls are never linked, so no
    // call-link-info slot is consumed.
    compileOpCallVarargs(currentInstruction);
}
|
199 |
|
200 void JIT::emit_op_construct(Instruction* currentInstruction) |
|
201 { |
|
202 compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++); |
|
203 } |
|
204 |
|
205 #if !ENABLE(JIT_OPTIMIZE_CALL) |
|
206 |
|
207 /* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
208 |
|
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    // Generic (non-linking) call path, used when JIT_OPTIMIZE_CALL is off.
    // Operands: [callee, argCount, registerOffset]. The call-link-info index
    // parameter is unused on this path.
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    Jump wasEval;
    if (opcodeID == op_call_eval) {
        // Try the eval stub first. A non-empty result tag in regT1 means eval
        // actually ran and regT1:regT0 already hold the result, so skip the
        // ordinary call sequence below.
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
    }

    emitLoad(callee, regT1, regT0);

    // Take the slow case unless the callee is a cell whose vptr identifies it
    // as a JSFunction.
    emitJumpSlowCaseIfNotJSCell(callee, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
|
242 |
|
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    // Slow path for the non-linking call sequence: the callee was not a
    // JSFunction cell, so dispatch through the generic C++ stub instead.
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    linkSlowCaseIfNotJSCell(iter, callee); // not-a-cell check
    linkSlowCase(iter); // vptr (not-a-JSFunction) check

    // op_construct gets the construct stub; op_call and op_call_eval share the
    // generic call stub.
    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.addArgument(callee);
    stubCall.addArgument(JIT::Imm32(registerOffset));
    stubCall.addArgument(JIT::Imm32(argCount));
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
|
260 |
|
261 #else // !ENABLE(JIT_OPTIMIZE_CALL) |
|
262 |
|
263 /* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
264 |
|
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    // Optimized (linkable) call path, used when JIT_OPTIMIZE_CALL is on.
    // Emits a patchable callee check so that, once the call has been linked to
    // a specific JSFunction, subsequent calls take this fast path directly.
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    Jump wasEval;
    if (opcodeID == op_call_eval) {
        // Try the eval stub first. A non-empty result tag in regT1 means eval
        // actually ran and regT1:regT0 already hold the result.
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
    }

    emitLoad(callee, regT1, regT0);

    // Patchable pointer compare against the linked callee. Initially compares
    // against 0, so the first execution always takes the slow path, which
    // performs the linking.
    DataLabelPtr addressOfLinkedFunctionCheck;

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    // The patching code relies on a fixed distance between the compare and the
    // branch; the uninterrupted sequence above guarantees it.
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The payload compare above is only meaningful for cells; also slow-case
    // on a non-cell tag.
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));

    // The following is the fast case, only used when a callee can be linked.

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT2);

    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
    storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee; the target is patched in when the call is linked.
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
|
317 |
|
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    // Slow path for the optimized (linkable) call sequence. Reached either
    // because the callee did not match the linked function (or no function is
    // linked yet), or because the callee value was not a cell.
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    linkSlowCase(iter); // patchable callee-compare failed
    linkSlowCase(iter); // callee tag was not CellTag

    // Fast check for JS function.
    Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
    Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    // Call through the linking stub, which will patch the fast path to call
    // this callee directly next time; record the return location for patching.
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());

    // Done! - return back to the hot path.
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // This handles host functions
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);

    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.addArgument(callee);
    stubCall.addArgument(JIT::Imm32(registerOffset));
    stubCall.addArgument(JIT::Imm32(argCount));
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
|
355 |
|
356 /* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */ |
|
357 |
|
358 #endif // !ENABLE(JIT_OPTIMIZE_CALL) |
|
359 |
|
360 } // namespace JSC |
|
361 |
|
362 #endif // USE(JSVALUE32_64) |
|
363 #endif // ENABLE(JIT) |