/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if !USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
#if USE(JSVALUE32)
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
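        // For example, (x >> 37) === (x >> 5) in JS, because only the low five bits of the shift count are used.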
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(JSVALUE64)
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
#if USE(JSVALUE32)
        signExtend32ToPtr(regT0, regT0);
#endif
    }
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}

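// The slow path below must link exactly as many slow cases, in the same order, as the fast path
// above added them (see the "=> N SlowCases" notes); otherwise the slow-case iterator would get
// out of sync.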
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
#if USE(JSVALUE64)
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we cannot
        // represent as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, Imm32(0)));
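        // For example, (-1) >>> 0 evaluates to 4294967295, which cannot be represented as an
        // immediate int, so a negative 32-bit result here has to take the slow case.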
#if USE(JSVALUE32)
        addSlowCase(branchAdd32(Overflow, regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
#endif
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, Imm32(0)));
#if USE(JSVALUE32)
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
#if USE(JSVALUE64)
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
#endif // JSVALUE64
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
#if USE(JSVALUE32)
        linkSlowCase(iter); // Couldn't box result
#endif
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
#if USE(JSVALUE64)
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
#endif
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
#if USE(JSVALUE32)
        linkSlowCase(iter); // Couldn't box result
#endif
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}

void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate
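    // Note that op_jnless jumps when 'op1 < op2' is false, so the inline compares below use the
    // inverted conditions (GreaterThanOrEqual / LessThanOrEqual).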

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
    }
}

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
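    // The double compares use the ...OrUnordered conditions so that a NaN operand, for which
    // 'op1 < op2' is false, also takes the jnless branch.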
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
    }
}

void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(LessThan, regT0, regT1), target);
    }
}

void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    }
}

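// emit_op_jlesseq and emitSlow_op_jlesseq are shared by op_jlesseq and op_jnlesseq; 'invert'
// flips the sense of every comparison for the jnlesseq form (see emit_op_jnlesseq and
// emitSlow_op_jnlesseq below).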
void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
    }
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}

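// In the JSVALUE64 cases below, and'ing with a non-negative Imm32 clears the immediate-integer
// tag bits (the immediate zero-extends), so the result must be re-tagged; a negative Imm32
// sign-extends and leaves the tag intact, hence the 'imm >= 0' check.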
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}

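// The post-increment/decrement fast paths keep the original value in regT0 (that is the result of
// the expression) and write the updated value from regT1 back into srcDst.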
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)

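// On x86 the modulo fast path uses idiv directly: the dividend sits in eax (sign-extended into
// edx by cdq), the divisor in ecx, and the remainder is left in edx.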
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
#if USE(JSVALUE64)
    addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
#else
    emitFastArithDeTagImmediate(X86Registers::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
    signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
#endif
    emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
    emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(X86Registers::eax);
    stubCall.addArgument(X86Registers::ecx);
    stubCall.call(result);
}

#else // CPU(X86) || CPU(X86_64)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_OPTIMIZE_MOD)
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branch32(Equal, regT2, Imm32(1)));

    emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());

    emitPutVirtualRegister(result, regT0);
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(JIT_OPTIMIZE_MOD)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#else
    ASSERT_NOT_REACHED();
#endif
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

#if USE(JSVALUE64)

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

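// compileBinaryArithOp plants the shared integer fast path for op_add, op_sub and op_mul: both
// operands must be immediate ints and the 32-bit operation must not overflow. op_mul also bails
// out on a zero result, since 0 * (a negative number) would have to produce -0, which cannot be
// represented as an immediate int (see the matching extra slow case in the slow-case generator).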
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

|
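// op_div is compiled as a pure double division. Each operand is brought into an
// FP register: constant doubles are unboxed by adding tagTypeNumberRegister and
// moving the bits across, constant ints are loaded directly, and anything else
// is type-checked and converted at run time. The quotient is re-boxed by
// subtracting tagTypeNumberRegister again before being stored.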
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
|
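// If both operand types were statically known to be numbers, emit_op_div
// planted no slow cases, so control should never arrive here; debug builds trap
// with breakpoint(). Otherwise each type check that was actually emitted is
// linked and the division is redone in the cti_op_div stub.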
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // All remaining slow cases fall back to the generic cti_op_div stub.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
|
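// op_sub has no constant-operand fast path in this configuration; it relies
// entirely on the shared compileBinaryArithOp code and its slow-case companion.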
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}
|
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}
|
#else // USE(JSVALUE64)

/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
|
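// In this encoding, double values live in heap-allocated JSNumberCells rather
// than in the tagged value word. When an operand's static type is flagged as
// reusable and floating point is supported, the result double is written
// straight back into that operand's JSNumberCell (see the storeDouble calls
// below), so no new cell needs to be allocated; otherwise the code falls
// through to the tagged-immediate integer path.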
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    Structure* numberStructure = m_globalData->numberStructure.get();
    Jump wasJSNumberCell1;
    Jump wasJSNumberCell2;

    emitGetVirtualRegisters(src1, regT0, src2, regT1);
|
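    // Decide which operand (if either) can donate its JSNumberCell for the
    // result; the two branches below mirror each other with the roles swapped.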
    if (types.second().isReusable() && supportsFloatingPoint()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        Jump op2imm = emitJumpIfImmediateInteger(regT1);
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT1, src2);
            addSlowCase(checkStructure(regT1, numberStructure));
        }

        // (1) In this case src2 is a reusable number cell.
        // Slow case if src1 is not a number type.
        Jump op1imm = emitJumpIfImmediateInteger(regT0);
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT0, src1);
            addSlowCase(checkStructure(regT0, numberStructure));
        }

        // (1a) if we get here, src1 is also a number cell
        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        Jump loadedDouble = jump();
        // (1b) if we get here, src1 is an immediate
        op1imm.link(this);
        emitFastArithImmToInt(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        // (1c)
        loadedDouble.link(this);
        if (opcodeID == op_add)
            addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        else if (opcodeID == op_sub)
            subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        else {
            ASSERT(opcodeID == op_mul);
            mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        }

        // Store the result to the JSNumberCell and jump.
        storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
        move(regT1, regT0);
        emitPutVirtualRegister(dst);
        wasJSNumberCell2 = jump();

        // (2) This handles cases where src2 is an immediate number.
        // Two slow cases - either src1 isn't an immediate integer, or the arithmetic overflows.
        op2imm.link(this);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        Jump op1imm = emitJumpIfImmediateInteger(regT0);
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT0, src1);
            addSlowCase(checkStructure(regT0, numberStructure));
        }

        // (1) In this case src1 is a reusable number cell.
        // Slow case if src2 is not a number type.
        Jump op2imm = emitJumpIfImmediateInteger(regT1);
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT1, src2);
            addSlowCase(checkStructure(regT1, numberStructure));
        }

        // (1a) if we get here, src2 is also a number cell
        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
        Jump loadedDouble = jump();
        // (1b) if we get here, src2 is an immediate
        op2imm.link(this);
        emitFastArithImmToInt(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        // (1c)
        loadedDouble.link(this);
        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        if (opcodeID == op_add)
            addDouble(fpRegT1, fpRegT0);
        else if (opcodeID == op_sub)
            subDouble(fpRegT1, fpRegT0);
        else {
            ASSERT(opcodeID == op_mul);
            mulDouble(fpRegT1, fpRegT0);
        }
|
        // Store the result to the JSNumberCell and jump.
        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
        emitPutVirtualRegister(dst);
        wasJSNumberCell1 = jump();

        // (2) This handles cases where src1 is an immediate number.
        // Two slow cases - either src2 isn't an immediate integer, or the arithmetic overflows.
        op1imm.link(this);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
    } else
        emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);

    if (opcodeID == op_add) {
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    } else if (opcodeID == op_sub) {
        addSlowCase(branchSub32(Overflow, regT1, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
    } else {
        ASSERT(opcodeID == op_mul);
        // Convert regT0 and regT1 from JSImmediates to ints, and check if either is zero.
        emitFastArithImmToInt(regT1);
        Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
        Jump op2NonZero = branchTest32(NonZero, regT1);
        op1Zero.link(this);
        // If either input is zero, add the two together and check whether the result is negative.
        // If it is, one operand was zero and the other negative, so the true result is -0,
        // which is not representable as a JSImmediate.
        move(regT0, regT2);
        addSlowCase(branchAdd32(Signed, regT1, regT2));
        // Skip the above check if neither input is zero.
        op2NonZero.link(this);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
    }
    emitPutVirtualRegister(dst);

    if (types.second().isReusable() && supportsFloatingPoint())
        wasJSNumberCell2.link(this);
    else if (types.first().isReusable() && supportsFloatingPoint())
        wasJSNumberCell1.link(this);
}
|
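// Each check planted by compileBinaryArithOp above is matched by a link here.
// Every entry ends up at the same generic stub call, so the pairing only has to
// agree on which checks were emitted: the operand type checks, the overflow
// check, and the extra -0 check that only op_mul plants.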
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
|
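// With the JSImmediate encoding an integer constant can be folded into the
// boxed operand directly: shifting the constant left by IntegerPayloadShift
// leaves the other operand's low tag bit untouched, so a plain 32-bit add
// yields the correctly tagged sum, and branchAdd32's overflow check still
// catches sums that no longer fit in the payload. For example, assuming the
// usual (value << 1) | 1 layout, 5 is boxed as 0xB; adding 3 << 1 == 6 gives
// 0x11, which is 8 boxed.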
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else {
        compileBinaryArithOp(op_add, result, op1, op2, types);
    }
}
|
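// Each constant fast path above planted two slow cases. The second (overflow)
// entry is linked to the sub32 below, which undoes the speculative shifted-
// constant add so the original boxed operand can be handed to the stub; the
// first (not-an-immediate) entry skips that repair via notImm.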
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
|
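// The fast paths below strip the integer tag first (emitFastArithDeTagImmediate),
// so the multiply operates on the left-shifted payload; multiplying that by the
// positive, untagged constant keeps the shifted form, and re-tagging then yields
// a valid immediate (again assuming the (value << shift) | tag layout noted above).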
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
|
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // No extra -0 slow case is needed here: (op1 * -N) or (-N * op2), which should produce
        // a result of -0, can only arise on the generic path, since the constant used in the
        // fast case above is known to be positive.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
|
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

#endif // USE(JSVALUE64)

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

} // namespace JSC

#endif // !USE(JSVALUE32_64)
#endif // ENABLE(JIT)