JavaScriptCore/jit/JITPropertyAccess.cpp
/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if !USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

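// Shared thunk for string[index] accesses: bails out (returning 0 in regT0) unless regT0 holds
// a non-rope JSString (m_fiberCount of zero) and regT1 holds an in-bounds index, then returns
// the cached single-character string for characters below 0x100. Callers test regT0 for zero
// to detect failure.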
JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));

    // Load the string length into regT2, and start the process of loading the data pointer into regT0.
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large.
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character.
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);

    failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
    jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(Imm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(&jit, pool);
    return patchBuffer.finalizeCode().m_code;
}

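// Fast path for indexed reads: the index and base are checked inline (int32 index, JSArray
// vptr, index within m_vectorLength, non-hole value); each check that fails defers to the
// matching linkSlowCase in emitSlow_op_get_by_val below.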
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it was a uint32 against m_vectorLength - which will always fail if the
    // number was signed, since m_vectorLength is always less than intmax (the total allocation size is
    // always less than 4GB). As such, zero-extending will have been correct (and extending the value to
    // 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
    // sign-extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitPutVirtualRegister(dst);
}

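// The slow cases must be linked in exactly the order the fast path added them: property int32
// check, base cell check, array vptr check, vector length check, then the empty-value check.
// After the array check fails we first try the string thunk above before giving up and calling
// the generic stub.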
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

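// Load a property when the offset is only known at runtime (in 'offset', in units of pointers).
// The structure's property storage capacity tells us whether the object is still using inline
// storage or has spilled to external storage.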
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));

    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
    Jump finishedLoad = jump();
    notUsingInlineStorage.link(this);
    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
    finishedLoad.link(this);
}

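// op_get_by_pname is the fast form of a get_by_val inside a for-in loop: if the property
// register still holds the value the iterator produced, the base's Structure matches the
// iterator's cached Structure, and the index is within the cacheable slots, the value can be
// read straight out of the property storage without a hash lookup.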
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(Imm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property != expected value
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // structure check
    linkSlowCase(iter); // index out of cacheable range

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

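// Fast path for indexed writes. Writes within m_vectorLength are done inline; storing into a
// hole additionally bumps m_numValuesInVector and, for writes at or beyond m_length, updates
// m_length. Everything else (non-int index, non-array base, out-of-vector writes) goes to the
// slow case.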
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
#else
    emitFastArithImmToInt(regT1);
#endif
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);

    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT0);
    storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    move(regT1, regT0);
    add32(Imm32(1), regT0);
    store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);

    end.link(this);
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    JITStubCall stubCall(this, cti_op_get_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.call(resultVReg);

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned valueVReg = currentInstruction[3].u.operand;
    unsigned direct = currentInstruction[8].u.operand;

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    stubCall.call();

    m_propertyAccessInstructionIndex++;
}

void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}

#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

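// op_method_check fuses a get_by_id with the op_call that follows it. The common case - a
// method looked up on the prototype - is compiled as two patchable structure checks plus a
// patchable constant load of the function, filled in later by patchMethodCallProto(); if any
// check fails we fall back to a regular(ish) get_by_id.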
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);

    match.link(this);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

#else // !ENABLE(JIT_OPTIMIZE_METHOD_CALLS)

// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
void JIT::emit_op_method_check(Instruction*) {}
void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }

#endif

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
    // as a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);

    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
    m_propertyAccessInstructionIndex++;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    // In order to be able to patch both the Structure and the object offset, we store a single pointer,
    // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate code
    // such that the Structure & offset are always at the same distance from it.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
    Label externalLoadComplete(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
    ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);

    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(ImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}

// Compile a store into an object's property storage.  May overwrite the
// value in base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage.  May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    if (structure->isUsingInlineStorage())
        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
    else
        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
    loadPtr(Address(base, offset), result);
}

// Compile a load from a known object's property storage.
void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
{
    if (base->isUsingInlineStorage())
        loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
    else {
        PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
        loadPtr(static_cast<void*>(protoPropertyStorage), temp);
        loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
    }
}

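// Emit a check that the Structure of the given structure's prototype is still the Structure
// it had at compile time; does nothing for a null prototype.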
void JIT::testPrototype(Structure* structure, JumpList& failureCases)
{
    if (structure->m_prototype.isNull())
        return;

    move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
    move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
}

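// Generate a stub for a put_by_id that transitions the object from oldStructure to
// newStructure: verify the old Structure (and, for non-direct puts, the prototype chain),
// grow the property storage if the capacity changed, swap the Structure pointers and their
// refcounts, then store the value at the cached offset.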
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
    testPrototype(oldStructure, failureCases);

    // For a non-direct put, verify that every Structure on the prototype chain is still intact.
    if (!direct) {
        for (RefPtr<Structure>* it = chain->head(); *it; ++it)
            testPrototype(it->get(), failureCases);
    }

    Call callTarget;

    // Emit a call only if a storage reallocation is needed.
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // The refcount decrement is safe, as the CodeBlock should ensure oldStructure->m_refCount > 0.
    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));

    // Write the value.
    compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
}

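// Repatch the get_by_id hot path compiled by compileGetByIdHotPath() so it performs a
// self-access: plant the expected Structure and the property-map offset directly into the
// instruction stream recorded at hotPathBegin.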
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent load's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    ASSERT(!methodCallLinkInfo.cachedStructure);
    methodCallLinkInfo.cachedStructure = structure;
    structure->ref();

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
    prototypeStructure->ref();

    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);

    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
    // and makes the subsequent store's offset automatically correct.
    if (structure->isUsingInlineStorage())
        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

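// Generate a stub for array.length: check the base is a JSArray, load m_length from its
// storage, and return it as an immediate int as long as it fits; link failures back to the
// original slow case.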
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array.
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage.
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);

    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

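// Generate a stub for a get_by_id that finds the property on the base's prototype: check
// both Structures, then either load the cached offset directly or, for getters / custom
// properties, call out through a stub.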
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that
    // is referencing the prototype object - so let's speculatively load its property table nice and early!).
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure has not changed.
    Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
    move(ImmPtr(prototypeStructure), regT3);
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif

    bool needsStubLink = false;

    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We don't want to patch this stub again - subsequent misses go to cti_op_get_by_id_proto_list.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        if (!structure->isUsingInlineStorage()) {
            move(regT0, regT1);
            compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
        } else
            compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

   887 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
       
   888 {
       
   889     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
       
   890     // referencing the prototype object - let's speculatively load it's table nice and early!)
       
   891     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
       
   892 
       
   893     // Check eax is an object of the right Structure.
       
   894     Jump failureCases1 = checkStructure(regT0, structure);
       
   895 
       
   896     // Check the prototype object's Structure had not changed.
       
   897     Structure** prototypeStructureAddress = &(protoObject->m_structure);
       
   898 #if CPU(X86_64)
       
   899     move(ImmPtr(prototypeStructure), regT3);
       
   900     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
       
   901 #else
       
   902     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
       
   903 #endif
       
   904 
       
   905     // Checks out okay!
       
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

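    // JITStubCall::call() records every call it emits in m_calls; since this stub is finalized
    // through its own LinkBuffer rather than with the CodeBlock's main code, those call targets
    // must be linked here.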
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the last stub generated for this access.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);
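    // Each stub's failure paths jump to the stub generated before it, so at runtime the cache is
    // probed newest-first and bottoms out at the generic slow case:
    //
    //     stub[N] --miss--> stub[N-1] --miss--> ... --miss--> stub[0] --miss--> slow case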

    // On success, return to the hot path code, at the point where it will perform the store to the destination for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    structure->ref();
    prototypeStructure->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
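    // The extra refs keep both Structures alive for as long as the stub may run; they are
    // balanced when the owning CodeBlock tears down its structure stub information.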

    // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

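// As privateCompileGetByIdProtoList, but for a property that lives further down the prototype
// chain: the generated stub re-validates every hop described by the StructureChain (count
// entries) before loading or calling the cached property on the final prototype.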
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check that regT0 holds an object with the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
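    // After the loop, protoObject is the prototype that actually holds the cached property, and
    // every object on the path has been guarded against Structure changes by the checks above.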
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the last stub generated for this access.
    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success, return to the hot path code, at the point where it will perform the store to the destination for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();

    // Track the stub we have created so that it will be deleted later.
    structure->ref();
    chain->ref();
    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

    // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);
}

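// Compiles the first stub for a get_by_id that walks the prototype chain, installs it as
// stubInfo->stubRoutine, and retargets the slow-case call to cti_op_get_by_id_proto_list so
// that subsequent misses grow a polymorphic list of stubs rather than repatching the hot path
// again. Failure cases here bail straight back to the original slow case.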
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check that regT0 holds an object with the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    RefPtr<Structure>* chainEntries = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = chainEntries[i].get();

        // Check that the prototype object's Structure has not changed.
        Structure** prototypeStructureAddress = &(protoObject->m_structure);
#if CPU(X86_64)
        move(ImmPtr(currStructure), regT3);
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
#else
        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(ImmPtr(protoObject));
        stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
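    // Whichever branch ran, the result is now in regT0: the direct load targets regT0, and the
    // getter/custom stub calls leave their JSValue result in the return-value register, regT0.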
    Jump success = jump();

    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success, return to the hot path code, at the point where it will perform the store to the destination for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
    stubInfo->stubRoutine = entryLabel;

    // Finally, repatch the hot path's jump to the slow case so that it jumps here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, entryLabel);

    // We only want to patch the hot path once - retarget the slow case to cti_op_get_by_id_proto_list,
    // which builds a polymorphic list of stubs for future misses.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */

#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)

} // namespace JSC

#endif // !USE(JSVALUE32_64)
#endif // ENABLE(JIT)