JavaScriptCore/runtime/Collector.cpp
/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "Collector.h"

#include "ArgList.h"
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "JSString.h"
#include "JSValue.h"
#include "JSZombie.h"
#include "MarkStack.h"
#include "Nodes.h"
#include "Tracing.h"
#include <algorithm>
#include <limits.h>
#include <setjmp.h>
#include <stdlib.h>
#include <wtf/FastMalloc.h>
#include <wtf/HashCountedSet.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>

#if OS(DARWIN)

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>

#elif OS(WINDOWS)

#include <windows.h>
#include <malloc.h>

#elif OS(HAIKU)

#include <OS.h>

#elif OS(UNIX)

#include <stdlib.h>
#if !OS(HAIKU)
#include <sys/mman.h>
#endif
#include <unistd.h>

#if OS(SOLARIS)
#include <thread.h>
#else
#include <pthread.h>
#endif

#if HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

#if OS(QNX)
#include <fcntl.h>
#include <sys/procfs.h>
#include <stdio.h>
#include <errno.h>
#endif

#endif

#define COLLECT_ON_EVERY_ALLOCATION 0

using std::max;

namespace JSC {

// tunable parameters

const size_t GROWTH_FACTOR = 2;
const size_t LOW_WATER_FACTOR = 4;
const size_t ALLOCATIONS_PER_COLLECTION = 3600;
// This value has to be a macro to be used in max() without introducing
// a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
#define MIN_ARRAY_SIZE (static_cast<size_t>(14))

#if ENABLE(JSC_MULTIPLE_THREADS)

#if OS(DARWIN)
typedef mach_port_t PlatformThread;
#elif OS(WINDOWS)
typedef HANDLE PlatformThread;
#endif

class Heap::Thread {
public:
    Thread(pthread_t pthread, const PlatformThread& platThread, void* base)
        : posixThread(pthread)
        , platformThread(platThread)
        , stackBase(base)
    {
    }

    Thread* next;
    pthread_t posixThread;
    PlatformThread platformThread;
    void* stackBase;
};

#endif

Heap::Heap(JSGlobalData* globalData)
    : m_markListSet(0)
#if ENABLE(JSC_MULTIPLE_THREADS)
    , m_registeredThreads(0)
    , m_currentThreadRegistrar(0)
#endif
#if OS(SYMBIAN)
    , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
#endif
    , m_globalData(globalData)
{
    ASSERT(globalData);
    memset(&m_heap, 0, sizeof(CollectorHeap));
    allocateBlock();
}

Heap::~Heap()
{
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(!isBusy());

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

    delete m_markListSet;
    m_markListSet = 0;

    freeBlocks();

#if ENABLE(JSC_MULTIPLE_THREADS)
    if (m_currentThreadRegistrar) {
        int error = pthread_key_delete(m_currentThreadRegistrar);
        ASSERT_UNUSED(error, !error);
    }

    MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
    for (Heap::Thread* t = m_registeredThreads; t;) {
        Heap::Thread* next = t->next;
        delete t;
        t = next;
    }
#endif
#if OS(SYMBIAN)
    m_blockallocator.destroy();
#endif
    m_globalData = 0;
}

NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
#if OS(DARWIN)
    vm_address_t address = 0;
    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
#elif OS(SYMBIAN)
    void* address = m_blockallocator.alloc();
    if (!address)
        CRASH();
#elif OS(WINCE)
    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#else
    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
#endif
    memset(address, 0, BLOCK_SIZE);
#elif HAVE(POSIX_MEMALIGN)
    void* address;
    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
#else

#if ENABLE(JSC_MULTIPLE_THREADS)
#error Need to initialize pagesize safely.
#endif
    static size_t pagesize = getpagesize();

    size_t extra = 0;
    if (BLOCK_SIZE > pagesize)
        extra = BLOCK_SIZE - pagesize;
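    // mmap guarantees no alignment beyond page size, so over-allocate by
    // (BLOCK_SIZE - pagesize), then unmap the misaligned head and the unused
    // tail so that exactly one BLOCK_SIZE-aligned block remains mapped.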
    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);

    size_t adjust = 0;
    if ((address & BLOCK_OFFSET_MASK) != 0)
        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);

    if (adjust > 0)
        munmap(reinterpret_cast<char*>(address), adjust);

    if (adjust < extra)
        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);

    address += adjust;
#endif

    // Initialize block.

    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
    block->heap = this;
    clearMarkBits(block);

    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
    for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
        new (&block->cells[i]) JSCell(dummyMarkableCellStructure);

    // Add block to blocks vector.

    size_t numBlocks = m_heap.numBlocks;
    if (m_heap.usedBlocks == numBlocks) {
        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
        if (numBlocks > maxNumBlocks)
            CRASH();
        numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
        m_heap.numBlocks = numBlocks;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
    }
    m_heap.blocks[m_heap.usedBlocks++] = block;

    return block;
}

NEVER_INLINE void Heap::freeBlock(size_t block)
{
    m_heap.didShrink = true;

    ObjectIterator it(m_heap, block);
    ObjectIterator end(m_heap, block + 1);
    for ( ; it != end; ++it)
        (*it)->~JSCell();
    freeBlockPtr(m_heap.blocks[block]);

    // swap with the last block so we compact as we go
    m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
    m_heap.usedBlocks--;

    if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
        m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
    }
}

NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
{
#if OS(DARWIN)
    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
#elif OS(SYMBIAN)
    m_blockallocator.free(reinterpret_cast<void*>(block));
#elif OS(WINCE)
    VirtualFree(block, 0, MEM_RELEASE);
#elif OS(WINDOWS)
#if COMPILER(MINGW) && !COMPILER(MINGW64)
    __mingw_aligned_free(block);
#else
    _aligned_free(block);
#endif
#elif HAVE(POSIX_MEMALIGN)
    free(block);
#else
    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
#endif
}
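// Tears down the whole heap at destroy() time: unprotected cells are
// finalized first, which is expected to release any remaining protections
// (hence the ASSERT below); then the previously protected cells are
// finalized, and finally the blocks themselves are released.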
void Heap::freeBlocks()
{
    ProtectCountSet protectedValuesCopy = m_protectedValues;

    clearMarkBits();
    ProtectCountSet::iterator protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        markCell(it->first);

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it)
        (*it)->~JSCell();

    ASSERT(!protectedObjectCount());

    protectedValuesEnd = protectedValuesCopy.end();
    for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
        it->first->~JSCell();

    for (size_t block = 0; block < m_heap.usedBlocks; ++block)
        freeBlockPtr(m_heap.blocks[block]);

    fastFree(m_heap.blocks);

    memset(&m_heap, 0, sizeof(CollectorHeap));
}

void Heap::recordExtraCost(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

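    // Collect once the untracked cost both exceeds the fixed threshold and
    // amounts to more than half of the heap's own footprint.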
    if (m_heap.extraCost > maxExtraCost && m_heap.extraCost > m_heap.usedBlocks * BLOCK_SIZE / 2) {
        // If the last iteration through the heap deallocated blocks, we need
        // to clean up remaining garbage before marking. Otherwise, the conservative
        // marking mechanism might follow a pointer to unmapped memory.
        if (m_heap.didShrink)
            sweep();
        reset();
    }
    m_heap.extraCost += cost;
}

void* Heap::allocate(size_t s)
{
    typedef HeapConstants::Block Block;
    typedef HeapConstants::Cell Cell;

    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT_UNUSED(s, s <= HeapConstants::cellSize);

    ASSERT(m_heap.operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_heap.operationInProgress == NoOperation);
#endif

allocate:

    // Fast case: find the next garbage cell and recycle it.

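    // The last cell of every block stays permanently marked (see
    // clearMarkBits), so the inner scan always stops at a marked cell and
    // needs no separate bounds check.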
    do {
        ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
                Cell* cell = &block->cells[m_heap.nextCell];

                m_heap.operationInProgress = Allocation;
                JSCell* imp = reinterpret_cast<JSCell*>(cell);
                imp->~JSCell();
                m_heap.operationInProgress = NoOperation;

                ++m_heap.nextCell;
                return cell;
            }
            block->marked.advanceToNextPossibleFreeCell(m_heap.nextCell);
        } while (m_heap.nextCell != HeapConstants::cellsPerBlock);
        m_heap.nextCell = 0;
    } while (++m_heap.nextBlock != m_heap.usedBlocks);

    // Slow case: reached the end of the heap. Mark live objects and start over.

    reset();
    goto allocate;
}

void Heap::resizeBlocks()
{
    m_heap.didShrink = false;

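    // Size with hysteresis to avoid thrashing: grow when below the live cell
    // count plus one collection's worth of headroom, shrink only when above
    // 125% of that target.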
    size_t usedCellCount = markedCells();
    size_t minCellCount = usedCellCount + max(ALLOCATIONS_PER_COLLECTION, usedCellCount);
    size_t minBlockCount = (minCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    size_t maxCellCount = 1.25f * minCellCount;
    size_t maxBlockCount = (maxCellCount + HeapConstants::cellsPerBlock - 1) / HeapConstants::cellsPerBlock;

    if (m_heap.usedBlocks < minBlockCount)
        growBlocks(minBlockCount);
    else if (m_heap.usedBlocks > maxBlockCount)
        shrinkBlocks(maxBlockCount);
}

void Heap::growBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks < neededBlocks);
    while (m_heap.usedBlocks < neededBlocks)
        allocateBlock();
}

void Heap::shrinkBlocks(size_t neededBlocks)
{
    ASSERT(m_heap.usedBlocks > neededBlocks);

    // Clear the always-on last bit, so isEmpty() isn't fooled by it.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);

    for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
        if (m_heap.blocks[i]->marked.isEmpty()) {
            freeBlock(i);
        } else
            ++i;
    }

    // Reset the always-on last bit.
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
}

#if OS(WINCE)
JS_EXPORTDATA void* g_stackBase = 0;

inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}

static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif

#if OS(QNX)
static inline void *currentThreadStackBaseQNX()
{
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        struct _debug_thread_info threadInfo;
        memset(&threadInfo, 0, sizeof(threadInfo));
        threadInfo.tid = pthread_self();
        int fd = open("/proc/self", O_RDONLY);
        if (fd == -1) {
            LOG_ERROR("Unable to open /proc/self (errno: %d)", errno);
            return 0;
        }
        devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0);
        close(fd);
        stackBase = reinterpret_cast<void*>(threadInfo.stkbase);
        stackSize = threadInfo.stksize;
        ASSERT(stackBase);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
}
#endif

static inline void* currentThreadStackBase()
{
#if OS(DARWIN)
    pthread_t thread = pthread_self();
    return pthread_get_stackaddr_np(thread);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    __asm {
        MOV EAX, FS:[18h]
        MOV pTib, EAX
    }
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
    // offset 0x18 from the FS segment register gives a pointer to
    // the thread information block for the current thread
    NT_TIB* pTib;
    asm ( "movl %%fs:0x18, %0\n"
          : "=r" (pTib)
        );
    return static_cast<void*>(pTib->StackBase);
#elif OS(WINDOWS) && CPU(X86_64)
    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
    return reinterpret_cast<void*>(pTib->StackBase);
#elif OS(QNX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    return currentThreadStackBaseQNX();
#elif OS(SOLARIS)
    stack_t s;
    thr_stksegment(&s);
    return s.ss_sp;
#elif OS(OPENBSD)
    pthread_t thread = pthread_self();
    stack_t stack;
    pthread_stackseg_np(thread, &stack);
    return stack.ss_sp;
#elif OS(SYMBIAN)
    TThreadStackInfo info;
    RThread thread;
    thread.StackInfo(info);
    return (void*)info.iBase;
#elif OS(HAIKU)
    thread_info threadInfo;
    get_thread_info(find_thread(NULL), &threadInfo);
    return threadInfo.stack_end;
#elif OS(UNIX)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    static void* stackBase = 0;
    static size_t stackSize = 0;
    static pthread_t stackThread;
    pthread_t thread = pthread_self();
    if (stackBase == 0 || thread != stackThread) {
        pthread_attr_t sattr;
        pthread_attr_init(&sattr);
#if HAVE(PTHREAD_NP_H) || OS(NETBSD)
        // e.g. on FreeBSD 5.4, neundorf@kde.org
        pthread_attr_get_np(thread, &sattr);
#else
        // FIXME: this function is non-portable; other POSIX systems may have different np alternatives
        pthread_getattr_np(thread, &sattr);
#endif
        int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
        (void)rc; // FIXME: Deal with error code somehow? Seems fatal.
        ASSERT(stackBase);
        pthread_attr_destroy(&sattr);
        stackThread = thread;
    }
    return static_cast<char*>(stackBase) + stackSize;
#elif OS(WINCE)
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    MutexLocker locker(mutex);
    if (g_stackBase)
        return g_stackBase;
    else {
        int dummy;
        return getStackBase(&dummy);
    }
#else
#error Need a way to get the stack base on this platform
#endif
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return pthread_getw32threadhandle_np(pthread_self());
#endif
}

void Heap::makeUsableFromMultipleThreads()
{
    if (m_currentThreadRegistrar)
        return;

    int error = pthread_key_create(&m_currentThreadRegistrar, unregisterThread);
    if (error)
        CRASH();
}

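// Registers the calling thread so that its stack can be scanned conservatively
// during collection. The pthread key's destructor (unregisterThread) removes
// the thread automatically when it exits.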
void Heap::registerThread()
{
    ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());

    if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
        return;

    pthread_setspecific(m_currentThreadRegistrar, this);
    Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase());

    MutexLocker lock(m_registeredThreadsMutex);

    thread->next = m_registeredThreads;
    m_registeredThreads = thread;
}

void Heap::unregisterThread(void* p)
{
    if (p)
        static_cast<Heap*>(p)->unregisterThread();
}

void Heap::unregisterThread()
{
    pthread_t currentPosixThread = pthread_self();

    MutexLocker lock(m_registeredThreadsMutex);

    if (pthread_equal(currentPosixThread, m_registeredThreads->posixThread)) {
        Thread* t = m_registeredThreads;
        m_registeredThreads = m_registeredThreads->next;
        delete t;
    } else {
        Heap::Thread* last = m_registeredThreads;
        Heap::Thread* t;
        for (t = m_registeredThreads->next; t; t = t->next) {
            if (pthread_equal(t->posixThread, currentPosixThread)) {
                last->next = t->next;
                break;
            }
            last = t;
        }
        ASSERT(t); // If t is NULL, we never found ourselves in the list.
        delete t;
    }
}

#else // ENABLE(JSC_MULTIPLE_THREADS)

void Heap::registerThread()
{
}

#endif

inline bool isPointerAligned(void* p)
{
    return (((intptr_t)(p) & (sizeof(char*) - 1)) == 0);
}

// Cell size needs to be a power of two for isPossibleCell to be valid.
COMPILE_ASSERT(!(sizeof(CollectorCell) & (sizeof(CollectorCell) - 1)), Collector_cell_size_is_power_of_two);

#if USE(JSVALUE32)
static bool isHalfCellAligned(void *p)
{
    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isHalfCellAligned(p) && p;
}

#else

static inline bool isCellAligned(void *p)
{
    return (((intptr_t)(p) & CELL_MASK) == 0);
}

static inline bool isPossibleCell(void* p)
{
    return isCellAligned(p) && p;
}
#endif // USE(JSVALUE32)

void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
{
    if (start > end) {
        void* tmp = start;
        start = end;
        end = tmp;
    }

    ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
    ASSERT(isPointerAligned(start));
    ASSERT(isPointerAligned(end));

    char** p = static_cast<char**>(start);
    char** e = static_cast<char**>(end);

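    // Treat each word in [start, end) as a potential pointer. A word is
    // marked only if it is (half-)cell-aligned, its offset falls within a
    // block's cell array, and the candidate block is actually one of ours.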
    CollectorBlock** blocks = m_heap.blocks;
    while (p != e) {
        char* x = *p++;
        if (isPossibleCell(x)) {
            size_t usedBlocks;
            uintptr_t xAsBits = reinterpret_cast<uintptr_t>(x);
            xAsBits &= CELL_ALIGN_MASK;

            uintptr_t offset = xAsBits & BLOCK_OFFSET_MASK;
            const size_t lastCellOffset = sizeof(CollectorCell) * (CELLS_PER_BLOCK - 1);
            if (offset > lastCellOffset)
                continue;

            CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
            usedBlocks = m_heap.usedBlocks;
            for (size_t block = 0; block < usedBlocks; block++) {
                if (blocks[block] != blockAddr)
                    continue;
                markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                markStack.drain();
            }
        }
    }
}

void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack)
{
    void* dummy;
    void* stackPointer = &dummy;
    void* stackBase = currentThreadStackBase();
    markConservatively(markStack, stackPointer, stackBase);
}

#if COMPILER(GCC)
#define REGISTER_BUFFER_ALIGNMENT __attribute__ ((aligned (sizeof(void*))))
#else
#define REGISTER_BUFFER_ALIGNMENT
#endif

void Heap::markCurrentThreadConservatively(MarkStack& markStack)
{
    // setjmp forces volatile registers onto the stack
    jmp_buf registers REGISTER_BUFFER_ALIGNMENT;
#if COMPILER(MSVC)
#pragma warning(push)
#pragma warning(disable: 4611)
#endif
    setjmp(registers);
#if COMPILER(MSVC)
#pragma warning(pop)
#endif

    markCurrentThreadConservativelyInternal(markStack);
}

#if ENABLE(JSC_MULTIPLE_THREADS)

static inline void suspendThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_suspend(platformThread);
#elif OS(WINDOWS)
    SuspendThread(platformThread);
#else
#error Need a way to suspend threads on this platform
#endif
}

static inline void resumeThread(const PlatformThread& platformThread)
{
#if OS(DARWIN)
    thread_resume(platformThread);
#elif OS(WINDOWS)
    ResumeThread(platformThread);
#else
#error Need a way to resume threads on this platform
#endif
}

typedef unsigned long usword_t; // word size, assumed to be either 32 or 64 bit

#if OS(DARWIN)

#if CPU(X86)
typedef i386_thread_state_t PlatformThreadRegisters;
#elif CPU(X86_64)
typedef x86_thread_state64_t PlatformThreadRegisters;
#elif CPU(PPC)
typedef ppc_thread_state_t PlatformThreadRegisters;
#elif CPU(PPC64)
typedef ppc_thread_state64_t PlatformThreadRegisters;
#elif CPU(ARM)
typedef arm_thread_state_t PlatformThreadRegisters;
#else
#error Unknown Architecture
#endif

#elif OS(WINDOWS) && CPU(X86)
typedef CONTEXT PlatformThreadRegisters;
#else
#error Need a thread register struct for this platform
#endif

static size_t getPlatformThreadRegisters(const PlatformThread& platformThread, PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if CPU(X86)
    unsigned user_count = sizeof(regs)/sizeof(int);
    thread_state_flavor_t flavor = i386_THREAD_STATE;
#elif CPU(X86_64)
    unsigned user_count = x86_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = x86_THREAD_STATE64;
#elif CPU(PPC)
    unsigned user_count = PPC_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE;
#elif CPU(PPC64)
    unsigned user_count = PPC_THREAD_STATE64_COUNT;
    thread_state_flavor_t flavor = PPC_THREAD_STATE64;
#elif CPU(ARM)
    unsigned user_count = ARM_THREAD_STATE_COUNT;
    thread_state_flavor_t flavor = ARM_THREAD_STATE;
#else
#error Unknown Architecture
#endif

    kern_return_t result = thread_get_state(platformThread, flavor, (thread_state_t)&regs, &user_count);
    if (result != KERN_SUCCESS) {
        WTFReportFatalError(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, 
                            "JavaScript garbage collection failed because thread_get_state returned an error (%d). This is probably the result of running inside Rosetta, which is not supported.", result);
        CRASH();
    }
    return user_count * sizeof(usword_t);
// end OS(DARWIN)

#elif OS(WINDOWS) && CPU(X86)
    regs.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL | CONTEXT_SEGMENTS;
    GetThreadContext(platformThread, &regs);
    return sizeof(CONTEXT);
#else
#error Need a way to get thread registers on this platform
#endif
}

static inline void* otherThreadStackPointer(const PlatformThreadRegisters& regs)
{
#if OS(DARWIN)

#if __DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.__esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.__rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.__r1);
#elif CPU(ARM)
    return reinterpret_cast<void*>(regs.__sp);
#else
#error Unknown Architecture
#endif

#else // !__DARWIN_UNIX03

#if CPU(X86)
    return reinterpret_cast<void*>(regs.esp);
#elif CPU(X86_64)
    return reinterpret_cast<void*>(regs.rsp);
#elif CPU(PPC) || CPU(PPC64)
    return reinterpret_cast<void*>(regs.r1);
#else
#error Unknown Architecture
#endif

#endif // __DARWIN_UNIX03

// end OS(DARWIN)
#elif CPU(X86) && OS(WINDOWS)
    return reinterpret_cast<void*>((uintptr_t) regs.Esp);
#else
#error Need a way to get the stack pointer for another thread on this platform
#endif
}

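// Scans another thread's registers and stack. The thread is suspended for the
// duration of the scan so that neither can change underneath us.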
void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread)
{
    suspendThread(thread->platformThread);

    PlatformThreadRegisters regs;
    size_t regSize = getPlatformThreadRegisters(thread->platformThread, regs);

    // mark the thread's registers
    markConservatively(markStack, static_cast<void*>(&regs), static_cast<void*>(reinterpret_cast<char*>(&regs) + regSize));

    void* stackPointer = otherThreadStackPointer(regs);
    markConservatively(markStack, stackPointer, thread->stackBase);

    resumeThread(thread->platformThread);
}

#endif

void Heap::markStackObjectsConservatively(MarkStack& markStack)
{
    markCurrentThreadConservatively(markStack);

#if ENABLE(JSC_MULTIPLE_THREADS)

    if (m_currentThreadRegistrar) {

        MutexLocker lock(m_registeredThreadsMutex);

#ifndef NDEBUG
        // Forbid malloc during the mark phase. Marking a thread suspends it, so 
        // a malloc inside markChildren() would risk a deadlock with a thread that had been 
        // suspended while holding the malloc lock.
        fastMallocForbid();
#endif
        // It is safe to access the registeredThreads list, because we earlier asserted that locks are being held,
        // and since this is a shared heap, they are real locks.
        for (Thread* thread = m_registeredThreads; thread; thread = thread->next) {
            if (!pthread_equal(thread->posixThread, pthread_self()))
                markOtherThreadConservatively(markStack, thread);
        }
#ifndef NDEBUG
        fastMallocAllow();
#endif
    }
#endif
}

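// m_protectedValues is a counted set, so protect()/unprotect() calls nest; for
// example:
//     heap->protect(v); heap->protect(v);   // count == 2
//     heap->unprotect(v);                   // still rooted
//     heap->unprotect(v);                   // removed; returns true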
void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

void Heap::markProtectedObjects(MarkStack& markStack)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it) {
        markStack.append(it->first);
        markStack.drain();
    }
}

void Heap::clearMarkBits()
{
    for (size_t i = 0; i < m_heap.usedBlocks; ++i)
        clearMarkBits(m_heap.blocks[i]);
}

void Heap::clearMarkBits(CollectorBlock* block)
{
    // allocate assumes that the last cell in every block is marked.
    block->marked.clearAll();
    block->marked.set(HeapConstants::cellsPerBlock - 1);
}

size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
    ASSERT(startBlock <= m_heap.usedBlocks);
    ASSERT(startCell < HeapConstants::cellsPerBlock);

    if (startBlock >= m_heap.usedBlocks)
        return 0;

    size_t result = 0;
    result += m_heap.blocks[startBlock]->marked.count(startCell);
    for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
        result += m_heap.blocks[i]->marked.count();

    return result;
}

void Heap::sweep()
{
    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();
    m_heap.operationInProgress = Collection;

#if !ENABLE(JSC_ZOMBIES)
    Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
#endif

    DeadObjectIterator it(m_heap, m_heap.nextBlock, m_heap.nextCell);
    DeadObjectIterator end(m_heap, m_heap.usedBlocks);
    for ( ; it != end; ++it) {
        JSCell* cell = *it;
#if ENABLE(JSC_ZOMBIES)
        if (!cell->isZombie()) {
            const ClassInfo* info = cell->classInfo();
            cell->~JSCell();
            new (cell) JSZombie(info, JSZombie::leakedZombieStructure());
            Heap::markCell(cell);
        }
#else
        cell->~JSCell();
        // Callers of sweep assume it's safe to mark any cell in the heap.
        new (cell) JSCell(dummyMarkableCellStructure);
#endif
    }

    m_heap.operationInProgress = NoOperation;
}

void Heap::markRoots()
{
#ifndef NDEBUG
    if (m_globalData->isSharedInstance()) {
        ASSERT(JSLock::lockCount() > 0);
        ASSERT(JSLock::currentThreadIsHoldingLock());
    }
#endif

    ASSERT(m_heap.operationInProgress == NoOperation);
    if (m_heap.operationInProgress != NoOperation)
        CRASH();

    m_heap.operationInProgress = Collection;

    MarkStack& markStack = m_globalData->markStack;

    // Reset mark bits.
    clearMarkBits();

    // Mark stack roots.
    markStackObjectsConservatively(markStack);
    m_globalData->interpreter->registerFile().markCallFrames(markStack, this);

    // Mark explicitly registered roots.
    markProtectedObjects(markStack);

    // Mark misc. other roots.
    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
    if (m_globalData->exception)
        markStack.append(m_globalData->exception);
    if (m_globalData->functionCodeBlockBeingReparsed)
        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
    if (m_globalData->firstStringifierToMark)
        JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);

    // Mark the small strings cache last, since it will clear itself if nothing
    // else has marked it.
    m_globalData->smallStrings.markChildren(markStack);

    markStack.drain();
    markStack.compact();

    m_heap.operationInProgress = NoOperation;
}

size_t Heap::objectCount() const
{
    return m_heap.nextBlock * HeapConstants::cellsPerBlock // allocated full blocks
           + m_heap.nextCell // allocated cells in current block
           + markedCells(m_heap.nextBlock, m_heap.nextCell) // marked cells in remainder of m_heap
           - m_heap.usedBlocks; // 1 cell per block is a dummy sentinel
}

void Heap::addToStatistics(Heap::Statistics& statistics) const
{
    statistics.size += m_heap.usedBlocks * BLOCK_SIZE;
    statistics.free += m_heap.usedBlocks * BLOCK_SIZE - (objectCount() * HeapConstants::cellSize);
}

Heap::Statistics Heap::statistics() const
{
    Statistics statistics = { 0, 0 };
    addToStatistics(statistics);
    return statistics;
}

size_t Heap::size() const
{
    return m_heap.usedBlocks * BLOCK_SIZE;
}

size_t Heap::globalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            ++count;
            o = o->next();
        } while (o != head);
    }
    return count;
}

size_t Heap::protectedGlobalObjectCount()
{
    size_t count = 0;
    if (JSGlobalObject* head = m_globalData->head) {
        JSGlobalObject* o = head;
        do {
            if (m_protectedValues.contains(o))
                ++count;
            o = o->next();
        } while (o != head);
    }

    return count;
}

size_t Heap::protectedObjectCount()
{
    return m_protectedValues.size();
}

static const char* typeName(JSCell* cell)
{
    if (cell->isString())
        return "string";
#if USE(JSVALUE32)
    if (cell->isNumber())
        return "number";
#endif
    if (cell->isGetterSetter())
        return "Getter-Setter";
    if (cell->isAPIValueWrapper())
        return "API wrapper";
    if (cell->isPropertyNameIterator())
        return "For-in iterator";
    if (!cell->isObject())
        return "[empty cell]";
    const ClassInfo* info = cell->classInfo();
    return info ? info->className : "Object";
}

HashCountedSet<const char*>* Heap::protectedObjectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        counts->add(typeName(it->first));

    return counts;
}

HashCountedSet<const char*>* Heap::objectTypeCounts()
{
    HashCountedSet<const char*>* counts = new HashCountedSet<const char*>;

    LiveObjectIterator it = primaryHeapBegin();
    LiveObjectIterator heapEnd = primaryHeapEnd();
    for ( ; it != heapEnd; ++it)
        counts->add(typeName(*it));

    return counts;
}

bool Heap::isBusy()
{
    return m_heap.operationInProgress != NoOperation;
}

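// Lazy collection: marks the live set and rewinds the allocation cursor. Dead
// cells are finalized later, as allocate() reuses them.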
void Heap::reset()
{
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
#if ENABLE(JSC_ZOMBIES)
    sweep();
#endif
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

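// Eager collection: marks, then sweeps immediately so every dead cell is
// finalized before returning.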
void Heap::collectAllGarbage()
{
    JAVASCRIPTCORE_GC_BEGIN();

    // If the last iteration through the heap deallocated blocks, we need
    // to clean up remaining garbage before marking. Otherwise, the conservative
    // marking mechanism might follow a pointer to unmapped memory.
    if (m_heap.didShrink)
        sweep();

    markRoots();

    JAVASCRIPTCORE_GC_MARKED();

    m_heap.nextCell = 0;
    m_heap.nextBlock = 0;
    m_heap.nextNumber = 0;
    m_heap.extraCost = 0;
    sweep();
    resizeBlocks();

    JAVASCRIPTCORE_GC_END();
}

LiveObjectIterator Heap::primaryHeapBegin()
{
    return LiveObjectIterator(m_heap, 0);
}

LiveObjectIterator Heap::primaryHeapEnd()
{
    return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}

} // namespace JSC