diff -r 000000000000 -r a41df078684a kernel/eka/memmodel/epoc/flexible/mmu/mpagearray.cpp
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mpagearray.cpp	Mon Oct 19 15:55:17 2009 +0100
@@ -0,0 +1,1349 @@
+// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include <plat_priv.h>
+#include "mm.h"
+#include "mmu.h"
+
+#include "mpagearray.h"
+#include "mslaballoc.h"
+
+
+static RStaticSlabAllocator<RPageArray::TSegment,KPageArraySegmentBase,KPageArraySegmentEnd> PageSegmentAllocator;
+
+
+//
+// RPageArray::TSegment
+//
+
+RPageArray::TSegment* RPageArray::TSegment::New()
+	{
+	__NK_ASSERT_DEBUG(!MmuLock::IsHeld());
+
+	// allocate segment...
+	TSegment* s = PageSegmentAllocator.Alloc();
+	if(!s)
+		return s;
+
+	// initialise segment...
+	s->iCounts = 1; // lock count = 1, alloc count = 0
+	TPhysAddr* p = s->iPages;
+	TPhysAddr* pEnd = p+KPageArraySegmentSize;
+	TPhysAddr nullPage = EEmptyEntry;
+	do *p++ = nullPage;
+	while(p<pEnd);
+
+	return s;
+	}
+
+
+RPageArray::TSegment* RPageArray::TSegment::Delete(TSegment* aSegment)
+	{
+	__NK_ASSERT_DEBUG(!MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aSegment->iCounts==0);
+#ifdef _DEBUG
+	TPhysAddr* p = aSegment->iPages;
+	TPhysAddr* pEnd = p+KPageArraySegmentSize;
+	do
+		{
+		TPhysAddr a = *p++;
+		if(IsPresent(a))
+			{
+			Kern::Printf("TSegment Delete with allocated pages! [%d]=0x%08x",p-aSegment->iPages-1,a);
+			__NK_ASSERT_DEBUG(0);
+			}
+		}
+	while(p<pEnd);
+#endif
+	PageSegmentAllocator.Free(aSegment);
+	return 0;
+	}
+
+
+TUint RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
+	{
+	TSegment* s = aSegment;
+	__NK_ASSERT_DEBUG(s);
+
+	TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
+	__NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing
+
+#ifdef _DEBUG
+	if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
+		{
+		// check alloc count is consistent...
+		TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
+		__NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
+		TUint realAllocCount = 0;
+		TPhysAddr* p = s->iPages;
+		TPhysAddr* pEnd = p+KPageArraySegmentSize;
+		do
+			{
+			if(IsPresent(*p++))
+				++realAllocCount;
+			}
+		while(p<pEnd);
+		__NK_ASSERT_DEBUG(realAllocCount==allocCount);
+		}
+#endif
+
+	if(oldCounts>1)
+		return oldCounts; // return 'true' to indicate segment still exists
+
+	// delete segment...
+	aSegment = 0;
+	return (TBool)Delete(s); // returns 'false'
+	}
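+
+// Note on iCounts (illustrative decode; the layout follows the masks and shifts
+// used above): the lock count and the alloc count share one word so a single
+// atomic add can update either of them:
+//
+//     TUint lockCount  = iCounts & KPageArraySegmentLockCountMask;    // references held by in-progress operations
+//     TUint allocCount = iCounts >> KPageArraySegmentAllocCountShift; // number of present entries
+//
+// A segment is deleted when the whole word, and hence both counts, reaches zero
+// (see Unlock above).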
+
+
+FORCE_INLINE void RPageArray::TSegment::AdjustAllocCount(TInt aDelta)
+	{
+	__NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
+	__e32_atomic_add_ord32(&iCounts, TUint32(aDelta)<<KPageArraySegmentAllocCountShift);
+	}
+
+
+#ifdef _DEBUG
+void RPageArray::TSegment::Dump()
+	{
+	TUint allocCount = iCounts>>KPageArraySegmentAllocCountShift;
+	TUint lockCount = iCounts&KPageArraySegmentLockCountMask;
+	Kern::Printf("RPageArray::TSegment[0x%08x]::Dump() allocCount=%d lockCount=%d",this,allocCount,lockCount);
+	for(TUint i=0; i<KPageArraySegmentSize; i+=4)
+		Kern::Printf("  %08x %08x %08x %08x",iPages[i+0],iPages[i+1],iPages[i+2],iPages[i+3]);
+	}
+#endif
+
+
+//
+// RPageArray::TIter
+//
+
+TUint RPageArray::TIter::Pages(TPhysAddr*& aStart, TUint aMaxCount)
+	{
+	TUint index = iIndex;
+	TUint size = iEndIndex-index;
+	if(!size)
+		return 0;
+
+	TUint offset = index&KPageArraySegmentMask;
+	aStart = iSegments[index>>KPageArraySegmentShift]->iPages+offset;
+
+	TUint n = KPageArraySegmentSize-offset;
+	if(n>aMaxCount)
+		n = aMaxCount;
+	if(n>size)
+		n = size;
+	return n;
+	}
+
+
+TUint RPageArray::TIter::AddFind(TIter& aPageList)
+	{
+	TRACE2(("RPageArray::TIter::AddFind range 0x%x..0x%x",iIndex,iEndIndex));
+
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	if(index==endIndex)
+		{
+nothing_found:
+		aPageList.iIndex = endIndex;
+		aPageList.iEndIndex = endIndex;
+		TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",iEndIndex,0));
+		return 0;
+		}
+
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	TPhysAddr* p;
+	TUint limit;
+
+	MmuLock::Lock();
+
+	// scan for empty entries...
+	do
+		{
+		// get segment...
+		p = (*pS++)->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// scan segment for empty entry...
+		do
+			{
+			if(!IsPresent(*p))
+				goto empty_found;
+			++p;
+			}
+		while(++index<limit);
+
+		MmuLock::Flash();
+		}
+	while(index<endIndex);
+
+	MmuLock::Unlock();
+	goto nothing_found;
+
+empty_found:
+	// count consecutive empty entries...
+	TUint startIndex = index;
+	for(;;)
+		{
+		// scan rest of segment...
+		do
+			{
+			if(IsPresent(*p++))
+				goto empty_end;
+			}
+		while(++index<limit);
+		// check for end of range...
+		if(index>=endIndex)
+			break;
+		MmuLock::Flash();
+		// get next segment...
+		p = (*pS++)->iPages;
+		TUint nextIndex = index+KPageArraySegmentSize;
+		limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+		}
+empty_end:
+	MmuLock::Unlock();
+
+	// return range of empty entries found...
+	aPageList.Set(iSegments,startIndex,index);
+	iIndex = index;
+	TUint n = index-startIndex;
+	TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",startIndex,n));
+	return n;
+	}
+
+
+void RPageArray::TIter::Add(TUint aCount, TPhysAddr* aPages)
+	{
+	TRACE2(("RPageArray::TIter::Add 0x%x+0x%x",iIndex,aCount));
+	__NK_ASSERT_DEBUG(aCount);
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	TUint index = iIndex;
+	TUint endIndex = index+aCount;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	do
+		{
+		// get segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// add pages to segment...
+		s->AdjustAllocCount(limit-index);
+		do
+			{
+			__NK_ASSERT_DEBUG((*aPages&KPageMask)==0);
+			__NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
+			*p++ = *aPages++|ECommitted;
+			}
+		while(++index<limit);
+		}
+	while(index<endIndex);
+
+	iIndex = index;
+	}
+
+
+void RPageArray::TIter::AddContiguous(TUint aCount, TPhysAddr aPhysAddr)
+	{
+	TRACE2(("RPageArray::TIter::AddContiguous 0x%x+0x%x",iIndex,aCount));
+	__NK_ASSERT_DEBUG(aCount);
+	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+
+	TUint index = iIndex;
+	TUint endIndex = index+aCount;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+
+	do
+		{
+		// get segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// add pages to segment...
+		s->AdjustAllocCount(limit-index);
+		do
+			{
+			__NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
+			*p++ = aPhysAddr|ECommitted;
+			aPhysAddr += KPageSize;
+			}
+		while(++index<limit);
+		}
+	while(index<endIndex);
+
+	iIndex = index;
+	}
+
+
+void RPageArray::TIter::Added(TUint aCount, TUint aChanged)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aCount);
+	__NK_ASSERT_DEBUG(aChanged<=aCount);
+	TUint index = iIndex;
+	__NK_ASSERT_DEBUG((index>>KPageArraySegmentShift)==((index+aCount-1)>>KPageArraySegmentShift));
+	TSegment* s = iSegments[index>>KPageArraySegmentShift];
+	__NK_ASSERT_DEBUG(s);
+	__NK_ASSERT_DEBUG(s->iCounts&KPageArraySegmentLockCountMask);
+	s->AdjustAllocCount(aChanged);
+	Skip(aCount);
+	}
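+
+// Illustrative sketch of how the Add protocol above fits together (hypothetical
+// caller, for exposition only; assumes 'array' is an RPageArray, 'pages' points
+// at the physical pages to commit, and AddStart/AddEnd are the functions
+// defined later in this file):
+//
+//     RPageArray::TIter iter;
+//     if(array.AddStart(aIndex,aCount,iter,EFalse)==KErrNone)
+//         {
+//         RPageArray::TIter emptyList;
+//         TUint n;
+//         while((n=iter.AddFind(emptyList))!=0)
+//             {
+//             MmuLock::Lock();
+//             emptyList.Add(n,pages);     // or emptyList.AddContiguous(n,physAddr)
+//             MmuLock::Unlock();
+//             pages += n;
+//             }
+//         array.AddEnd(aIndex,aCount);
+//         }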
+
+
+TUint RPageArray::TIter::Find(TIter& aPageList)
+	{
+	TRACE2(("RPageArray::TIter::Find range 0x%x..0x%x",iIndex,iEndIndex));
+
+	MmuLock::Lock();
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+
+	// search for first page...
+	while(index<endIndex)
+		{
+		TSegment* s = *pS;
+		if(!s)
+			index = nextIndex; // segment not present, skip it
+		else
+			{
+			TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+			TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+			do
+				{
+				if(RPageArray::IsPresent(*p++))
+					goto page_found;
+				}
+			while(++index<limit);
+			}
+		++pS;
+		nextIndex += KPageArraySegmentSize;
+		MmuLock::Flash();
+		}
+	MmuLock::Unlock();
+
+	// no pages found...
+	aPageList.iIndex = endIndex;
+	aPageList.iEndIndex = endIndex;
+	TRACE2(("RPageArray::TIter::Find returns 0x%x+0x%x",iEndIndex,0));
+	return 0;
+
+page_found:
+	// count consecutive pages...
+	TUint startIndex = index;
+	for(;;)
+		{
+		// check first entry in segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		if(!RPageArray::IsPresent(*p++))
+			break;
+
+		// segment has pages, lock it...
+		s->Lock();
+
+		// scan rest of entries...
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+		while(++index<limit)
+			if(!RPageArray::IsPresent(*p++))
+				goto done;
+		// check for end of range...
+		if(index==endIndex)
+			break;
+		MmuLock::Flash();
+		}
+done:
+	MmuLock::Unlock();
+
+	// return pages found...
+	aPageList.Set(iSegments,startIndex,index);
+	iIndex = index;
+	TUint n = index-startIndex;
+	TRACE2(("RPageArray::TIter::Find returns 0x%x+0x%x",startIndex,n));
+	return n;
+	}
+
+
+TUint RPageArray::TIter::RemoveFind(TIter& aPageList)
+	{
+	TRACE2(("RPageArray::TIter::RemoveFind range 0x%x..0x%x",iIndex,iEndIndex));
+
+	MmuLock::Lock();
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+
+	// search for first page...
+	while(index<endIndex)
+		{
+		TSegment* s = *pS;
+		if(!s)
+			index = nextIndex; // segment not present, skip it
+		else
+			{
+			TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+			TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+			do
+				{
+				TPhysAddr page = *p++;
+				if(State(page)>=EDecommitting)
+					goto start_done;
+				}
+			while(++index<limit);
+			}
+		++pS;
+		nextIndex += KPageArraySegmentSize;
+		MmuLock::Flash();
+		}
+	MmuLock::Unlock();
+
+	// no pages found...
+	aPageList.iIndex = endIndex;
+	aPageList.iEndIndex = endIndex;
+	TRACE2(("RPageArray::TIter::RemoveFind returns 0x%x+0x%x",iEndIndex,0));
+	return 0;
+
+start_done:
+	// count consecutive pages to remove...
+	TUint startIndex = index;
+	for(;;)
+		{
+		// check first entry in segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TPhysAddr page = *p++;
+		if(State(page)<EDecommitting)
+			break;
+
+		// segment has pages, lock it...
+		s->Lock();
+
+		// scan rest of entries...
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+		while(++index<limit)
+			if(State(*p++)<EDecommitting)
+				goto done;
+		// check for end of range...
+		if(index==endIndex)
+			break;
+		MmuLock::Flash();
+		}
+done:
+	MmuLock::Unlock();
+
+	// return pages found...
+	aPageList.Set(iSegments,startIndex,index);
+	iIndex = index;
+	TUint n = index-startIndex;
+	TRACE2(("RPageArray::TIter::RemoveFind returns 0x%x+0x%x",startIndex,n));
+	return n;
+	}
+
+
+TUint RPageArray::TIter::Remove(TUint aMaxCount, TPhysAddr* aPages)
+	{
+	TRACE2(("RPageArray::TIter::Remove range 0x%x..0x%x",iIndex,iEndIndex));
+
+	TUint count = 0;
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	if(index==endIndex)
+		return 0;
+
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+
+	MmuLock::Lock();
+
+	do
+		{
+		// get segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// remove pages from segment...
+		do
+			{
+			TPhysAddr page = *p++;
+			if(State(page)==EDecommitting || State(page)==EDecommitted)
+				{
+				// remove a page...
+				if(page&EUnmapVetoed)
+					{
+					p[-1] = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
+					}
+				else
+					{
+					p[-1] = EEmptyEntry;
+					s->AdjustAllocCount(-1);
+					TPhysAddr pagePhys = page&~KPageMask;
+					aPages[count++] = pagePhys;
+					TRACE2(("RPageArray::TIter::Remove index=0x%x returns 0x%08x",index,pagePhys));
+					if(count>=aMaxCount)
+						{
+						++index;
+						goto done;
+						}
+					}
+				// check not removing managed pages without the RamAllocLock...
+				__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
+					|| SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
+				}
+			}
+		while(++index<limit);
+
+		MmuLock::Flash();
+		}
+	while(index<endIndex);
+
+done:
+	MmuLock::Unlock();
+
+	iIndex = index;
+	return count;
+	}
+
+
+void RPageArray::TIter::VetoUnmap()
+	{
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	if(index==endIndex)
+		return;
+
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+
+	MmuLock::Lock();
+
+	do
+		{
+		// get segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// veto unmapping of pages in segment...
+		do
+			{
+			TPhysAddr page = *p;
+			if(State(page)==EDecommitting)
+				*p = page|EUnmapVetoed;
+			++p;
+			}
+		while(++index<limit);
+
+		MmuLock::Flash();
+		}
+	while(index<endIndex);
+
+	MmuLock::Unlock();
+	}
+
+
+void RPageArray::TIter::VetoRestrict(TBool aPageMoving)
+	{
+	TUint index = iIndex;
+	TUint endIndex = iEndIndex;
+	if(index==endIndex)
+		return;
+
+	RPageArray::TState operation = aPageMoving ? RPageArray::EMoving : RPageArray::ERestrictingNA;
+
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+
+	MmuLock::Lock();
+
+	do
+		{
+		// get segment...
+		TSegment* s = *pS++;
+		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+
+		// veto restriction of pages in segment...
+		do
+			{
+			TPhysAddr page = *p;
+			if(State(page)==operation)
+				{
+				// boost the state back to committed to veto the operation...
+				*p = (page&~EStateMask)|ECommitted;
+				}
+			++p;
+			}
+		while(++index<limit);
+
+		MmuLock::Flash();
+		}
+	while(index<endIndex);
+
+	MmuLock::Unlock();
+	}
+
+
+//
+// RPageArray
+//
+
+TInt RPageArray::Construct(TUint aMaxPages, TBool aPreallocateMemory)
+	{
+	iNumSegments = (aMaxPages+KPageArraySegmentSize-1)>>KPageArraySegmentShift;
+	iSegments = (TSegment**)Kern::AllocZ(iNumSegments*sizeof(TSegment*));
+	if(!iSegments)
+		return KErrNoMemory;
+
+	if(!aPreallocateMemory)
+		return KErrNone;
+
+	return PreallocateMemory();
+	}
+
+
+TInt RPageArray::PreallocateMemory()
+	{
+	MmuLock::Lock();
+
+	__NK_ASSERT_DEBUG(!iPreallocatedMemory);
+	iPreallocatedMemory = true;
+
+	TSegment** pS = iSegments;
+	TSegment** pGEnd = pS+iNumSegments;
+	do
+		{
+		if(!GetOrAllocateSegment(pS,1))
+			{
+			iNumSegments = pS-iSegments; // truncate to amount successfully allocated
+			MmuLock::Unlock();
+			return KErrNoMemory;
+			}
+		}
+	while(++pS<pGEnd);
+
+	MmuLock::Unlock();
+	return KErrNone;
+	}
+
+
+RPageArray::~RPageArray()
+	{
+	TSegment** pS = iSegments;
+	if(pS)
+		{
+		TSegment** pGEnd = pS+iNumSegments;
+		if(!iPreallocatedMemory)
+			{
+			// check all segments have been deleted...
+			while(pS<pGEnd)
+				{
+#ifdef _DEBUG
+				if(*pS)
+					(*pS)->Dump();
+#endif
+				__NK_ASSERT_DEBUG(!*pS);
+				++pS;
+				}
+			}
+		else
+			{
+			MmuLock::Lock();
+			while(pS<pGEnd)
+				{
+				TSegment::Unlock(*pS); // this will delete the segment
+#ifdef _DEBUG
+				if(*pS)
+					(*pS)->Dump();
+#endif
+				__NK_ASSERT_DEBUG(!*pS);
+				TRACE2(("RPageArray::~RPageArray delete segment=%d",pS-iSegments));
+				++pS;
+				if(pS<pGEnd)
+					MmuLock::Flash();
+				}
+			MmuLock::Unlock();
+			}
+		Kern::Free(iSegments);
+		}
+	}
+
+
+RPageArray::TSegment* RPageArray::GetOrAllocateSegment(TSegment** aSegmentEntry, TUint aLockCount)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aLockCount);
+
+	for(;;)
+		{
+		TSegment* s = *aSegmentEntry;
+		if(s)
+			{
+			// lock existing segment...
+			s->Lock(aLockCount);
+			return s;
+			}
+
+		// no segment, so allocate one...
+		MmuLock::Unlock();
+		s = TSegment::New();
+		MmuLock::Lock();
+		if(!s)
+			return s;
+
+		// if someone else allocated one...
+		if(*aSegmentEntry)
+			{
+			// free the one we created...
+			TSegment::Unlock(s);
+			// and retry...
+			continue;
+			}
+
+		// use new segment...
+		TRACE2(("RPageArray::GetOrAllocateSegment new segment=%d",aSegmentEntry-iSegments));
+		*aSegmentEntry = s;
+		if(--aLockCount)
+			s->Lock(aLockCount);
+		return s;
+		}
+	}
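+
+// Note on GetOrAllocateSegment: TSegment::New() asserts that the MmuLock is
+// not held, so the allocation path above must drop the MmuLock, allocate, then
+// re-take the lock and re-check the entry; if another thread installed a
+// segment in the meantime, the freshly created one is discarded again via
+// TSegment::Unlock(s).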
+
+
+TInt RPageArray::Alloc(TUint aIndex, TUint aCount)
+	{
+	TRACE2(("RPageArray::Alloc(0x%x,0x%x)",aIndex,aCount));
+	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
+	__NK_ASSERT_DEBUG(aIndex+aCount>=aIndex);
+
+	MmuLock::Lock();
+
+	TUint index = aIndex;
+	TUint endIndex = aIndex+aCount;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	while(index<endIndex)
+		{
+		// lock the entries in one segment...
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint lockCount = ((nextIndex<endIndex) ? nextIndex : endIndex)-index;
+		if(!GetOrAllocateSegment(pS++,lockCount))
+			goto no_memory;
+		index = nextIndex;
+		}
+
+	MmuLock::Unlock();
+	return KErrNone;
+
+no_memory:
+	MmuLock::Unlock();
+
+	// free any entries already allocated...
+	if(index!=aIndex)
+		Free(aIndex,index-aIndex);
+
+	return KErrNoMemory;
+	}
+
+
+void RPageArray::Free(TUint aIndex, TUint aCount)
+	{
+	TRACE2(("RPageArray::Free(0x%x,0x%x)",aIndex,aCount));
+	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
+	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);
+
+	MmuLock::Lock();
+
+	TUint index = aIndex;
+	TUint endIndex = aIndex+aCount;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	while(index<endIndex)
+		{
+		// unlock the entries in one segment...
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TUint unlockCount = ((nextIndex<endIndex) ? nextIndex : endIndex)-index;
+		TSegment::Unlock(*pS,unlockCount);
+		++pS;
+		index = nextIndex;
+		}
+
+	MmuLock::Unlock();
+	}
+
+
+TInt RPageArray::AddStart(TUint aIndex, TUint aCount, TIter& aIter, TBool aAllowExisting)
+	{
+	TRACE2(("RPageArray::AddStart(0x%x,0x%x,?,%d)",aIndex,aCount,aAllowExisting));
+	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
+	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);
+
+	aIter.Set(iSegments,aIndex,aIndex+aCount);
+
+	MmuLock::Lock();
+
+	TInt r;
+	TUint index = aIndex;
+	TUint endIndex = aIndex+aCount;
+	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
+	while(index<endIndex)
+		{
+		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
+		TSegment* s = *pS;
+		if(!s)
+			{
+			// no segment, so allocate one...
+			s = GetOrAllocateSegment(pS,1);
+			if(!s)
+				{
+				r = KErrNoMemory;
+				goto error;
+				}
+			index = nextIndex;
+			}
+		else
+			{
+			if(!aAllowExisting)
+				{
+				// check no entries are already present...
+				TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
+				TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
+				do
+					{
+					if(IsPresent(*p++))
+						{
+						r = KErrAlreadyExists;
+						goto error;
+						}
+					}
+				while(++index<limit);
+				}
+
+			// lock segment...
+			s->Lock();
+
+			if(index<nextIndex)
+				index = nextIndex;
+			}
+		++pS;
+		MmuLock::Flash();
+		}
+
+	MmuLock::Unlock();
+	return KErrNone;
+
+error:
+	MmuLock::Unlock();
+
+	// unlock any segments already locked...
+	endIndex = index&~KPageArraySegmentMask;
+	if(endIndex>aIndex)
+		Release(iSegments,aIndex,endIndex-aIndex);
+
+	// return error...
+	return r;
+	}
+
+
+void RPageArray::AddEnd(TUint aIndex, TUint aCount)
+	{
+	Release(iSegments,aIndex,aCount);
+	}
+
+
+void RPageArray::FindStart(TUint aIndex, TUint aCount, TIter& aIter)
+	{
+	TRACE2(("RPageArray::FindStart(0x%x,0x%x,?)",aIndex,aCount));
+	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
+	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);
+
+	aIter.Set(iSegments,aIndex,aIndex+aCount);
+	}
+
+
+void RPageArray::Release(TSegment** aSegments, TUint aIndex, TUint aCount)
+	{
+	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);
+
+	MmuLock::Lock();
+
+	TSegment** pS = aSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment** pGLast = aSegments+((aIndex+aCount-1)>>KPageArraySegmentShift);
+	__NK_ASSERT_DEBUG(pS<=pGLast);
+	TUint flash = 0;
+	do
+		{
+		MmuLock::Flash(flash,KMaxPagesInOneGo);
+		if(TSegment::Unlock(*pS)==0)
+			{
+			TRACE2(("RPageArray::Release delete segment=%d",pS-aSegments));
+			}
+		++pS;
+		}
+	while(pS<=pGLast);
+
+	MmuLock::Unlock();
+	}
+
+
+TPhysAddr* RPageArray::AddPageStart(TUint aIndex, TIter& aPageList)
+	{
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	MmuLock::Lock();
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = GetOrAllocateSegment(pS,1);
+	MmuLock::Unlock();
+
+	if(!s)
+		return 0;
+
+	aPageList.Set(iSegments,aIndex,aIndex+1);
+
+	return s->iPages+(aIndex&KPageArraySegmentMask);
+	}
+
+
+TPhysAddr* RPageArray::RemovePageStart(TUint aIndex, TIter& aPageList)
+	{
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	MmuLock::Lock();
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	if(!s)
+		{
+		MmuLock::Unlock();
+		return 0;
+		}
+
+	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
+	TPhysAddr page = *p;
+	if(State(page)<ECommitted)
+		{
+		MmuLock::Unlock();
+		return 0;
+		}
+
+	*p = (page&~EStateMask)|EDecommitting;
+
+	s->Lock();
+
+	MmuLock::Unlock();
+
+	aPageList.Set(iSegments,aIndex,aIndex+1);
+
+	return p;
+	}
+
+
+TPhysAddr RPageArray::RemovePage(TPhysAddr* aPageEntry)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	TPhysAddr page = *aPageEntry;
+	__NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
+	if(State(page)==EDecommitting || State(page)==EDecommitted)
+		{
+		// remove a page...
+		if(page&EUnmapVetoed)
+			{
+			*aPageEntry = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
+			}
+		else
+			{
+			*aPageEntry = EEmptyEntry;
+			return page&~KPageMask;
+			}
+		// check not removing managed pages without the RamAllocLock...
+		__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
+			|| SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
+		}
+	return KPhysAddrInvalid;
+	}
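+
+// The page-state comparisons used above and below (e.g. '>=EDecommitting',
+// '< RPageArray::ERestrictingNA', '>EStealing', '<= RPageArray::EMoving')
+// rely on the relative order of the TState values (see the TState enum in
+// mpagearray.h), with ENotPresent lowest and ECommitted highest; as a sketch:
+//
+//     ENotPresent < EDecommitted < EDecommitting < EStealing
+//                 < ERestrictingNA < EMoving < ECommitted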
+
+
+TPhysAddr* RPageArray::RestrictPageNAStart(TUint aIndex, TIter& aPageList)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	if(!s)
+		return 0;
+
+	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
+	TPhysAddr page = *p;
+	if(State(page) < RPageArray::ERestrictingNA)
+		return 0;
+
+	*p = (page&~EStateMask) | RPageArray::ERestrictingNA;
+
+	s->Lock();
+
+	aPageList.Set(iSegments,aIndex,aIndex+1);
+
+	return p;
+	}
+
+
+TPhysAddr* RPageArray::StealPageStart(TUint aIndex, TIter& aPageList)
+	{
+	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	__NK_ASSERT_DEBUG(s); // we only steal pages in the live list and these can not go away yet because we hold the RamAllocLock
+
+	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
+	TPhysAddr page = *p;
+
+	if(State(page)>EStealing)
+		*p = (page&~EStateMask)|EStealing;
+
+	s->Lock();
+
+	aPageList.Set(iSegments,aIndex,aIndex+1);
+
+	return p;
+	}
+
+
+TPhysAddr* RPageArray::MovePageStart(TUint aIndex, TIter& aPageList)
+	{
+	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex <= iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	// The segment should always exist for a page that is being moved.
+	__NK_ASSERT_DEBUG(s);
+
+	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
+	TPhysAddr page = *p;
+	if(State(page) <= RPageArray::EMoving)
+		return NULL;
+
+	*p = (page & ~EStateMask) | EMoving;
+
+	aPageList.Set(iSegments, aIndex, aIndex+1);
+
+	return p;
+	}
+
+
+void RPageArray::ReleasePage(TUint aIndex, TInt aDelta)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	__NK_ASSERT_DEBUG(s); // must exist because FindPageStart/AddPageStart locked it
+
+	__NK_ASSERT_DEBUG(aDelta>=-1 && aDelta<=1);
+	if(aDelta)
+		s->AdjustAllocCount(aDelta);
+
+	if(TSegment::Unlock(*pS)==0)
+		{
+		TRACE2(("RPageArray::ReleasePage delete segment=%d",pS-iSegments));
+		}
+	}
+
+
+TPhysAddr RPageArray::Page(TUint aIndex)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	if(!s)
+		return ENotPresent;
+	return s->iPages[aIndex&KPageArraySegmentMask];
+	}
+
+
+TPhysAddr* RPageArray::PageEntry(TUint aIndex)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	if(!s)
+		return NULL;
+	return s->iPages + (aIndex & KPageArraySegmentMask);
+	}
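+
+// PagingManagerData/SetPagingManagerData below keep a per-page value for the
+// demand paging manager: for a not-present entry the value is stored directly
+// in the array entry itself, shifted above the state and flag bits, while for
+// a present page it lives in the page's SPageInfo. Callers therefore see one
+// uniform value regardless of whether the page is currently paged in.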
+
+
+TUint RPageArray::PagingManagerData(TUint aIndex)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	__NK_ASSERT_DEBUG(s);
+	TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];
+
+	TPhysAddr entry = *p;
+	if(IsPresent(entry))
+		{
+#ifdef _DEBUG
+		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
+		if(!pi)
+			Kern::Printf("RPageArray::PagingManagerData bad entry 0x%08x",entry);
+		__NK_ASSERT_DEBUG(pi);
+#else
+		SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
+#endif
+		entry = pi->PagingManagerData();
+		}
+	__NK_ASSERT_DEBUG((entry&(EFlagsMask|EStateMask))==ENotPresent);
+
+	return entry>>(EFlagsShift+EStateShift);
+	}
+
+
+void RPageArray::SetPagingManagerData(TUint aIndex, TUint aValue)
+	{
+	aValue = (aValue<<(EFlagsShift+EStateShift))|ENotPresent;
+
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	__NK_ASSERT_DEBUG(s);
+	TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];
+
+	TPhysAddr entry = *p;
+	if(!IsPresent(entry))
+		*p = aValue;
+	else
+		{
+#ifdef _DEBUG
+		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
+		if(!pi)
+			Kern::Printf("RPageArray::SetPagingManagerData bad entry 0x%08x",entry);
+		__NK_ASSERT_DEBUG(pi);
+#else
+		SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
+#endif
+		pi->SetPagingManagerData(aValue);
+		}
+	}
+
+
+TPhysAddr RPageArray::PhysAddr(TUint aIndex)
+	{
+	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
+	__NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
+
+	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
+	TSegment* s = *pS;
+	if(s)
+		{
+		TPhysAddr page = s->iPages[aIndex&KPageArraySegmentMask];
+		if(IsPresent(page))
+			{
+			return page&~KPageMask;
+			}
+		}
+	return KPhysAddrInvalid;
+	}
+
+
+TInt RPageArray::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
+	{
+	__NK_ASSERT_DEBUG(aCount);
+	MmuLock::Lock();
+
+	TUint32* pageList = aPhysicalPageList;
+
+	// get first page...
+	TPhysAddr physStart = PhysAddr(aIndex++);
+	if(physStart==KPhysAddrInvalid)
+		{
+		MmuLock::Unlock();
+		return KErrNotFound;
+		}
+	if(pageList)
+		*pageList++ = physStart;
+
+	TUint32 nextPhys = physStart+KPageSize;
+
+	TUint flash = 0;
+	while(--aCount)
+		{
+		MmuLock::Flash(flash,KMaxPagesInOneGo);
+
+		// get next page...
+		TPhysAddr phys = PhysAddr(aIndex++);
+		if(phys==KPhysAddrInvalid)
+			{
+			MmuLock::Unlock();
+			return KErrNotFound;
+			}
+		if(pageList)
+			*pageList++ = phys;
+
+		// check for contiguity...
+		if(phys!=nextPhys)
+			nextPhys = KPhysAddrInvalid;
+		else
+			nextPhys += KPageSize;
+		}
+
+	MmuLock::Unlock();
+
+	if(nextPhys==KPhysAddrInvalid)
+		{
+		// memory is discontiguous...
+		if(!aPhysicalPageList)
+			return KErrNotFound;
+		aPhysicalAddress = KPhysAddrInvalid;
+		return 1;
+		}
+	else
+		{
+		// memory is contiguous...
+		aPhysicalAddress = physStart;
+		return KErrNone;
+		}
+	}
+
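+
+// Return convention of PhysAddr(aIndex,aCount,...) above, shown as an
+// illustrative caller (UseContiguous/UseList/Fail are hypothetical):
+//
+//     TPhysAddr addr;
+//     TInt r = array.PhysAddr(index,count,addr,pageList);
+//     if(r==KErrNone)      // range is physically contiguous, 'addr' is its base
+//         UseContiguous(addr);
+//     else if(r==1)        // discontiguous, but 'pageList' holds every page
+//         UseList(pageList);
+//     else                 // KErrNotFound: some entry in the range was absent
+//         Fail();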