diff -r e880629062dd -r e4a7b1cbe40c kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
--- a/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp	Wed May 05 05:11:16 2010 +0100
+++ b/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp	Mon May 10 11:40:53 2010 +0100
@@ -312,7 +312,8 @@
 	}
 
 
-#if 0
+#ifdef FMM_VERIFY_RAM
+// Attempt to write to each unused RAM page and verify the contents.
 void Mmu::VerifyRam()
 	{
 	Kern::Printf("Mmu::VerifyRam() pass 1");
@@ -474,6 +475,10 @@
 				__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
 				if(pi->Type()==SPageInfo::EUnused)
 					pi->SetFixed();
+#ifdef BTRACE_KERNEL_MEMORY
+				if(r == KErrNone)
+					++Epoc::KernelMiscPages;
+#endif
 				}
 			}
 		}
@@ -500,21 +505,24 @@
 	r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
 	if(r!=KErrNone)
 		Panic(EPhysMemSyncMutexCreateFailed);
-//	VerifyRam();
+
+#ifdef FMM_VERIFY_RAM
+	VerifyRam();
+#endif
 	}
 
 
 void Mmu::Init2FinalCommon()
 	{
 	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
-	// hack, reduce free memory to <2GB...
+	// Reduce free memory to <2GB...
 	while(FreeRamInPages()>=0x80000000/KPageSize)
 		{
 		TPhysAddr dummyPage;
 		TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
 		__NK_ASSERT_ALWAYS(r==KErrNone);
 		}
-	// hack, reduce total RAM to <2GB...
+	// Reduce total RAM to <2GB...
 	if(TheSuperPage().iTotalRamSize<0)
 		TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
 
@@ -540,6 +548,27 @@
 	iDefrag->Init3(TheMmu.iRamPageAllocator);
 	}
 
+
+void Mmu::BTracePrime(TUint aCategory)
+	{
+	(void)aCategory;
+
+#ifdef BTRACE_RAM_ALLOCATOR
+	// Must check for -1 as that is the default value of aCategory for
+	// BTrace::Prime() which is intended to prime all categories that are
+	// currently enabled via a single invocation of BTrace::Prime().
+	if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
+		{
+		NKern::ThreadEnterCS();
+		RamAllocLock::Lock();
+		iRamPageAllocator->DoBTracePrime();
+		RamAllocLock::Unlock();
+		NKern::ThreadLeaveCS();
+		}
+#endif
+	}
+
+
 //
 // Utils
 //
@@ -593,7 +622,7 @@
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aPhysAddr, aAlign));
 	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
 
-	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
+	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, aAlign);
 	if(r!=KErrNone)
 		iRamAllocFailed = ETrue;
 	else
@@ -619,18 +648,7 @@
 		PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
 
 		// update page infos...
-		TUint flash = 0;
-		TPhysAddr* pageEnd = aPageList + aNumPages;
-		MmuLock::Lock();
-		TPhysAddr* page = aPageList;
-		while (page < pageEnd)
-			{
-			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
-			TPhysAddr pagePhys = *page++;
-			__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
-			SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
-			}
-		MmuLock::Unlock();
+		SetAllocPhysRam(aPageList, aNumPages);
 		}
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
 	return r;
@@ -853,6 +871,22 @@
 	}
 
 
+/**
+Mark a page as being allocated to a particular page type.
+
+NOTE - This page should not be used until PagesAllocated() has been invoked on it.
+
+@param aPhysAddr		The physical address of the page to mark as allocated.
+@param aZonePageType	The type of the page to mark as allocated.
+*/
+void Mmu::MarkPageAllocated(TPhysAddr aPhysAddr, TZonePageType aZonePageType)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MarkPageAllocated(0x%x, %d)", aPhysAddr, aZonePageType));
+	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
+	iRamPageAllocator->MarkPageAllocated(aPhysAddr, aZonePageType);
+	}
+
+
 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
@@ -872,20 +906,31 @@
 		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
 		PageFreed(pi);
 
-		// If this is an old page of a page being moved that was previously pinned
-		// then make sure it is freed as discardable otherwise despite DPager::DonatePages()
-		// having marked it as discardable it would be freed as movable.
-		__NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
-		if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
-			aZonePageType = EPageDiscard;
-
-		if(ThePager.PageFreed(pi)==KErrNone)
-			--aCount; // pager has dealt with this page, so one less for us
-		else
+		switch (ThePager.PageFreed(pi))
 			{
-			// All paged pages should have been dealt with by the pager above.
-			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
-			*pagesOut++ = pagePhys; // store page address for freeing later
+			case KErrNone:
+				--aCount; // pager has dealt with this page, so one less for us
+				break;
+			case KErrCompletion:
+				// This was a pager controlled page but it is no longer required.
+				__NK_ASSERT_DEBUG(aZonePageType == EPageMovable || aZonePageType == EPageDiscard);
+				__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
+				if (aZonePageType == EPageMovable)
+					{// This page was donated to the pager so have to free it here
+					// as aZonePageType is incorrect for this page but aPages may
+					// contain a mixture of movable and discardable pages.
+					MmuLock::Unlock();
+					iRamPageAllocator->FreeRamPages(&pagePhys, 1, EPageDiscard);
+					aCount--; // We've freed this page here so one less to free later
+					flash = 0;	// reset flash count as we released the mmulock.
+					MmuLock::Lock();
+					break;
+					}
+				// fall through..
+			default:
+				// Free this page..
+				__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
+				*pagesOut++ = pagePhys; // store page address for freeing later
 			}
 		}
 	MmuLock::Unlock();
@@ -904,21 +949,15 @@
 		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
 		return KErrNoMemory;
 		}
-	// Only the page sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
+	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
 	__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
 #endif
-	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
-	if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
-		{
-		// flush paging cache and retry...
-		ThePager.FlushAll();
-		r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
-		}
+	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, aAlign+KPageShift);
 	if(r!=KErrNone)
 		iRamAllocFailed = ETrue;
 	else
 		PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
-	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguouseRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
+	__KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
 	return r;
 	}
 
@@ -963,19 +1002,7 @@
 		return r;
 
 	// update page infos...
-	TPhysAddr* pages = aPages;
-	TPhysAddr* pagesEnd = pages+aCount;
-	MmuLock::Lock();
-	TUint flash = 0;
-	while(pages<pagesEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
-		TPhysAddr pagePhys = *pages++;
-		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
-		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
-		pi->SetPhysAlloc();
-		}
-	MmuLock::Unlock();
+	SetAllocPhysRam(aPages, aCount);
 
 	return KErrNone;
 	}
@@ -1004,6 +1031,19 @@
 	MmuLock::Unlock();
 
 	iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
+
+#ifdef BTRACE_KERNEL_MEMORY
+	if (BTrace::CheckFilter(BTrace::EKernelMemory))
+		{// Only loop round each page if EKernelMemory tracing is enabled
+		pages = aPages;
+		pagesEnd = aPages + aCount;
+		while (pages < pagesEnd)
+			{
+			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pages++);
+			Epoc::DriverAllocdPhysRam -= KPageSize;
+			}
+		}
+#endif
 	}
 
 
@@ -1015,17 +1055,7 @@
 		return r;
 
 	// update page infos...
-	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
-	SPageInfo* piEnd = pi+aCount;
-	TUint flash = 0;
-	MmuLock::Lock();
-	while(pi<piEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
-		pi->SetPhysAlloc();
-		++pi;
-		}
-	MmuLock::Unlock();
+	SetAllocPhysRam(aPhysAddr, aCount);
 
 	return KErrNone;
 	}
@@ -1050,7 +1080,25 @@
 		}
 	MmuLock::Unlock();
 
-	iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
+	TUint bytes = aCount << KPageShift;
+	iRamPageAllocator->FreePhysicalRam(aPhysAddr, bytes);
+
+#ifdef BTRACE_KERNEL_MEMORY
+	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, bytes, aPhysAddr);
+	Epoc::DriverAllocdPhysRam -= bytes;
+#endif
+	}
+
+
+TInt Mmu::FreeRamZone(TUint aZoneId)
+	{
+	TPhysAddr zoneBase;
+	TUint zonePages;
+	TInt r = iRamPageAllocator->GetZoneAddress(aZoneId, zoneBase, zonePages);
+	if (r != KErrNone)
+		return r;
+	FreePhysicalRam(zoneBase, zonePages);
+	return KErrNone;
 	}
 
 
@@ -1058,25 +1106,11 @@
 	{
 	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
 	aPhysAddr &= ~KPageMask;
-	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
-	if(r!=KErrNone)
+	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr, aCount << KPageShift);
+	if(r != KErrNone)
 		return r;
-	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
-
-	// update page infos...
-	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
-	SPageInfo* piEnd = pi+aCount;
-	TUint flash = 0;
-	MmuLock::Lock();
-	while(pi<piEnd)
-		{
-		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
-		pi->SetPhysAlloc();
-		++pi;
-		}
-	MmuLock::Unlock();
-
+	AllocatedPhysicalRam(aPhysAddr, aCount, aFlags);
 	return KErrNone;
 	}
 
 
@@ -1088,17 +1122,59 @@
 	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
 
 	// update page infos...
+	SetAllocPhysRam(aPhysAddr, aCount);
+	}
+
+
+void Mmu::SetAllocPhysRam(TPhysAddr aPhysAddr, TUint aCount)
+	{
 	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
 	SPageInfo* piEnd = pi+aCount;
 	TUint flash = 0;
 	MmuLock::Lock();
 	while(pi<piEnd)
 		{
 		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
 		pi->SetPhysAlloc();
 		++pi;
 		}
 	MmuLock::Unlock();
+
+#ifdef BTRACE_KERNEL_MEMORY
+	TUint bytes = aCount << KPageShift;
+	BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, bytes, aPhysAddr);
+	Epoc::DriverAllocdPhysRam += bytes;
+#endif
+	}
+
+
+void Mmu::SetAllocPhysRam(TPhysAddr* aPageList, TUint aNumPages)
+	{
+	TPhysAddr* page = aPageList;
+	TPhysAddr* pageEnd = aPageList + aNumPages;
+	TUint flash = 0;
+	MmuLock::Lock();
+	while (page < pageEnd)
+		{
+		MmuLock::Flash(flash, KMaxPageInfoUpdatesInOneGo / 2);
+		TPhysAddr pagePhys = *page++;
+		__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
+		SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
+		}
+	MmuLock::Unlock();
+
+#ifdef BTRACE_KERNEL_MEMORY
+	if (BTrace::CheckFilter(BTrace::EKernelMemory))
+		{// Only loop round each page if EKernelMemory tracing is enabled
+		TPhysAddr* pAddr = aPageList;
+		TPhysAddr* pAddrEnd = aPageList + aNumPages;
+		while (pAddr < pAddrEnd)
+			{
+			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
+			Epoc::DriverAllocdPhysRam += KPageSize;
+			}
+		}
+#endif
 	}
 
 
@@ -1187,20 +1263,10 @@
 	__NK_ASSERT_DEBUG(iSize>=1);
 	__NK_ASSERT_DEBUG(iCount==0);
 
-	TUint colour = aColour&KPageColourMask;
-	TLinAddr addr = iLinAddr+(colour<<KPageShift);
 	__NK_ASSERT_DEBUG(iSize>=1);
 	__NK_ASSERT_DEBUG(iCount==0);
-
-	TUint colour = aColour&KPageColourMask;
-	TLinAddr addr = iLinAddr+(colour<